| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M) |
|---|---|---|---|---|---|
from __future__ import absolute_import
from __future__ import print_function
def print_host(name, id, state, msg=None, indent="  "):
"""msg, if present, is printed (with a two-space indent) after the normal
line."""
print("%-24s %-25s %s" % (name, id, state))
if msg:
print(indent + ("\n" + indent).join(msg.split("\n")))
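# A minimal usage sketch (hypothetical values):
#   print_host("web01", "i-0abc1234", "running", msg="disk ok\nnet ok")
# prints one aligned "name  id  state" line, then each message line indented
# two spaces.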
| unixnut/cloud-support | shepherd/formatting.py | Python | gpl-2.0 | 350 |
import pytest
from kolibri.plugins import DEFAULT_PLUGINS
from kolibri.plugins.utils import enable_plugin
@pytest.mark.parametrize("plugin", DEFAULT_PLUGINS)
def test_can_enable_all_default_plugins(plugin):
assert enable_plugin(plugin)
| learningequality/kolibri | kolibri/plugins/utils/test/test_default_plugins.py | Python | mit | 243 |
# pysloc/pysloc/__init__.py
""" Library for the pySloc line counter. """
import hashlib
import re
from stat import S_ISDIR, S_ISREG # GETS DROPPED IF USING SCANDIR
import os
# try:
# from os import scandir
# except ImportError:
# from scandir import scandir
from bs4 import BeautifulSoup, Comment
__all__ = ['__version__', '__version_date__',
# constants
'GPERF_RE', 'RE2C_RE',
# functions
'count_lines_augeas',
'count_lines_bash',
'count_lines_c',
'count_lines_clojure',
'count_lines_clojurescript',
'count_lines_cpp',
           'count_lines_crack',
'count_lines_double_dash',
'count_lines_fortran',
'count_lines_fortran90',
'count_lines_gperf',
'count_lines_go',
'count_lines_html',
'count_lines_in_dir',
'count_lines_java',
'count_lines_java_style',
'count_lines_lisp',
'count_lines_matlab',
'count_lines_not_sharp',
'count_lines_ocaml',
'count_lines_occam',
'count_lines_pascal',
'count_lines_perl',
'count_lines_php',
'count_lines_protobuf',
'count_lines_python',
'count_lines_r_markdown',
'count_lines_rust',
'count_lines_re2c',
'count_lines_ruby',
'count_lines_scala', 'count_lines_shell', 'count_lines_snobol',
'count_lines_tex',
'count_lines_toml',
'count_lines_txt',
'uncomment_html', 'uncomment_java',
# classes
'CountHolder', 'MapHolder', ]
# exported constants ------------------------------------------------
__version__ = '0.9.10'
__version_date__ = '2019-04-02'
# private constants -------------------------------------------------
GPERF_RE = re.compile(
r'^/\* ANSI-C code produced by gperf version \d+\.\d\.\d+ \*/')
RE2C_RE = re.compile(r'^/\* Generated by re2c \d+\.\d+\.\d+ \*/')
TQUOTE = '"""'
# class(es) ---------------------------------------------------------
class CountHolder(object):
""" a holder for various counts """
LOC = 0 # lines of non-test code
SLOC = 1 # of which source lines
TLOC = 2 # lines of test code
TSLOC = 3 # of which source lines
def __init__(self):
# we maintain map from lang to a list of 4: lines, sloc, tlines, tsloc,
# where t means test
self.map_ = {}
def add_counts(self, lang, loc_, sloc_):
""" add non-test line count and source line count for language"""
# XXX we want l and s to be non-negative integers
if lang not in self.map_:
self.map_[lang] = [0, 0, 0, 0]
self.map_[lang][CountHolder.LOC] += loc_
self.map_[lang][CountHolder.SLOC] += sloc_
def add_test_counts(self, lang, loc_, sloc_):
""" add test line count and source line count for language"""
# XXX we want l and s to be non-negative integers
if lang not in self.map_:
self.map_[lang] = [0, 0, 0, 0]
self.map_[lang][CountHolder.TLOC] += loc_
self.map_[lang][CountHolder.TSLOC] += sloc_
def get_counts(self, lang):
if (not lang) or (lang not in self.map_):
return (0, 0, 0, 0)
return self.map_[lang]
def pretty_counts(self, lang):
"""
Return a string containing the short name of the language and
the total line count, the source line count, and the percentage
of source lines which are test lines. The total line count
        includes the source line count. As an example, a Python file
        with 17 lines of which 12 are source, 9 of those being test code,
        would produce the string 'py:17/12 T75.0%'.
"""
if (not lang) or (lang not in self.map_):
return '%s: 0' % lang
loc_, sloc_, test_loc, test_sloc = self.map_[lang]
if test_sloc > 0:
return "%s:%d/%d T%.1f%%" % (
lang, loc_ + test_loc, sloc_ + test_sloc,
100.0 * test_sloc / (sloc_ + test_sloc))
elif loc_ > 0:
return "%s:%d/%d" % (lang, loc_ + test_loc, sloc_)
return ''
def pretty_break_down(self):
"""
Generate a semicolon-separated list sorted by decreasing SLOC.
"""
# flatten the list to make it easier to sort
flattened = []
for k__, v__ in self.map_.items():
flattened.append([k__] + v__)
results = []
for x__ in sorted(flattened, key=lambda fields: fields[
CountHolder.SLOC + 1], reverse=True):
result = self.pretty_counts(x__[0])
if result:
results.append(result)
print('; '.join(results))
def get_totals(self):
tot_loc, tot_sloc, tot_test_loc, tot_test_sloc = 0, 0, 0, 0
for lang in self.map_:
loc_, sloc_, test_loc, test_sloc = self.map_[lang]
tot_loc += loc_ # lines of non-test code
tot_sloc += sloc_ # lines of which are source code
tot_test_loc += test_loc # lines of test code
tot_test_sloc += test_sloc # lines of which are source code
return tot_loc, tot_sloc, tot_test_loc, tot_test_sloc
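# A minimal sketch of how CountHolder is meant to be driven (hypothetical
# numbers, shown only to illustrate the bookkeeping):
#   holder = CountHolder()
#   holder.add_counts('py', 8, 5)        # 8 non-test lines, 5 of them source
#   holder.add_test_counts('py', 9, 9)   # 9 test lines, all of them source
#   holder.pretty_counts('py')           # -> 'py:17/14 T64.3%'
#   holder.get_totals()                  # -> (8, 5, 9, 9)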
class MapHolder(object):
def __init__(self, main_lang=''):
        # Note OCaml comments are (* ... *) and may be nested.  File
        # extensions are .ml (source code) and .mli (interface/header);
        # .cmo/.cmx, .cmi, and .cma/.cmxa are compiled forms.
        # Maps a language short name (abbrev) to its counter function;
        # abbrevs are kept short (typically five characters or fewer).
self._lang2counter = {
'ada': count_lines_double_dash, # Pentagon language
'asm': count_lines_not_sharp, # s, S, asm
'awk': count_lines_not_sharp, # awk programming language
'aug': count_lines_augeas, # Augeas config manager
'bash': count_lines_shell, # bash shell
'c': count_lines_c, # ansic
'clj': count_lines_clojure, # Clojure
'cljs': count_lines_clojurescript, # ClojureScript
'code': count_lines_not_sharp, # used to be 'not#'
'cpp': count_lines_cpp, # C++
'crk': count_lines_crack, # crack programming language
'csh': count_lines_not_sharp, # csh, tcsh
'css': count_lines_java_style, # css, as in stylesheets
'cython': count_lines_python,
'f90+': count_lines_fortran90, # FORTRAN90 plus
'for': count_lines_fortran, # fixed-format FORTRAN
'gen': count_lines_not_sharp, # treat # as comment
'go': count_lines_go, # golang
'gperf': count_lines_gperf, #
'hs': count_lines_double_dash, # Haskell
'html': count_lines_html, # html
'java': count_lines_java, # plain old Java
'js': count_lines_java_style, # Javascript
'json': count_lines_txt, # json
'lex': count_lines_java_style, # lex/flex
'lisp': count_lines_lisp, # Common Lisp
'm4': count_lines_not_sharp, # m4 macro processor
'mjs': count_lines_java_style, # Javascript module
'ml': count_lines_ocaml, # ocaml, tentative abbrev
'objc': count_lines_java_style, # Objective C
'occ': count_lines_double_dash, # concurrent programming
'perl': count_lines_perl,
'php': count_lines_php,
'proto': count_lines_protobuf, # Google Protocol Buffers
'py': count_lines_python, # yes, Python
'R': count_lines_not_sharp, # R
'Rmd': count_lines_r_markdown,
're2c': count_lines_re2c, # re2c
'rb': count_lines_ruby, # ruby
'rs': count_lines_rust, # rust
'scala': count_lines_scala,
'sed': count_lines_not_sharp, # stream editor
'sh': count_lines_shell, # shell script
'sno': count_lines_snobol, # snobol4
'tcl': count_lines_not_sharp, # tcl, tk, itk
'tex': count_lines_tex, # TeX, LaTeX
'toml': count_lines_not_sharp, # Tom's Obvious Markup Language
'txt': count_lines_txt, # plain text
'xml': count_lines_xml,
'yacc': count_lines_java_style, # yacc, bison
'yaml': count_lines_not_sharp, # yaml
}
# Guesses language short name (abbrev) from file extension.
# See sloccount's break_filelist for hints.
# Note {pl,pm,perl,pl} => perl
self._ext2lang = {
'adb': 'ada',
'ads': 'ada',
'asm': 'asm',
            'aug': 'aug',  # Augeas; matches the 'aug' key in _lang2counter
'awk': 'awk',
'bash': 'bash', # yes, never used
'c': 'c', # ansi c
'C': 'cpp', # C++
'cc': 'cpp', # C++
'clj': 'clj', # Clojure
            'cljs': 'cljs', # ClojureScript
'code': 'code', # comments begin with sharp sign, #
'cp': 'cpp', # C++
'cpp': 'cpp', # C++
'CPP': 'cpp', # C++
'c++': 'cpp', # C++
'cxx': 'cpp', # C++
'crk': 'crack', # crack programming language
'csh': 'csh',
'css': 'css',
            'f': 'for', # fixed-format FORTRAN
'f90': 'f90+', # free-format FORTRAN
'f95': 'f90+', # free-format FORTRAN
'f03': 'f90+', # free-format FORTRAN
'f08': 'f90+', # free-format FORTRAN
'f15': 'f90+', # free-format FORTRAN
'for': 'for',
'go': 'go', # same counter as C, Java ?
'gperf': 'gperf', # same counter as C, Java ?
'h': 'c', # PRESUMED ANSI C
'hh': 'cpp', # C++; I've never seen this
'hpp': 'cpp', # C++
'hs': 'hs', # Haskell
'html': 'html', # no counter
'itk': 'tcl',
'java': 'java',
'js': 'js', # javascript, node.js
'json': 'json',
'l': 'lex', # lex/flex parser generator
'lisp': 'lisp',
'loc_': 'lex', # lex/flex parser generator
'm4': 'm4', # no counter
'md': 'md', # no counter
'mjs': 'mjs', # Javascript modules
'ml': 'ml', # OCaml
'mli': 'ml', # OCaml extension
'occ': 'occ',
'php': 'php',
'php3': 'php',
'php4': 'php',
'php5': 'php',
'phtml': 'php',
'pl': 'perl',
'pm': 'perl',
'proto': 'proto', # Google protobuf
'pxd': 'cython', # cython header
'py': 'py',
'pyx': 'cython', # cython code
'R': 'R', # R programming language
'r': 'R', # R programming language
'Rmd': 'Rmd', # RMarkdown
'rb': 'rb',
're': 're2c', # same counter as C, Java ?
'rs': 'rs', # rust, comments start with //
'S': 'asm',
            's': 'asm',
'scala': 'scala',
'sed': 'sed',
'sh': 'sh',
'sno': 'sno',
'tcsh': 'csh',
'tcl': 'tcl',
'tex': 'tex',
'tk': 'tcl',
'toml': 'toml',
'txt': 'txt',
'xml': 'xml',
'y': 'yacc', # parser generator
'yaml': 'yaml',
}
# DEBUG
assert self._ext2lang['yaml'] == 'yaml'
assert self._ext2lang['toml'] == 'toml'
# END
if main_lang == 'c':
self._ext2lang['inc'] = 'c'
if main_lang == 'cpp':
self._ext2lang['h'] = 'cpp'
self._ext2lang['inc'] = 'cpp'
elif main_lang == 'matlab':
            self._ext2lang['m'] = 'matlab'
elif main_lang == 'objc':
self._ext2lang['h'] = 'objc'
            self._ext2lang['m'] = 'objc'
elif main_lang == 'occ':
self._ext2lang['inc'] = 'occ'
elif main_lang == 'octave':
            self._ext2lang['m'] = 'octave'
# Maps lang short name (abbrev) to fuller language name.
# By convention, short names are limited to 5 chars.
self._lang_map = {
'ada': 'Ada',
'asm': 'assembler',
'aug': 'augeas',
'awk': 'awk',
'bash': 'bash',
'c': 'ansic',
'clj': 'Clojure',
'cljs': 'ClojureScript',
'code': 'code', # the former 'not#'
'cpp': 'C++',
'crack': 'crack',
'csh': 'csh',
'css': 'css',
'cython': 'cython',
'f90+': 'FORTRAN90+',
'for': 'FORTRAN',
'gen': 'generic',
'gperf': 'gperf',
'go': 'golang',
'hs': 'haskell',
'html': 'html',
'java': 'java',
'js': 'javascript',
'json': 'json',
'lex': 'lex',
'lisp': 'lisp',
'm4': 'm4',
'md': 'markdown',
'mjs': 'Javascript module',
'ml': 'OCaml',
'objc': 'Objective C',
'occ': 'Occam',
'perl': 'Perl',
'php': 'php',
'proto': 'proto', # Google protobuf
'py': 'python',
'R': 'R',
'Rmd': 'R Markdown',
're2c': 're2c',
'rb': 'ruby',
'rs': 'rust',
'scala': 'scala',
'sed': 'sed',
'sh': 'shell',
'sno': 'snobol4',
'tcl': 'tcl',
'tex': 'TeX/LaTeX',
'toml': 'toml',
'txt': 'text',
'xml': 'XML',
'yacc': 'yacc',
'yaml': 'yaml',
}
# A set of extensions known NOT to be source code.
self._non_code_exts = {
            'a',  # library / linked object archive
'cma', 'cmi', 'cmo', 'cmx', 'cmxa', # OCaml compiled
# 'dat', # arguable
'gz',
'jar',
'md', # markdown
'o', # object
'pyc', 'pyo',
'so',
'svn-base',
'swp', # vi/vim temporary file
'zip',
}
# A set of file and directory names known NOT to contain source code
self._non_code_dirs = {
'.git',
'__pycache__',
'.svn',
}
# files which definitely do not contain source code
self._non_code_files = {
'.gitignore',
'.wrapped',
'AUTHORS',
'CHANGES', 'ChangeLog',
'CONTRIBUTORS',
'COPYING', 'COPYING.AUTOCONF.EXCEPTION',
'COPYING.GNUBL', 'COPYING.LIB',
'LICENSE',
'MANIFEST',
'NEWS',
'PATENTS',
'README',
'TODO',
}
# public interface ==============================================
def ext2lang(self, ext):
if ext in self._ext2lang:
return self._ext2lang[ext]
return None
def get_counter(self, lang, is_cli_arg=False):
"""
Enter with the language (abbrev) of a file and whether the name is on
the command line. If there is a counter matching that name, return a
reference to it. Otherwise, if this is a CLI argument, return the
generic counter. Otherwise, return None.
XXX If the name on the command line is a directory name, should
be handled differently.
"""
if lang and (lang in self._lang2counter):
return self._lang2counter[lang]
elif is_cli_arg:
return count_lines_not_sharp
return None
def get_long_name(self, name):
""" Given a short file name, return the longer language name """
if name in self._lang_map:
return self._lang_map[name]
return None
    def get_lang_set(self):
        """Return a set containing all recognized language abbreviations."""
return frozenset(self._lang_map.keys())
def non_code_ext(self, name):
return name in self._non_code_exts
def non_code_dir(self, name):
return name in self._non_code_dirs
def non_code_file(self, name):
return name in self._non_code_files
def guess_lang(self, path_to_dir, file_name, is_cli_arg, verbose=0):
"""
Guess the short name of the language and whether it is a test file
depending on whether the name appears on the command line (we
always count any file named on the command line).
"""
# defaults
is_test = False
lang = None
ext = None
if path_to_dir and file_name:
path_to_file = os.path.join(path_to_dir, file_name)
if os.path.exists(path_to_file):
if not self.non_code_file(file_name):
# get any extension
_, delim, ext = file_name.rpartition('.')
                    if delim == '.':
                        # we have an extension
if not self.non_code_ext(ext):
# we have an extension and it's not prohibited
lang = self.ext2lang(ext)
if (lang is None) and is_cli_arg:
lang = 'gen' # generic
if not lang and is_cli_arg:
lang = 'gen'
if lang == 'go':
is_test = file_name.endswith('_test.go')
elif lang == 'py':
is_test = file_name.startswith('test')
# filter out generated files
if lang and lang != 'gen':
if self.is_generated(path_to_file, verbose):
return None, False
if verbose > 1:
if ext is not None:
print(" %s: find ext '%s', GUESS lang %s" % (
file_name, ext, lang))
else:
print(
" %s: NO ext, GUESS lang %s" %
(file_name, lang))
return lang, is_test
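    # Illustrative behaviour of guess_lang(), assuming the named files exist
    # and are not generated:
    #   guess_lang('/src', 'test_foo.py', is_cli_arg=False) -> ('py', True)
    #   guess_lang('/src', 'README', is_cli_arg=False)      -> (None, False)
    #   guess_lang('/src', 'notes', is_cli_arg=True)        -> ('gen', False)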
def is_generated(self, path_to_file, verbose=0):
first_line = ''
try:
with open(path_to_file, 'r') as flattened:
first_line = flattened.readline()
except Exception as exc:
print("problem reading '%s': %s" % (path_to_file, exc))
return False
for regex in [GPERF_RE, RE2C_RE]:
if regex.match(first_line):
return True
return False
# functions =========================================================
# DIR-LEVEL COUNTER(S) ----------------------------------------------
def count_lines_in_dir(path_to_dir, options):
k__ = options.k__
langs_counted = options.langs_counted
map_holder = options.map_holder
verbose = options.verbose
lines, sloc = (0, 0)
files = os.listdir(path_to_dir)
if files:
map_holder = options.map_holder
for name in sorted(files):
# we only count *.txt if on the command line
if name.endswith('.txt'):
continue
# consider exclusions ...
if options.ex_re is not None and options.ex_re.search(
name) is not None:
continue
is_test = False # default
path_to_file = os.path.join(path_to_dir, name)
sloc_ = os.lstat(path_to_file) # ignores symlinks
mode = sloc_.st_mode
if S_ISDIR(mode):
(more_lines, more_sloc) = count_lines_in_dir(
path_to_file, options)
lines += more_lines
sloc += more_sloc
elif S_ISREG(mode):
if map_holder.non_code_file(name):
if verbose > 1:
print("Not a code file: %s" % name)
else:
# XXX Note command line argument may be relative or
# absolute path to file, terminated by base file name
# and extension.
counted = False
lang, is_test = map_holder.guess_lang(
path_to_dir, name, is_cli_arg=False, verbose=verbose)
if (lang is not None) and (lang in langs_counted):
counter = map_holder.get_counter(lang, True)
if counter:
more_lines, more_sloc = counter(
path_to_file, options, lang)
lines += more_lines # VESTIGIAL
sloc += more_sloc
if is_test:
k__.add_test_counts(
lang, more_lines, more_sloc)
else:
k__.add_counts(lang, more_lines, more_sloc)
counted = True
if not counted and options.verbose >= 2:
print(" skipping %s" % name)
return lines, sloc
# FILE-LEVEL COUNTERS -----------------------------------------------
def check_whether_already_counted(path_to_file, options):
"""
Given a text file, try to split it into a list of lines. May raise
an exception. If the file has been seen before, will return an
    empty list of lines. Otherwise it returns the list of lines and the
file's hash.
options.already is a set containing hashes of files already counted
"""
lines, counter_ = None, None
with open(path_to_file, 'rb') as flattened:
data = flattened.read()
if data:
sha_ = hashlib.sha1()
sha_.update(data)
counter_ = sha_.hexdigest() # a string
if options.verbose > 1:
print(" %s <-- %s" % (counter_, path_to_file))
if counter_ in options.already:
if options.verbose:
print("skipping %s, already counted" % path_to_file)
else:
try:
decoded = data.decode('utf-8')
except Exception:
decoded = data.decode('latin-1')
lines = decoded.split("\n")
# drop spurious last line caused by terminating newline
if lines and len(lines) > 1:
if lines[-1] == '':
lines = lines[:-1]
return lines, counter_
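# Note: `options` is duck-typed throughout this module.  The file-level
# counters only need `options.already` (a set of hex digests of files seen)
# and `options.verbose` (an int); count_lines_in_dir() additionally expects
# `options.k__` (a CountHolder), `options.langs_counted`, `options.map_holder`
# and `options.ex_re` (a compiled exclusion regex, or None).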
# TOML ==============================================================
# XXX SHOULD BE IN ALPHABETICAL ORDER, up here for debugging
def count_lines_toml(path_to_file, options, lang):
    """ Count lines in a file with extension '.toml'. """
    # TOML comments start with '#', so delegate to the sharp-sign counter
    # (the same counter the extension map already uses for 'toml').
    return count_lines_not_sharp(path_to_file, options, lang)
# AUGEAS ============================================================
def count_lines_augeas(path, options, lang):
return count_lines_ocaml(path, options, lang)
# BASH ==============================================================
def count_lines_bash(path, options, lang):
return count_lines_shell(path, options, lang)
# C =================================================================
def count_lines_c(path, options, lang):
loc_, sloc_ = 0, 0
if path.endswith('.h'):
if not path.endswith('.pb-c.h'):
loc_, sloc_ = count_lines_java_style(path, options, lang)
elif path.endswith('.c'):
if not path.endswith('.pb-c.c'):
loc_, sloc_ = count_lines_java_style(path, options, lang)
return loc_, sloc_
# CLOJURE, CLOJURE_SCRIPT ===========================================
def count_lines_clojure(path_to_file, options, lang):
return count_lines_not_semicolon(path_to_file, options, lang)
def count_lines_clojurescript(path_to_file, options, lang):
return count_lines_not_semicolon(path_to_file, options, lang)
# C++ ===============================================================
def count_lines_cpp(path, options, lang):
loc_, sloc_ = 0, 0
if path.endswith('.h'):
if not path.endswith('.pb.h'):
loc_, sloc_ = count_lines_java_style(path, options, lang)
elif path.endswith('.cpp'):
if not path.endswith('.pb.cpp'):
loc_, sloc_ = count_lines_java_style(path, options, lang)
else:
loc_, sloc_ = count_lines_java_style(path, options, lang)
return loc_, sloc_
# CRACK ==================================================================
# YYY
def _find_crack_code(text):
"""
We are in a comment. Return a ref to the beginning of the text
outside the comment block (which may be '') and the value of inComment.
"""
posn = text.find('*/')
if posn == -1:
return '', True
if posn + 2 < len(text):
return text[posn + 2:], False
return '', False
def _find_crack_comment(text):
"""
We are NOT in a comment. Return a ref to any code found, a ref to the
rest of the text, and the value of inComment.
"""
posn_old = text.find('/*') # multi-line comment
posn_new = text.find('//') # one-line comment
posn_sharp = text.find('#') # one-line comment
if posn_old == -1 and posn_new == -1 and posn_sharp == -1:
return text, '', False
posn = 1024 * 1024
if posn_old != -1:
posn = posn_old
if posn_new != -1 and posn_new < posn:
posn = posn_new
if posn_sharp != -1 and posn_sharp < posn:
posn = posn_sharp
if posn == posn_old:
in_comment = True
return text[:posn], text[posn + 2:], in_comment
else:
in_comment = False
return text[:posn], '', in_comment
def uncomment_crack(text, in_comment):
"""
Given a line of text, return a ref to any code found and the value of
inComment, which may have changed.
"""
code = ''
text = text.strip()
while text:
if in_comment:
text, in_comment = _find_crack_code(text)
else:
chunk, text, in_comment = _find_crack_comment(text.strip())
code += chunk
return code, in_comment
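# Worked example for uncomment_crack():
#   uncomment_crack("x = 1; /* note */ y = 2;", False)
# returns ("x = 1; y = 2;", False); a line that ends inside "/* ..." instead
# returns its leading code together with in_comment == True.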
def count_lines_php(path_to_file, options, lang):
# comments are either single line or multi-line. Single-line
# comments begin with either # or //. In either case whatever
# is to the right of the delimiter is a comment. Multi-line
# comments begin with /* and end with */
return count_lines_crack(path_to_file, options, lang)
def count_lines_crack(path_to_file, options, lang):
# comments are either single line or multi-line. Single-line
# comments begin with either # or //. In either case whatever
# is to the right of the delimiter is a comment. Multi-line
# comments begin with /* and end with */
lines_so_far, sloc_so_far = (0, 0)
in_comment = False
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
code, in_comment = uncomment_crack(line, in_comment)
if code:
code = code.strip()
if code:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return (lines_so_far, sloc_so_far)
# FORTRAN ===========================================================
def count_lines_fortran(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
line_len = len(line)
if line_len:
if line[0] in ['c', 'C']:
# a comment
continue
# code area is columns 7-72, 1-based, so 6-71
if line[0].lower() == 'c' or line_len < 7:
continue
if line_len > 72:
line = line[6:72]
else:
line = line[6:]
for ch_ in line:
if ch_ != ' ':
sloc_so_far += 1
break
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# FORTRAN 90+ =======================================================
def count_lines_fortran90(path_to_file, options, lang):
"""
Count lines of free-format FORTRAN 90+
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
line_len = len(line)
if line_len:
if line[0] in ['c', 'C']:
# a fixed-form comment
continue
# a BANG ('!') anywhere begins a comment
ndx = line.find('!')
if ndx != -1:
line = line[0:ndx]
line_len = len(line)
if line_len == 0:
continue
# code area is columns 7-72, 1-based, so 6-71
if line[0].lower() == 'c' or line_len < 7:
continue
if line_len > 72:
line = line[6:72]
else:
line = line[6:]
for ch_ in line:
if ch_ != ' ':
sloc_so_far += 1
break
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# GO ================================================================
def count_lines_go(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
if not path_to_file.endswith('.pb.go'):
lines_so_far, sloc_so_far = count_lines_java_style(
path_to_file, options, lang)
return lines_so_far, sloc_so_far
# GPERF ==============================================================
def count_lines_gperf(path, options, lang):
loc_, sloc_ = count_lines_java_style(path, options, lang)
return loc_, sloc_
# HTML ==============================================================
def _find_html_code(text):
"""
We are in a comment. Return a ref to the beginning of the text
outside the comment block (which may be '') and the value of inComment.
"""
posn = text.find('-->')
if posn == -1:
return '', True
if posn + 3 < len(text):
return text[posn + 3:], False
return '', False
def _find_html_comment(text):
"""
We are NOT in a comment. Return a ref to any code found, a ref to the
rest of the text, and the value of inComment.
"""
posn = text.find('<!--') # one-line comment
if posn == -1:
return text, '', False
if posn + 4 < len(text):
return text[:posn], text[posn + 4:], True
return text[:posn], '', True
def uncomment_html(text, in_comment):
"""
Given a line of text, return a ref to any code found and the value of
inComment, which may have changed.
"""
code = ''
text = text.strip()
while text:
if in_comment:
text, in_comment = _find_html_code(text)
else:
chunk, text, in_comment = _find_html_comment(text.strip())
code += chunk # XXX INEFFICIENT
return code, in_comment
# A better definition of a comment is that it begins with <!-- and ends
# with --> but does not contain -- or >
def count_lines_html(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
in_comment = False
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
code, in_comment = uncomment_html(line, in_comment)
if code:
code = code.strip()
if code:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return (lines_so_far, sloc_so_far)
# JAVA ==============================================================
def count_lines_java(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
if not path_to_file.endswith('Protos.java'):
lines_so_far, sloc_so_far = count_lines_java_style(
path_to_file, options, lang)
return lines_so_far, sloc_so_far
def _find_java_code(text):
"""
We are in a comment. Return a ref to the beginning of the text
outside the comment block (which may be '') and the value of inComment.
"""
posn = text.find('*/')
if posn == -1:
return '', True
if posn + 2 < len(text):
return text[posn + 2:], False
return '', False
def _find_java_comment(text):
"""
We are NOT in a comment. Return a ref to any code found, a ref to the
rest of the text, and the value of inComment.
"""
multi_line = False
posn_old = text.find('/*') # multi-line comment
posn_new = text.find('//') # one-line comment
if posn_old == -1 and posn_new == -1:
return text, '', False
if posn_new == -1:
posn = posn_old
in_comment = True
multi_line = True
else:
# posnNew is non-negative
if posn_old == -1 or posn_old > posn_new:
posn = posn_new
in_comment = False
else:
posn = posn_old
in_comment = True
multi_line = True
if multi_line and (posn + 2 < len(text)):
return text[:posn], text[posn + 2:], in_comment
return text[:posn], '', in_comment
def uncomment_java(text, in_comment):
"""
Given a line of text, return a ref to any code found and the value of
inComment, which may have changed.
"""
code = ''
text = text.strip()
while text:
if in_comment:
text, in_comment = _find_java_code(text)
else:
chunk, text, in_comment = _find_java_comment(text.strip())
code += chunk # XXX INEFFICIENT
return code, in_comment
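# Worked examples for uncomment_java():
#   uncomment_java("a(); // trailing note", False)    -> ("a(); ", False)
#   uncomment_java("still in comment */ b();", True)  -> ("b();", False)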
def count_lines_java_style(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
in_comment = False
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
code, in_comment = uncomment_java(line, in_comment)
if code:
code = code.strip()
if code:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return (lines_so_far, sloc_so_far)
# Lisp ==============================================================
def count_lines_lisp(path_to_file, options, lang):
return count_lines_not_semicolon(path_to_file, options, lang)
# MATLAB ============================================================
def count_lines_matlab(path_to_file, options, lang):
"""
    Count source lines in a Matlab file where single-line comments
    begin with '%' and multi-line comments are delimited by
%{ and %}. These may be nested. We ignore blank lines and lines
consisting solely of spaces and comments.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
depth = 0 # comment depth
# for l_ndx, line in enumerate(lines):
for line in lines:
lines_so_far += 1
non_space_sen = False
percent_sen = False # might start %{ or %}
# for c_ndx, ch_ in enumerate(list(line)):
for ch_ in list(line):
if percent_sen:
if ch_ == '{':
depth += 1
elif ch_ == '}':
if depth > 0:
depth -= 1
else:
# this would start a comment
if depth == 0:
break
percent_sen = False
elif depth == 0:
if ch_ == '%':
percent_sen = True
elif ch_ != ' ' and ch_ != '\t':
non_space_sen = True
# ignore other unicode space chars for now
else:
pass
else:
# depth > 0
if percent_sen:
if ch_ == '{':
depth += 1
elif ch_ == '}':
depth -= 1
percent_sen = False
elif ch_ == '%':
percent_sen = True
if non_space_sen:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# NOT_SHARP =========================================================
def count_lines_not_sharp(path_to_file, options, lang):
"""
Count lines in a file where the sharp sign ('#') is the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
a sharp sign.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
# This could be made more efficient.
line = line.strip()
if line and (line[0] != '#'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# OCaml =============================================================
def count_lines_ocaml(path_to_file, options, lang):
"""
Count lines in an OCaml file where comments are delimited by
(* and *). These may be nested. We ignore blank lines and lines
consisting solely of spaces and comments.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
depth = 0 # comment depth
for line in lines:
lines_so_far += 1
non_space_sen = False
l_paren_sen = False # might start (*
star_seen = False # might start *)
for ch_ in list(line):
# ignore other unicode space chars for now
if ch_ == ' ' or ch_ == '\t':
l_paren_sen = False
star_seen = False
continue
elif depth == 0:
if l_paren_sen:
if ch_ == '*':
depth += 1
else:
non_space_sen = True
l_paren_sen = False
elif star_seen:
if ch_ == ')':
if depth > 0:
depth -= 1
else:
non_space_sen = True
star_seen = False
else:
non_space_sen = True
elif ch_ == '(':
l_paren_sen = True
elif ch_ == '*':
star_seen = True
else:
non_space_sen = True
else:
# depth > 0
if l_paren_sen:
if ch_ == '*':
depth += 1
l_paren_sen = False
elif star_seen:
if ch_ == ')':
if depth > 0:
depth -= 1
star_seen = False
elif ch_ == '(':
l_paren_sen = True
elif ch_ == '*':
star_seen = True
if non_space_sen:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# OCCAM =============================================================
def count_lines_double_dash(path_to_file, options, lang):
"""
Count lines in a file where the double dash ('--') is the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
a double dash.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
# This could be made more efficient.
line = line.strip()
if line and not line.startswith('--'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# OCCAM =============================================================
def count_lines_occam(path_to_file, options, lang):
"""
Count source lines in an Octave file where single line comments
begin with '%' or '#' and multi-line comments are delimited by
%{ and %} or #{ and #}. These may be nested. We ignore blank lines
and lines consisting solely of spaces and comments.
NOTE that Octave actually requires that a multi-line comment marker
be the only token on the source line.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
depth = 0 # comment depth
# for l_ndx, line in enumerate(lines):
for line in lines:
lines_so_far += 1
non_space_sen = False
delim_seen = False # might start %{ or %}
# for c_ndx, ch_ in enumerate(list(line)):
for ch_ in list(line):
if delim_seen:
if ch_ == '{':
depth += 1
elif ch_ == '}':
if depth > 0:
depth -= 1
else:
# this would start a comment
if depth == 0:
break
delim_seen = False
elif depth == 0:
if ch_ == '%' or ch_ == '#':
delim_seen = True
elif ch_ != ' ' and ch_ != '\t':
non_space_sen = True
# ignore other unicode space chars for now
else:
pass
else:
# depth > 0
if delim_seen:
if ch_ == '{':
depth += 1
elif ch_ == '}':
depth -= 1
delim_seen = False
elif ch_ == '%' or ch_ == '#':
delim_seen = True
if non_space_sen:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# PASCAL ============================================================
def count_lines_pascal(path_to_file, options, lang):
"""
    Count lines in a Pascal file where comments are delimited by
(* and *) or { and } -- interchangeably. These may be nested. We
ignore blank lines and lines consisting solely of spaces and comments.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
depth = 0 # comment depth
# for ndx, line in enumerate(lines):
for line in lines:
lines_so_far += 1
non_space_sen = False
l_paren_sen = False # might start (*
star_seen = False # might start *)
for ch_ in list(line):
# ignore other unicode space chars for now
if ch_ == ' ' or ch_ == '\t':
l_paren_sen = False
star_seen = False
continue
elif depth == 0:
if l_paren_sen:
if ch_ == '}':
non_space_sen = True
elif ch_ == '*' or ch_ == '{':
depth += 1
else:
non_space_sen = True
l_paren_sen = False
elif star_seen:
if ch_ == '{':
depth += 1
elif ch_ == ')' or ch_ == '}':
non_space_sen = True
star_seen = False
else:
non_space_sen = True
elif ch_ == '{':
depth += 1
elif ch_ == '}':
pass
elif ch_ == '(':
l_paren_sen = True
elif ch_ == '*':
star_seen = True
else:
non_space_sen = True
else:
# depth > 0
if l_paren_sen:
if ch_ == '}':
if depth > 0:
depth -= 1
elif ch_ == '*' or ch_ == '{':
depth += 1
l_paren_sen = False
elif star_seen:
if ch_ == '{':
depth += 1
elif ch_ == '}':
depth -= 1
elif ch_ == ')':
if depth > 0:
depth -= 1
star_seen = False
elif ch_ == '{':
depth += 1
elif ch_ == '}':
depth -= 1
elif ch_ == '(':
l_paren_sen = True
elif ch_ == '*':
star_seen = True
if non_space_sen:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# PERCENT ===========================================================
def count_lines_not_percent(path_to_file, options, lang):
"""
Count lines in a file where the percent sign ('%') is the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
a percent sign.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
# This could be made more efficient.
line = line.strip()
if line and (line[0] != '%'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# PERL ==============================================================
def count_lines_perl(path_to_file, options, lang):
"""
XXX REWRITE:
Count lines in a file where the sharp sign ('#') is the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
a sharp sign.
XXX EXPAND THIS TO HANDLE POD BLOCKS, treated as multi-line comments.
"""
lines_so_far, sloc_so_far = (0, 0)
in_pod = False
in_for = False
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
if in_pod:
if line == '=cut':
in_pod = False
continue
if in_for:
if line == '=cut':
in_for = False
continue
if line == '=pod':
in_pod = True
continue
if line.startswith('=for comment'):
in_for = True
continue
# This could be made more efficient.
line = line.strip()
if line and (line[0] != '#'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# PHP ===============================================================
# PROTOBUF ==========================================================
def count_lines_protobuf(path, options, lang):
loc_, sloc_ = count_lines_java_style(path, options, lang)
return loc_, sloc_
# PYTHON ============================================================
def count_lines_python(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
if not path_to_file.endswith('_pb2.py'):
lines_so_far, sloc_so_far = _count_lines_python(
path_to_file, options, lang)
return lines_so_far, sloc_so_far
def _count_lines_python(path_to_file, options, lang):
in_triple_quote = False
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (lines is not None) and (hash_val is not None):
for line in lines:
if in_triple_quote:
# we always count this line
lines_so_far += 1
sloc_so_far += 1
count = line.count(TQUOTE)
if count % 2:
in_triple_quote = False
else:
lines_so_far += 1
sloc_ = line.partition('#')[0] # strip off comments
line = sloc_.strip() # strip leading & trailing
if line != '':
sloc_so_far += 1
count = line.count(TQUOTE)
if count % 2:
in_triple_quote = True
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return (lines_so_far, sloc_so_far)
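# Counting example for _count_lines_python(): given the four lines
#     def f():
#         """doc
#         string"""
#         return 1
# all four count as lines and all four as sloc, because everything inside a
# triple-quoted string is treated as source.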
# RE2C ==============================================================
def count_lines_re2c(path, options, lang):
loc_, sloc_ = count_lines_java_style(path, options, lang)
return loc_, sloc_
# R MARKDOWN ========================================================
def count_lines_r_markdown(path_to_file, options, lang):
"""
    Count the lines of R in an RMarkdown file.  Lines are counted in
    (a) the YAML section at the top of the file, (b) chunks of R code
    (following the normal rules: we ignore blank lines and anything
    following a sharp sign '#'), and (c) lines containing inline R code.
    To be counted, the YAML must begin at the very first line in the
    file and must be delimited by "^---" lines.
    This code only counts RMarkdown chunks beginning with "```{r " and
    ending with "```"; these markers must begin the line in each case.
    Any line containing "`r " is treated as inline R code and counted
    as a source line.
"""
# NOT YET AMENDED
lines_so_far, sloc_so_far = (0, 0)
in_code_chunk = False
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
in_yaml = False
for ndx, line in enumerate(lines):
lines_so_far += 1
line = line.strip()
# count YAML header if present ----------------------
if ndx == 0 and line.startswith('---'):
in_yaml = True
if in_yaml:
if line:
sloc_so_far += 1
if ndx and line.startswith('---'):
in_yaml = False
# already counted
continue
if in_code_chunk:
if line and (line[0] != '#'):
sloc_so_far += 1
if line.startswith('```'):
in_code_chunk = False
# already counted
else:
if line.startswith('```{r '):
sloc_so_far += 1
in_code_chunk = True
continue
if line.find('`r ') != -1:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# RUBY ==============================================================
def count_lines_ruby(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
if not path_to_file.endswith('.pb.rb'):
lines_so_far, sloc_so_far = count_lines_not_sharp(
path_to_file, options, lang)
return lines_so_far, sloc_so_far
# RUST ==============================================================
def count_lines_rust(path_to_file, options, lang):
"""
Count lines in a file where doubled forward slashes ('//') are the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
doubled slashes. Documentation lines beginning with '///' are treated as
comments.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1 # this counts every line
# This could be made more efficient.
line = line.strip()
if line and not line.startswith('//'):
sloc_so_far += 1 # this counts source lines
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# SHELL =============================================================
def count_lines_shell(path, options, lang):
return count_lines_not_sharp(path, options, lang)
# SCALA =============================================================
def count_lines_scala(path_to_file, options, lang):
lines_so_far, sloc_so_far = (0, 0)
comment_depth = 0
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
code, comment_depth = uncomment_scala(line, comment_depth)
if code:
code = code.strip()
if code:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
if comment_depth > 0:
print("unclosed comment at end of %s" % path_to_file)
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return (lines_so_far, sloc_so_far)
def _find_scala_code(text, comment_depth):
"""
We are in a comment. Return a ref to the beginning of the text
outside the comment block (which may be '') and the value of commentDepth.
"""
start_multi = text.find('/*')
end_multi = text.find('*/')
text_back = ''
if start_multi == -1 and end_multi == -1:
return text_back, comment_depth
elif end_multi == -1 or (start_multi != -1 and start_multi < end_multi):
comment_depth = comment_depth + 1
if start_multi + 2 < len(text):
text_back = text[start_multi + 2:]
else:
comment_depth = comment_depth - 1
if end_multi + 2 < len(text):
text_back = text[end_multi + 2:]
return text_back, comment_depth
def _find_scala_comment(text, comment_depth):
"""
We are NOT at comment depth > 0. Return a ref to any code found, a
ref to the rest of the text, and the value of commentDepth
"""
multi_line = False
posn_old = text.find('/*') # multi-line comment
posn_new = text.find('//') # one-line comment
if posn_old == -1 and posn_new == -1:
return text, '', 0
if posn_new == -1:
posn = posn_old
        comment_depth = 1
multi_line = True
else:
# posnNew is non-negative
if posn_old == -1 or posn_old > posn_new:
posn = posn_new
comment_depth = 0
else:
posn = posn_old
comment_depth = 1
multi_line = True
if multi_line and (posn + 2 < len(text)):
return text[:posn], text[posn + 2:], comment_depth
return text[:posn], '', comment_depth
def uncomment_scala(text, comment_depth):
"""
Given a line of text, return a ref to any code found and the value of
commentDepth, which may have changed.
"""
code = ''
text = text.strip()
while text:
if comment_depth > 0:
text, comment_depth = _find_scala_code(text, comment_depth)
elif comment_depth == 0:
chunk, text, comment_depth = _find_scala_comment(
text.strip(), comment_depth)
code += chunk # XXX INEFFICIENT
else:
print("INTERNAL ERROR: negative comment depth %d" % comment_depth)
return code, comment_depth
# SEMICOLON =========================================================
def count_lines_not_semicolon(path_to_file, options, lang):
"""
Count lines in a file where the semicolon (';') is the comment
marker. That is, we ignore blank lines, lines consisting solely of
spaces, and those starting with zero or more spaces followed by
a semicolon.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
# This could be made more efficient.
line = line.strip()
if line and (line[0] != ';'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# SNOBOL ============================================================
def count_lines_snobol(path_to_file, options, lang):
"""
already is a set containing hashes of files already counted
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
line = line.rstrip()
if line and (line[0] != '*'):
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# TeX ===============================================================
def count_lines_tex(path_to_file, options, lang):
return count_lines_not_percent(path_to_file, options, lang)
# TXT ===============================================================
def count_lines_txt(path_to_file, options, lang):
"""
Count the lines in a text file. We ignore empty lines and lines
consisting solely of spaces.
"""
lines_so_far, sloc_so_far = (0, 0)
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
if (hash_val is not None) and (lines is not None):
for line in lines:
lines_so_far += 1
# This could be made more efficient.
line = line.strip()
if line:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, lines_so_far, sloc_so_far))
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return lines_so_far, sloc_so_far
# XML ===============================================================
def count_lines_xml(path_to_file, options, lang):
"""
Count the lines in an xml file. We ignore empty lines and lines
consisting solely of spaces, and of course we ignore xml comments.
"""
try:
lines, hash_val = check_whether_already_counted(path_to_file, options)
except Exception as exc:
print("error reading '%s', skipping: %s" % (path_to_file, exc))
return 0, 0
try:
line_count, sloc_so_far = (0, 0)
if (hash_val is not None) and (lines is not None):
line_count = len(lines)
raw = '\n'.join(lines)
soup = BeautifulSoup(raw, 'lxml')
comments = soup.findAll(
text=lambda text: isinstance(
text, Comment))
            # extract() removes each comment node from the soup in place;
            # the list built by this comprehension is simply discarded.
[comment.extract() for comment in comments]
# -------------------------------------------------------
# 2016:05:22 userguide.xml's soup begins with an XML decl line
# followed by
# <html><body><document>
# <header><title> ... </header>
# <p>
# </p>
# <section> ...
#
# and ends with
# <p>
# CryptoServer will continue to serve pages until you kill it.
# </p>
# </section>
# </document>
# </body></html>
# -------------------------------------------------------
# PREVIOUS UNDERSTANDING:
# soup begins with '<html><body><p>' and ends with
# </p></body></html> on a separate line.
# elm = soup.html.body.p
# drop leading <p> and trailing </p>
# stripped = str(elm)[3:-4]
# lines = stripped.split('\n')
elm = soup.html
lines = str(elm).split('\n')
for line in lines:
# This could be made more efficient.
line = line.strip()
if line:
sloc_so_far += 1
options.already.add(hash_val)
if options.verbose:
print("%-47s: %-6s %5d lines, %5d sloc" % (
path_to_file, lang, line_count, sloc_so_far))
except Exception as exc:
print("error parsing '%s', skipping: %s" % (path_to_file, exc))
return line_count, sloc_so_far
| jddixon/pysloc | src/pysloc/__init__.py | Python | mit | 71,178 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import mimetypes
from flexget import plugin
from flexget.event import event
log = logging.getLogger('nzb_size')
# a bit hacky, add nzb as a known mimetype
mimetypes.add_type('application/x-nzb', '.nzb')
class NzbSize(object):
"""
Provides entry size information when dealing with nzb files
"""
@plugin.priority(200)
def on_task_modify(self, task, config):
"""
The downloaded file is accessible in modify phase
"""
try:
from pynzb import nzb_parser
except ImportError:
# TODO: remove builtin status so this won't get repeated on every task execution
# TODO: this will get loaded even without any need for nzb
raise plugin.DependencyError(issued_by='nzb_size', missing='lib pynzb')
for entry in task.accepted:
if entry.get('mime-type') in ['text/nzb', 'application/x-nzb'] or \
entry.get('filename') and entry['filename'].endswith('.nzb'):
if 'file' not in entry:
log.warning('`%s` does not have a `file` that could be used to get size information' %
entry['title'])
continue
filename = entry['file']
log.debug('reading %s' % filename)
xmldata = open(filename).read()
try:
nzbfiles = nzb_parser.parse(xmldata)
except Exception:
log.debug('%s is not a valid nzb' % entry['title'])
continue
size = 0
for nzbfile in nzbfiles:
for segment in nzbfile.segments:
size += segment.bytes
size_mb = size / 1024 / 1024
log.debug('%s content size: %s MB' % (entry['title'], size_mb))
entry['content_size'] = size_mb
else:
log.trace('%s does not seem to be nzb' % entry['title'])
@event('plugin.register')
def register_plugin():
plugin.register(NzbSize, 'nzb_size', api_ver=2, builtin=True)
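# Size arithmetic for reference (hypothetical segment sizes): three segments
# of 400000 bytes give size = 1200000, so entry['content_size'] becomes
# 1200000 / 1024 / 1024, about 1.14 (a float, thanks to the __future__
# division import).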
| jawilson/Flexget | flexget/plugins/metainfo/nzb_size.py | Python | mit | 2,284 |
from os import environ
from os.path import dirname, abspath, join
from django.urls import reverse_lazy
SITE_DIR = dirname(abspath(__file__))
# Security
SECRET_KEY = environ.get('SECRET_KEY', '')
DEBUG = True
ALLOWED_HOSTS = [] + environ.get('ALLOWED_HOSTS', '').split(',')
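# Note: when the ALLOWED_HOSTS variable is unset this evaluates to [''],
# because ''.split(',') returns [''] rather than an empty list.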
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sitemaps',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# wagtail
'wagtail.core',
'wagtail.admin',
'wagtail.documents',
'wagtail.snippets',
'wagtail.users',
'wagtail.images',
'wagtail.embeds',
'wagtail.search',
'wagtail.contrib.redirects',
'wagtail.sites',
'wagtail.contrib.modeladmin',
'wagtail.contrib.postgres_search',
'wagtail.contrib.settings',
'wagtail.contrib.search_promotions',
'captcha',
'taggit',
# app specific
'wagtailstreamforms',
'example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(SITE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': environ.get('RDS_HOSTNAME'),
'PORT': environ.get('RDS_PORT'),
'NAME': environ.get('RDS_DB_NAME'),
'USER': environ.get('RDS_USERNAME'),
'PASSWORD': environ.get('RDS_PASSWORD'),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'Django <no_reply@example.com>'
# Authentication
AUTH_PASSWORD_VALIDATORS = []
LOGIN_URL = reverse_lazy('admin:login')
LOGIN_REDIRECT_URL = LOGOUT_REDIRECT_URL = '/'
# Internationalization
LANGUAGE_CODE = 'en'
LANGUAGES = [
('en', 'English'),
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATICFILES_DIRS = [
join(SITE_DIR, "static"),
]
STATIC_URL = "/static/"
MEDIA_ROOT = join(SITE_DIR, "media")
MEDIA_URL = "/media/"
# Wagtail
WAGTAIL_SITE_NAME = 'example.com'
# Forms
WAGTAILSTREAMFORMS_ADVANCED_SETTINGS_MODEL = 'example.AdvancedFormSetting'
# ReCAPTCHA
# developer keys
RECAPTCHA_PUBLIC_KEY = '6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI'
RECAPTCHA_PRIVATE_KEY = '6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe'
NOCAPTCHA = True
SILENCED_SYSTEM_CHECKS = ['captcha.recaptcha_test_key_error']
| AccentDesign/wagtailstreamforms | example/settings.py | Python | mit | 3,558 |
#! /usr/bin/env python
# -*- coding: latin-1 -*-
from collections import deque, defaultdict
import itertools
import time
import invariants
import pddl
import timers
class BalanceChecker(object):
def __init__(self, task, reachable_action_params):
self.predicates_to_add_actions = defaultdict(set)
self.action_name_to_heavy_action = {}
for act in task.actions:
action = self.add_inequality_preconds(act, reachable_action_params)
too_heavy_effects = []
create_heavy_act = False
heavy_act = action
for eff in action.effects:
too_heavy_effects.append(eff)
if eff.parameters: # universal effect
create_heavy_act = True
too_heavy_effects.append(eff.copy())
if not eff.literal.negated:
predicate = eff.literal.predicate
self.predicates_to_add_actions[predicate].add(action)
if create_heavy_act:
heavy_act = pddl.Action(action.name, action.parameters,
action.precondition, too_heavy_effects,
action.cost)
# heavy_act: duplicated universal effects and assigned unique names
# to all quantified variables (implicitly in constructor)
self.action_name_to_heavy_action[action.name] = heavy_act
def get_threats(self, predicate):
return self.predicates_to_add_actions.get(predicate, set())
def get_heavy_action(self, action_name):
return self.action_name_to_heavy_action[action_name]
def add_inequality_preconds(self, action, reachable_action_params):
if reachable_action_params is None or len(action.parameters) < 2:
return action
inequal_params = []
combs = itertools.combinations(list(range(len(action.parameters))), 2)
for pos1, pos2 in combs:
inequality = True
for params in reachable_action_params[action.name]:
if params[pos1] == params[pos2]:
inequality = False
break
if inequality:
inequal_params.append((pos1, pos2))
if inequal_params:
precond_parts = list(action.precondition.parts)
for pos1, pos2 in inequal_params:
param1 = action.parameters[pos1].name
param2 = action.parameters[pos2].name
new_cond = pddl.NegatedAtom("=", (param1, param2))
precond_parts.append(new_cond)
precond = action.precondition.change_parts(precond_parts)
return pddl.Action(action.name, action.parameters, precond,
action.effects, action.cost)
else:
return action
def get_fluents(task):
fluent_names = set()
for action in task.actions:
for eff in action.effects:
fluent_names.add(eff.literal.predicate)
return [pred for pred in task.predicates if pred.name in fluent_names]
def get_initial_invariants(task):
for predicate in get_fluents(task):
all_args = list(range(len(predicate.arguments)))
for omitted_arg in [-1] + all_args:
order = [i for i in all_args if i != omitted_arg]
part = invariants.InvariantPart(predicate.name, order, omitted_arg)
yield invariants.Invariant((part,))
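# Illustration of the generator above (hypothetical predicate, not taken from a
# real task): for a binary fluent "on(x, y)" it yields three one-part candidates,
# one with no argument omitted (omitted_arg == -1) and one for each omitted
# argument position, i.e. counting atoms per fixed first or second argument.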
# Input file might be grounded, beware of too many invariant candidates
MAX_CANDIDATES = 100000
MAX_TIME = 300
def find_invariants(task, reachable_action_params):
candidates = deque(get_initial_invariants(task))
print(len(candidates), "initial candidates")
seen_candidates = set(candidates)
balance_checker = BalanceChecker(task, reachable_action_params)
def enqueue_func(invariant):
if len(seen_candidates) < MAX_CANDIDATES and invariant not in seen_candidates:
candidates.append(invariant)
seen_candidates.add(invariant)
start_time = time.clock()
while candidates:
candidate = candidates.popleft()
if time.clock() - start_time > MAX_TIME:
print("Time limit reached, aborting invariant generation")
return
if candidate.check_balance(balance_checker, enqueue_func):
yield candidate
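# Note on the loop above: candidates accepted by check_balance are yielded as
# proven invariants; the invariants module (not shown here) is expected to push
# refined candidates through enqueue_func when a candidate cannot be proven
# balanced, which is what keeps the queue growing up to MAX_CANDIDATES.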
def useful_groups(invariants, initial_facts):
predicate_to_invariants = defaultdict(list)
for invariant in invariants:
for predicate in invariant.predicates:
predicate_to_invariants[predicate].append(invariant)
nonempty_groups = set()
overcrowded_groups = set()
for atom in initial_facts:
if isinstance(atom, pddl.Assign):
continue
for invariant in predicate_to_invariants.get(atom.predicate, ()):
group_key = (invariant, tuple(invariant.get_parameters(atom)))
if group_key not in nonempty_groups:
nonempty_groups.add(group_key)
else:
overcrowded_groups.add(group_key)
useful_groups = nonempty_groups - overcrowded_groups
for (invariant, parameters) in useful_groups:
yield [part.instantiate(parameters) for part in invariant.parts]
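# A group key survives the filtering above only when exactly one initial fact
# instantiates it: keys never seen stay out of nonempty_groups, and keys seen a
# second time are moved into overcrowded_groups and subtracted away.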
def get_groups(task, reachable_action_params=None):
with timers.timing("Finding invariants"):
invariants = list(find_invariants(task, reachable_action_params))
with timers.timing("Checking invariant weight"):
result = list(useful_groups(invariants, task.init))
return result
if __name__ == "__main__":
import pddl
print("Parsing...")
task = pddl.open()
print("Finding invariants...")
for invariant in find_invariants(task, None):

print(invariant)
print("Finding fact groups...")
groups = get_groups(task)
for group in groups:
print("[%s]" % ", ".join(map(str, group)))
|
rock-planning/planning-lama
|
lama/translate/invariant_finder.py
|
Python
|
gpl-3.0
| 5,900
|
__author__ = 'wei'
from push.igetui.template.igt_base_template import *
class IGtMessage:
def __init__(self):
self.isOffline = False
self.offlineExpireTime = 0
self.data = BaseTemplate()
class IGtSingleMessage(IGtMessage) :
def __init__(self):
IGtMessage.__init__(self)
class IGtListMessage(IGtMessage):
def __init__(self):
IGtMessage.__init__(self)
class IGtAppMessage(IGtMessage):
def __init__(self):
IGtMessage.__init__(self)
self.appIdList = []
self.phoneTypeList = []
self.provinceList = []
self.tagList = []
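# Usage sketch (identifiers and values are hypothetical, not from this repository):
#   msg = IGtAppMessage()
#   msg.offlineExpireTime = 3600 * 1000
#   msg.appIdList.append('your-app-id')
#   msg.tagList.append('beta-users')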
|
KasenJ/CommunityPython
|
code/push/igetui/igt_message.py
|
Python
|
gpl-2.0
| 614
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the sync stages."""
import contextlib
import datetime
import logging
import os
import sys
from xml.etree import ElementTree
from xml.dom import minidom
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import constants
from chromite.cbuildbot import lkgm_manager
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import repository
from chromite.cbuildbot import tree_status
from chromite.cbuildbot import trybot_patch_pool
from chromite.cbuildbot import validation_pool
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import build_stages
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import gclient
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
from chromite.scripts import cros_mark_chrome_as_stable
PRE_CQ = validation_pool.PRE_CQ
class PatchChangesStage(generic_stages.BuilderStage):
"""Stage that patches a set of Gerrit changes to the buildroot source tree."""
def __init__(self, builder_run, patch_pool, **kwargs):
"""Construct a PatchChangesStage.
Args:
builder_run: BuilderRun object.
patch_pool: A TrybotPatchPool object containing the different types of
patches to apply.
"""
super(PatchChangesStage, self).__init__(builder_run, **kwargs)
self.patch_pool = patch_pool
@staticmethod
def _CheckForDuplicatePatches(_series, changes):
conflicts = {}
duplicates = []
for change in changes:
if change.id is None:
cros_build_lib.Warning(
"Change %s lacks a usable ChangeId; duplicate checking cannot "
"be done for this change. If cherry-picking fails, this is a "
"potential cause.", change)
continue
conflicts.setdefault(change.id, []).append(change)
duplicates = [x for x in conflicts.itervalues() if len(x) > 1]
if not duplicates:
return changes
for conflict in duplicates:
cros_build_lib.Error(
"Changes %s conflict with each other- they have same id %s.",
', '.join(map(str, conflict)), conflict[0].id)
cros_build_lib.Die("Duplicate patches were encountered: %s", duplicates)
def _PatchSeriesFilter(self, series, changes):
return self._CheckForDuplicatePatches(series, changes)
def _ApplyPatchSeries(self, series, patch_pool, **kwargs):
"""Applies a patch pool using a patch series."""
kwargs.setdefault('frozen', False)
# Honor the given ordering, so that if a gerrit/remote patch
# conflicts w/ a local patch, the gerrit/remote patch are
# blamed rather than local (patch ordering is typically
# local, gerrit, then remote).
kwargs.setdefault('honor_ordering', True)
kwargs['changes_filter'] = self._PatchSeriesFilter
_applied, failed_tot, failed_inflight = series.Apply(
list(patch_pool), **kwargs)
failures = failed_tot + failed_inflight
if failures:
self.HandleApplyFailures(failures)
def HandleApplyFailures(self, failures):
cros_build_lib.Die("Failed applying patches: %s",
"\n".join(map(str, failures)))
def PerformStage(self):
class NoisyPatchSeries(validation_pool.PatchSeries):
"""Custom PatchSeries that adds links to buildbot logs for remote trys."""
def ApplyChange(self, change):
if isinstance(change, cros_patch.GerritPatch):
cros_build_lib.PrintBuildbotLink(str(change), change.url)
elif isinstance(change, cros_patch.UploadedLocalPatch):
cros_build_lib.PrintBuildbotStepText(str(change))
return validation_pool.PatchSeries.ApplyChange(self, change)
# If we're an external builder, ignore internal patches.
helper_pool = validation_pool.HelperPool.SimpleCreate(
cros_internal=self._run.config.internal, cros=True)
# Limit our resolution to non-manifest patches.
patch_series = NoisyPatchSeries(
self._build_root,
helper_pool=helper_pool,
deps_filter_fn=lambda p: not trybot_patch_pool.ManifestFilter(p))
self._ApplyPatchSeries(patch_series, self.patch_pool)
class BootstrapStage(PatchChangesStage):
"""Stage that patches a chromite repo and re-executes inside it.
Attributes:
returncode - the returncode of the cbuildbot re-execution. Valid after
calling stage.Run().
"""
option_name = 'bootstrap'
def __init__(self, builder_run, chromite_patch_pool,
manifest_patch_pool=None, **kwargs):
super(BootstrapStage, self).__init__(
builder_run, trybot_patch_pool.TrybotPatchPool(), **kwargs)
self.chromite_patch_pool = chromite_patch_pool
self.manifest_patch_pool = manifest_patch_pool
self.returncode = None
def _ApplyManifestPatches(self, patch_pool):
"""Apply a pool of manifest patches to a temp manifest checkout.
Args:
patch_pool: The pool to apply.
Returns:
The path to the patched manifest checkout.
Raises:
Exception, if the new patched manifest cannot be parsed.
"""
checkout_dir = os.path.join(self.tempdir, 'manifest-checkout')
repository.CloneGitRepo(checkout_dir,
self._run.config.manifest_repo_url)
patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
checkout_dir, tracking_branch=self._run.manifest_branch)
self._ApplyPatchSeries(patch_series, patch_pool)
# Create the branch that 'repo init -b <target_branch> -u <patched_repo>'
# will look for.
cmd = ['branch', '-f', self._run.manifest_branch,
constants.PATCH_BRANCH]
git.RunGit(checkout_dir, cmd)
# Verify that the patched manifest loads properly. Propagate any errors as
# exceptions.
manifest = os.path.join(checkout_dir, self._run.config.manifest)
git.Manifest.Cached(manifest, manifest_include_dir=checkout_dir)
return checkout_dir
@staticmethod
def _FilterArgsForApi(parsed_args, api_minor):
"""Remove arguments that are introduced after an api version."""
def filter_fn(passed_arg):
return passed_arg.opt_inst.api_version <= api_minor
accepted, removed = commandline.FilteringParser.FilterArgs(
parsed_args, filter_fn)
if removed:
cros_build_lib.Warning('The following arguments were removed due to api: '
"'%s'" % ' '.join(removed))
return accepted
@classmethod
def FilterArgsForTargetCbuildbot(cls, buildroot, cbuildbot_path, options):
_, minor = cros_build_lib.GetTargetChromiteApiVersion(buildroot)
args = [cbuildbot_path]
args.extend(options.build_targets)
args.extend(cls._FilterArgsForApi(options.parsed_args, minor))
# Only pass down --cache-dir if it was specified. By default, we want
# the cache dir to live in the root of each checkout, so this means that
# each instance of cbuildbot needs to calculate the default separately.
if minor >= 2 and options.cache_dir_specified:
args += ['--cache-dir', options.cache_dir]
return args
def HandleApplyFailures(self, failures):
"""Handle the case where patches fail to apply."""
if self._run.options.pre_cq or self._run.config.pre_cq:
# Let the PreCQSync stage handle this failure. The PreCQSync stage will
# comment on CLs with the appropriate message when they fail to apply.
#
# WARNING: For manifest patches, the Pre-CQ attempts to apply external
# patches to the internal manifest, and this means we may flag a conflict
# here even if the patch applies cleanly. TODO(davidjames): Fix this.
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Error('Failed applying patches: %s',
'\n'.join(map(str, failures)))
else:
PatchChangesStage.HandleApplyFailures(self, failures)
#pylint: disable=E1101
@osutils.TempDirDecorator
def PerformStage(self):
# The plan for the builders is to use master branch to bootstrap other
# branches. Now, if we wanted to test patches for both the bootstrap code
# (on master) and the branched chromite (say, R20), we need to filter the
# patches by branch.
filter_branch = self._run.manifest_branch
if self._run.options.test_bootstrap:
filter_branch = 'master'
chromite_dir = os.path.join(self.tempdir, 'chromite')
reference_repo = os.path.join(constants.SOURCE_ROOT, 'chromite', '.git')
repository.CloneGitRepo(chromite_dir, constants.CHROMITE_URL,
reference=reference_repo)
git.RunGit(chromite_dir, ['checkout', filter_branch])
def BranchAndChromiteFilter(patch):
return (trybot_patch_pool.BranchFilter(filter_branch, patch) and
trybot_patch_pool.ChromiteFilter(patch))
patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
chromite_dir, filter_branch,
deps_filter_fn=BranchAndChromiteFilter)
filtered_pool = self.chromite_patch_pool.FilterBranch(filter_branch)
if filtered_pool:
self._ApplyPatchSeries(patch_series, filtered_pool)
cbuildbot_path = constants.PATH_TO_CBUILDBOT
if not os.path.exists(os.path.join(self.tempdir, cbuildbot_path)):
cbuildbot_path = 'chromite/cbuildbot/cbuildbot'
# pylint: disable=W0212
cmd = self.FilterArgsForTargetCbuildbot(self.tempdir, cbuildbot_path,
self._run.options)
extra_params = ['--sourceroot=%s' % self._run.options.sourceroot]
extra_params.extend(self._run.options.bootstrap_args)
if self._run.options.test_bootstrap:
# We don't want re-executed instance to see this.
cmd = [a for a in cmd if a != '--test-bootstrap']
else:
# If we've already done the desired number of bootstraps, disable
# bootstrapping for the next execution. Also pass in the patched manifest
# repository.
extra_params.append('--nobootstrap')
if self.manifest_patch_pool:
manifest_dir = self._ApplyManifestPatches(self.manifest_patch_pool)
extra_params.extend(['--manifest-repo-url', manifest_dir])
cmd += extra_params
result_obj = cros_build_lib.RunCommand(
cmd, cwd=self.tempdir, kill_timeout=30, error_code_ok=True)
self.returncode = result_obj.returncode
class SyncStage(generic_stages.BuilderStage):
"""Stage that performs syncing for the builder."""
option_name = 'sync'
output_manifest_sha1 = True
def __init__(self, builder_run, **kwargs):
super(SyncStage, self).__init__(builder_run, **kwargs)
self.repo = None
self.skip_sync = False
# TODO(mtennant): Why keep a duplicate copy of this config value
# at self.internal when it can always be retrieved from config?
self.internal = self._run.config.internal
def _GetManifestVersionsRepoUrl(self, read_only=False):
return cbuildbot_config.GetManifestVersionsRepoUrl(
self.internal,
read_only=read_only)
def Initialize(self):
self._InitializeRepo()
def _InitializeRepo(self):
"""Set up the RepoRepository object."""
self.repo = self.GetRepoRepository()
def GetNextManifest(self):
"""Returns the manifest to use."""
return self._run.config.manifest
def ManifestCheckout(self, next_manifest):
"""Checks out the repository to the given manifest."""
self._Print('\n'.join(['BUILDROOT: %s' % self.repo.directory,
'TRACKING BRANCH: %s' % self.repo.branch,
'NEXT MANIFEST: %s' % next_manifest]))
if not self.skip_sync:
self.repo.Sync(next_manifest)
print >> sys.stderr, self.repo.ExportManifest(
mark_revision=self.output_manifest_sha1)
def RunPrePatchBuild(self):
"""Run through a pre-patch build to prepare for incremental build.
This function runs though the InitSDKStage, SetupBoardStage, and
BuildPackagesStage. It is intended to be called before applying
any patches under test, to prepare the chroot and sysroot in a state
corresponding to ToT prior to an incremental build.
Returns:
True if all stages were successful, False if any of them failed.
"""
suffix = ' (pre-Patch)'
try:
build_stages.InitSDKStage(
self._run, chroot_replace=True, suffix=suffix).Run()
for builder_run in self._run.GetUngroupedBuilderRuns():
for board in builder_run.config.boards:
build_stages.SetupBoardStage(
builder_run, board=board, suffix=suffix).Run()
build_stages.BuildPackagesStage(
builder_run, board=board, suffix=suffix).Run()
except failures_lib.StepFailure:
return False
return True
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
self.Initialize()
with osutils.TempDir() as tempdir:
# Save off the last manifest.
fresh_sync = True
if os.path.exists(self.repo.directory) and not self._run.options.clobber:
old_filename = os.path.join(tempdir, 'old.xml')
try:
old_contents = self.repo.ExportManifest()
except cros_build_lib.RunCommandError as e:
cros_build_lib.Warning(str(e))
else:
osutils.WriteFile(old_filename, old_contents)
fresh_sync = False
# Sync.
self.ManifestCheckout(self.GetNextManifest())
# Print the blamelist.
if fresh_sync:
cros_build_lib.PrintBuildbotStepText('(From scratch)')
elif self._run.options.buildbot:
lkgm_manager.GenerateBlameList(self.repo, old_filename)
# Incremental builds request an additional build before patching changes.
if self._run.config.build_before_patching:
pre_build_passed = self.RunPrePatchBuild()
if not pre_build_passed:
cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
class LKGMSyncStage(SyncStage):
"""Stage that syncs to the last known good manifest blessed by builders."""
output_manifest_sha1 = False
def GetNextManifest(self):
"""Override: Gets the LKGM."""
# TODO(sosa): Should really use an initialized manager here.
if self.internal:
mv_dir = 'manifest-versions-internal'
else:
mv_dir = 'manifest-versions'
manifest_path = os.path.join(self._build_root, mv_dir)
manifest_repo = self._GetManifestVersionsRepoUrl(read_only=True)
manifest_version.RefreshManifestCheckout(manifest_path, manifest_repo)
return os.path.join(manifest_path, lkgm_manager.LKGMManager.LKGM_PATH)
class ManifestVersionedSyncStage(SyncStage):
"""Stage that generates a unique manifest file, and sync's to it."""
# TODO(mtennant): Make this into a builder run value.
output_manifest_sha1 = False
def __init__(self, builder_run, **kwargs):
# Perform the sync at the end of the stage to the given manifest.
super(ManifestVersionedSyncStage, self).__init__(builder_run, **kwargs)
self.repo = None
self.manifest_manager = None
# If a builder pushes changes (even with dryrun mode), we need a writable
# repository. Otherwise, the push will be rejected by the server.
self.manifest_repo = self._GetManifestVersionsRepoUrl(read_only=False)
# 1. If we're uprevving Chrome, Chrome might have changed even if the
# manifest has not, so we should force a build to double check. This
# means that we'll create a new manifest, even if there are no changes.
# 2. If we're running with --debug, we should always run through to
# completion, so as to ensure a complete test.
self._force = self._chrome_rev or self._run.options.debug
def HandleSkip(self):
"""Initializes a manifest manager to the specified version if skipped."""
super(ManifestVersionedSyncStage, self).HandleSkip()
if self._run.options.force_version:
self.Initialize()
self.ForceVersion(self._run.options.force_version)
def ForceVersion(self, version):
"""Creates a manifest manager from given version and returns manifest."""
cros_build_lib.PrintBuildbotStepText(version)
return self.manifest_manager.BootstrapFromVersion(version)
def VersionIncrementType(self):
"""Return which part of the version number should be incremented."""
if self._run.manifest_branch == 'master':
return 'build'
return 'branch'
def RegisterManifestManager(self, manifest_manager):
"""Save the given manifest manager for later use in this run.
Args:
manifest_manager: Expected to be a BuildSpecsManager.
"""
self._run.attrs.manifest_manager = self.manifest_manager = manifest_manager
def Initialize(self):
"""Initializes a manager that manages manifests for associated stages."""
dry_run = self._run.options.debug
self._InitializeRepo()
# If chrome_rev is somehow set, fail.
assert not self._chrome_rev, \
'chrome_rev is unsupported on release builders.'
self.RegisterManifestManager(manifest_version.BuildSpecsManager(
source_repo=self.repo,
manifest_repo=self.manifest_repo,
manifest=self._run.config.manifest,
build_names=self._run.GetBuilderIds(),
incr_type=self.VersionIncrementType(),
force=self._force,
branch=self._run.manifest_branch,
dry_run=dry_run,
master=self._run.config.master))
def _SetChromeVersionIfApplicable(self, manifest):
"""If 'chrome' is in |manifest|, write the version to the BuilderRun object.
Args:
manifest: Path to the manifest.
"""
manifest_dom = minidom.parse(manifest)
elements = manifest_dom.getElementsByTagName(lkgm_manager.CHROME_ELEMENT)
if elements:
chrome_version = elements[0].getAttribute(
lkgm_manager.CHROME_VERSION_ATTR)
logging.info(
'Chrome version was found in the manifest: %s', chrome_version)
# Update the metadata dictionary. This is necessary because the
# metadata dictionary is preserved through re-executions, so
# SyncChromeStage can read the version from the dictionary
# later. This is easier than parsing the manifest again after
# the re-execution.
self._run.attrs.metadata.UpdateKeyDictWithDict(
'version', {'chrome': chrome_version})
def GetNextManifest(self):
"""Uses the initialized manifest manager to get the next manifest."""
assert self.manifest_manager, \
'Must run Initialize before we can get a manifest.'
build_id = self._run.attrs.metadata.GetDict().get('build_id')
to_return = self.manifest_manager.GetNextBuildSpec(
dashboard_url=self.ConstructDashboardURL(),
build_id=build_id)
previous_version = self.manifest_manager.GetLatestPassingSpec()
target_version = self.manifest_manager.current_version
# Print the Blamelist here.
url_prefix = 'http://chromeos-images.corp.google.com/diff/report?'
url = url_prefix + 'from=%s&to=%s' % (previous_version, target_version)
cros_build_lib.PrintBuildbotLink('Blamelist', url)
# The testManifestVersionedSyncOnePartBranch interacts badly with this
# function. It doesn't fully initialize self.manifest_manager which
# causes target_version to be None. Since there isn't a clean fix in
# either direction, just throw this through str(). In the normal case,
# it's already a string anyways.
cros_build_lib.PrintBuildbotStepText(str(target_version))
return to_return
@contextlib.contextmanager
def LocalizeManifest(self, manifest, filter_cros=False):
"""Remove restricted checkouts from the manifest if needed.
Args:
manifest: The manifest to localize.
filter_cros: If set, then only checkouts with a remote of 'cros' or
'cros-internal' are kept, and the rest are filtered out.
"""
if filter_cros:
with osutils.TempDir() as tempdir:
filtered_manifest = os.path.join(tempdir, 'filtered.xml')
doc = ElementTree.parse(manifest)
root = doc.getroot()
for node in root.findall('project'):
remote = node.attrib.get('remote')
if remote and remote not in constants.GIT_REMOTES:
root.remove(node)
doc.write(filtered_manifest)
yield filtered_manifest
else:
yield manifest
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
self.Initialize()
if self._run.options.force_version:
next_manifest = self.ForceVersion(self._run.options.force_version)
else:
next_manifest = self.GetNextManifest()
if not next_manifest:
cros_build_lib.Info('Found no work to do.')
if self._run.attrs.manifest_manager.DidLastBuildFail():
raise failures_lib.StepFailure('The previous build failed.')
else:
sys.exit(0)
# Log this early on for the release team to grep out before we finish.
if self.manifest_manager:
self._Print('\nRELEASETAG: %s\n' % (
self.manifest_manager.current_version))
self._SetChromeVersionIfApplicable(next_manifest)
# To keep local trybots working, remove restricted checkouts from the
# official manifest we get from manifest-versions.
with self.LocalizeManifest(
next_manifest, filter_cros=self._run.options.local) as new_manifest:
self.ManifestCheckout(new_manifest)
class MasterSlaveLKGMSyncStage(ManifestVersionedSyncStage):
"""Stage that generates a unique manifest file candidate, and sync's to it.
This stage uses an LKGM manifest manager that handles LKGM
candidates and their states.
"""
# Timeout for waiting on the latest candidate manifest.
LATEST_CANDIDATE_TIMEOUT_SECONDS = 20 * 60
# TODO(mtennant): Turn this into self._run.attrs.sub_manager or similar.
# An instance of lkgm_manager.LKGMManager for slave builds.
sub_manager = None
def __init__(self, builder_run, **kwargs):
super(MasterSlaveLKGMSyncStage, self).__init__(builder_run, **kwargs)
# lkgm_manager deals with making sure we're synced to whatever manifest
# we get back in GetNextManifest so syncing again is redundant.
self.skip_sync = True
self._chrome_version = None
def _GetInitializedManager(self, internal):
"""Returns an initialized lkgm manager.
Args:
internal: Boolean. True if this is using an internal manifest.
Returns:
lkgm_manager.LKGMManager.
"""
increment = self.VersionIncrementType()
return lkgm_manager.LKGMManager(
source_repo=self.repo,
manifest_repo=cbuildbot_config.GetManifestVersionsRepoUrl(
internal, read_only=False),
manifest=self._run.config.manifest,
build_names=self._run.GetBuilderIds(),
build_type=self._run.config.build_type,
incr_type=increment,
force=self._force,
branch=self._run.manifest_branch,
dry_run=self._run.options.debug,
master=self._run.config.master)
def Initialize(self):
"""Override: Creates an LKGMManager rather than a ManifestManager."""
self._InitializeRepo()
self.RegisterManifestManager(self._GetInitializedManager(self.internal))
if (self._run.config.master and self._GetSlaveConfigs()):
assert self.internal, 'Unified masters must use an internal checkout.'
MasterSlaveLKGMSyncStage.sub_manager = self._GetInitializedManager(False)
def ForceVersion(self, version):
manifest = super(MasterSlaveLKGMSyncStage, self).ForceVersion(version)
if MasterSlaveLKGMSyncStage.sub_manager:
MasterSlaveLKGMSyncStage.sub_manager.BootstrapFromVersion(version)
return manifest
def GetNextManifest(self):
"""Gets the next manifest using LKGM logic."""
assert self.manifest_manager, \
'Must run Initialize before we can get a manifest.'
assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
'Manifest manager instantiated with wrong class.'
if self._run.config.master:
build_id = self._run.attrs.metadata.GetDict().get('build_id')
manifest = self.manifest_manager.CreateNewCandidate(
chrome_version=self._chrome_version,
build_id=build_id)
if MasterSlaveLKGMSyncStage.sub_manager:
MasterSlaveLKGMSyncStage.sub_manager.CreateFromManifest(
manifest, dashboard_url=self.ConstructDashboardURL())
return manifest
else:
return self.manifest_manager.GetLatestCandidate(
dashboard_url=self.ConstructDashboardURL(),
timeout=self.LATEST_CANDIDATE_TIMEOUT_SECONDS)
def GetLatestChromeVersion(self):
"""Returns the version of Chrome to uprev."""
return cros_mark_chrome_as_stable.GetLatestRelease(gclient.GetBaseURLs()[0])
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
"""Performs the stage."""
if (self._chrome_rev == constants.CHROME_REV_LATEST and
self._run.config.master):
# PFQ master needs to determine what version of Chrome to build
# for all slaves.
self._chrome_version = self.GetLatestChromeVersion()
ManifestVersionedSyncStage.PerformStage(self)
class CommitQueueSyncStage(MasterSlaveLKGMSyncStage):
"""Commit Queue Sync stage that handles syncing and applying patches.
Similar to the MasterSlaveLKGMSyncStage, this stage handles syncing
to a manifest, passing around that manifest to other builders.
What makes this stage different is that the CQ master finds the
patches on Gerrit which are ready to be committed, applies them, and
includes the patches in the new manifest. The slaves sync to the
manifest and apply the patches written in the manifest.
"""
def __init__(self, builder_run, **kwargs):
super(CommitQueueSyncStage, self).__init__(builder_run, **kwargs)
# Figure out the builder's name from the buildbot waterfall.
builder_name = self._run.config.paladin_builder_name
self.builder_name = builder_name if builder_name else self._run.config.name
# The pool of patches to be picked up by the commit queue.
# - For the master commit queue, it's initialized in GetNextManifest.
# - For slave commit queues, it's initialized in _SetPoolFromManifest.
#
# In all cases, the pool is saved to disk.
self.pool = None
def HandleSkip(self):
"""Handles skip and initializes validation pool from manifest."""
super(CommitQueueSyncStage, self).HandleSkip()
filename = self._run.options.validation_pool
if filename:
self.pool = validation_pool.ValidationPool.Load(filename,
metadata=self._run.attrs.metadata, record_patches=False)
else:
self._SetPoolFromManifest(self.manifest_manager.GetLocalManifest())
def _ChangeFilter(self, pool, changes, non_manifest_changes):
# First, look for changes that were tested by the Pre-CQ.
changes_to_test = []
for change in changes:
status = pool.GetCLStatus(PRE_CQ, change)
if status == manifest_version.BuilderStatus.STATUS_PASSED:
changes_to_test.append(change)
# If we only see changes that weren't verified by Pre-CQ, try all of the
# changes. This ensures that the CQ continues to work even if the Pre-CQ is
# down.
if not changes_to_test:
changes_to_test = changes
return changes_to_test, non_manifest_changes
def _SetPoolFromManifest(self, manifest):
"""Sets validation pool based on manifest path passed in."""
# Note that GetNextManifest() calls GetLatestCandidate() in this case,
# so the repo will already be sync'd appropriately. This means that
# AcquirePoolFromManifest does not need to sync.
self.pool = validation_pool.ValidationPool.AcquirePoolFromManifest(
manifest, self._run.config.overlays, self.repo,
self._run.buildnumber, self.builder_name,
self._run.config.master, self._run.options.debug,
metadata=self._run.attrs.metadata)
def GetNextManifest(self):
"""Gets the next manifest using LKGM logic."""
assert self.manifest_manager, \
'Must run Initialize before we can get a manifest.'
assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
'Manifest manager instantiated with wrong class.'
build_id = self._run.attrs.metadata.GetDict().get('build_id')
if self._run.config.master:
try:
# In order to acquire a pool, we need an initialized buildroot.
if not git.FindRepoDir(self.repo.directory):
self.repo.Initialize()
self.pool = pool = validation_pool.ValidationPool.AcquirePool(
self._run.config.overlays, self.repo,
self._run.buildnumber, self.builder_name,
self._run.options.debug,
check_tree_open=not self._run.options.debug or
self._run.options.mock_tree_status,
changes_query=self._run.options.cq_gerrit_override,
change_filter=self._ChangeFilter, throttled_ok=True,
metadata=self._run.attrs.metadata)
except validation_pool.TreeIsClosedException as e:
cros_build_lib.Warning(str(e))
return None
manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool,
build_id=build_id)
if MasterSlaveLKGMSyncStage.sub_manager:
MasterSlaveLKGMSyncStage.sub_manager.CreateFromManifest(
manifest, dashboard_url=self.ConstructDashboardURL(),
build_id=build_id)
return manifest
else:
manifest = self.manifest_manager.GetLatestCandidate(
dashboard_url=self.ConstructDashboardURL())
if manifest:
if self._run.config.build_before_patching:
pre_build_passed = self.RunPrePatchBuild()
cros_build_lib.PrintBuildbotStepName(
'CommitQueueSync : Apply Patches')
if not pre_build_passed:
cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
self._SetPoolFromManifest(manifest)
self.pool.ApplyPoolIntoRepo()
return manifest
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
"""Performs normal stage and prints blamelist at end."""
if self._run.options.force_version:
self.HandleSkip()
else:
ManifestVersionedSyncStage.PerformStage(self)
class PreCQSyncStage(SyncStage):
"""Sync and apply patches to test if they compile."""
def __init__(self, builder_run, patches, **kwargs):
super(PreCQSyncStage, self).__init__(builder_run, **kwargs)
# The list of patches to test.
self.patches = patches
# The ValidationPool of patches to test. Initialized in PerformStage, and
# refreshed after bootstrapping by HandleSkip.
self.pool = None
def HandleSkip(self):
"""Handles skip and loads validation pool from disk."""
super(PreCQSyncStage, self).HandleSkip()
filename = self._run.options.validation_pool
if filename:
self.pool = validation_pool.ValidationPool.Load(filename,
metadata=self._run.attrs.metadata)
def PerformStage(self):
super(PreCQSyncStage, self).PerformStage()
self.pool = validation_pool.ValidationPool.AcquirePreCQPool(
self._run.config.overlays, self._build_root,
self._run.buildnumber, self._run.config.name,
dryrun=self._run.options.debug_forced, changes=self.patches,
metadata=self._run.attrs.metadata)
self.pool.ApplyPoolIntoRepo()
if len(self.pool.changes) == 0:
cros_build_lib.Die('No changes have been applied.')
class PreCQLauncherStage(SyncStage):
"""Scans for CLs and automatically launches Pre-CQ jobs to test them."""
STATUS_INFLIGHT = validation_pool.ValidationPool.STATUS_INFLIGHT
STATUS_PASSED = validation_pool.ValidationPool.STATUS_PASSED
STATUS_FAILED = validation_pool.ValidationPool.STATUS_FAILED
STATUS_LAUNCHING = validation_pool.ValidationPool.STATUS_LAUNCHING
STATUS_WAITING = validation_pool.ValidationPool.STATUS_WAITING
# The number of minutes we allow before considering a launch attempt failed.
# If this window isn't hit in a given launcher run, the window will start
# again from scratch in the next run.
LAUNCH_DELAY = 30
# The number of minutes we allow before considering an in-flight
# job failed. If this window isn't hit in a given launcher run, the window
# will start again from scratch in the next run.
INFLIGHT_DELAY = 120
# The maximum number of patches we will allow in a given trybot run. This is
# needed because our trybot infrastructure can only handle so many patches at
# once.
MAX_PATCHES_PER_TRYBOT_RUN = 50
def __init__(self, builder_run, **kwargs):
super(PreCQLauncherStage, self).__init__(builder_run, **kwargs)
self.skip_sync = True
# Mapping from launching changes to the first known time when they
# were launching.
self.launching = {}
# Mapping from inflight changes to the first known time when they
# were inflight.
self.inflight = {}
self.retried = set()
def _HasLaunchTimedOut(self, change):
"""Check whether a given |change| has timed out on its trybot launch.
Assumes that the change is in the middle of being launched.
Returns:
True if the change has timed out. False otherwise.
"""
diff = datetime.timedelta(minutes=self.LAUNCH_DELAY)
return datetime.datetime.now() - self.launching[change] > diff
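# Illustrative timing for the check above (hypothetical clock values): with
# LAUNCH_DELAY = 30, a change first recorded in self.launching at 10:00 is
# reported as timed out by any call made after 10:30.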
def _HasInflightTimedOut(self, change):
"""Check whether a given |change| has timed out while trybot inflight.
Assumes that the change's trybot is inflight.
Returns:
True if the change has timed out. False otherwise.
"""
diff = datetime.timedelta(minutes=self.INFLIGHT_DELAY)
return datetime.datetime.now() - self.inflight[change] > diff
@staticmethod
def _PrintPatchStatus(patch, status):
"""Print a link to |patch| with |status| info."""
items = (
status,
os.path.basename(patch.project),
str(patch),
)
cros_build_lib.PrintBuildbotLink(' | '.join(items), patch.url)
def GetPreCQStatus(self, pool, changes):
"""Get the Pre-CQ status of a list of changes.
Side effect: reject or retry changes that have timed out.
Args:
pool: The validation pool.
changes: Changes to examine.
Returns:
busy: The set of CLs that are currently being tested.
passed: The set of CLs that have been verified.
"""
busy, passed = set(), set()
for change in changes:
status = pool.GetCLStatus(PRE_CQ, change)
if status != self.STATUS_LAUNCHING:
# The trybot is not launching, so we should remove it from our
# launching timeout map.
self.launching.pop(change, None)
if status != self.STATUS_INFLIGHT:
# The trybot is not inflight, so we should remove it from our
# inflight timeout map.
self.inflight.pop(change, None)
if status == self.STATUS_LAUNCHING:
# The trybot is in the process of launching.
busy.add(change)
if change not in self.launching:
# Record the launch time of changes.
self.launching[change] = datetime.datetime.now()
elif self._HasLaunchTimedOut(change):
if change in self.retried:
msg = ('We were not able to launch a pre-cq trybot for your change.'
'\n\n'
'This problem can happen if the trybot waterfall is very '
'busy, or if there is an infrastructure issue. Please '
'notify the sheriff and mark your change as ready again. If '
'this problem occurs multiple times in a row, please file a '
'bug.')
pool.SendNotification(change, '%(details)s', details=msg)
pool.RemoveCommitReady(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
self._run.options.debug)
self.retried.discard(change)
else:
# Try the change again.
self.retried.add(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
self._run.options.debug)
elif status == self.STATUS_INFLIGHT:
# Once a Pre-CQ run actually starts, it'll set the status to
# STATUS_INFLIGHT.
busy.add(change)
if change not in self.inflight:
# Record the inflight start time.
self.inflight[change] = datetime.datetime.now()
elif self._HasInflightTimedOut(change):
msg = ('The pre-cq trybot for your change timed out after %s minutes.'
'\n\n'
'This problem can happen if your change causes the builder '
'to hang, or if there is some infrastructure issue. If your '
'change is not at fault you may mark your change as ready '
'again. If this problem occurs multiple times please notify '
'the sheriff and file a bug.' % self.INFLIGHT_DELAY)
pool.SendNotification(change, '%(details)s', details=msg)
pool.RemoveCommitReady(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
self._run.options.debug)
elif status == self.STATUS_FAILED:
# The Pre-CQ run failed for this change. It's possible that we got
# unlucky and this change was just marked as 'Not Ready' by a bot. To
# test this, mark the CL as 'waiting' for now. If the CL is still marked
# as 'Ready' next time we check, we'll know the CL is truly still ready.
busy.add(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
self._run.options.debug)
self._PrintPatchStatus(change, 'failed')
elif status == self.STATUS_PASSED:
passed.add(change)
self._PrintPatchStatus(change, 'passed')
return busy, passed
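# Summary of the status handling above: LAUNCHING and INFLIGHT changes count as
# busy (and are rejected or retried once their timeout windows expire), FAILED
# changes stay busy for this cycle but are reset to WAITING so a spurious
# failure gets one more look, and only PASSED changes are reported as verified.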
def LaunchTrybot(self, pool, plan):
"""Launch a Pre-CQ run with the provided list of CLs.
Args:
pool: ValidationPool corresponding to |plan|.
plan: The list of patches to test in the Pre-CQ run.
"""
cmd = ['cbuildbot', '--remote', constants.PRE_CQ_BUILDER_NAME]
if self._run.options.debug:
cmd.append('--debug')
for patch in plan:
cmd += ['-g', cros_patch.AddPrefix(patch, patch.gerrit_number)]
self._PrintPatchStatus(patch, 'testing')
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
for patch in plan:
if pool.GetCLStatus(PRE_CQ, patch) != self.STATUS_PASSED:
pool.UpdateCLStatus(PRE_CQ, patch, self.STATUS_LAUNCHING,
self._run.options.debug)
def GetDisjointTransactionsToTest(self, pool, changes):
"""Get the list of disjoint transactions to test.
Side effect: reject or retry changes that have timed out.
Returns:
A list of disjoint transactions to test. Each transaction should be sent
to a different Pre-CQ trybot.
"""
busy, passed = self.GetPreCQStatus(pool, changes)
# Create a list of disjoint transactions to test.
manifest = git.ManifestCheckout.Cached(self._build_root)
plans = pool.CreateDisjointTransactions(
manifest, max_txn_length=self.MAX_PATCHES_PER_TRYBOT_RUN)
for plan in plans:
# If any of the CLs in the plan are currently "busy" being tested,
# wait until they're done before launching our trybot run. This helps
# avoid race conditions.
#
# Similarly, if all of the CLs in the plan have already been validated,
# there's no need to launch a trybot run.
plan = set(plan)
if plan.issubset(passed):
logging.info('CLs already verified: %r', ' '.join(map(str, plan)))
elif plan.intersection(busy):
logging.info('CLs currently being verified: %r',
' '.join(map(str, plan.intersection(busy))))
if plan.difference(busy):
logging.info('CLs waiting on verification of dependencies: %r',
' '.join(map(str, plan.difference(busy))))
else:
yield plan
def ProcessChanges(self, pool, changes, _non_manifest_changes):
"""Process a list of changes that were marked as Ready.
From our list of changes that were marked as Ready, we create a
list of disjoint transactions and send each one to a separate Pre-CQ
trybot.
Non-manifest changes are just submitted here because they don't need to be
verified by either the Pre-CQ or CQ.
"""
# Submit non-manifest changes if we can.
if tree_status.IsTreeOpen():
pool.SubmitNonManifestChanges(check_tree_open=False)
# Launch trybots for manifest changes.
for plan in self.GetDisjointTransactionsToTest(pool, changes):
self.LaunchTrybot(pool, plan)
# Tell ValidationPool to keep waiting for more changes until we hit
# its internal timeout.
return [], []
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
# Setup and initialize the repo.
super(PreCQLauncherStage, self).PerformStage()
# Loop through all of the changes until we hit a timeout.
validation_pool.ValidationPool.AcquirePool(
self._run.config.overlays, self.repo,
self._run.buildnumber,
constants.PRE_CQ_LAUNCHER_NAME,
dryrun=self._run.options.debug,
changes_query=self._run.options.cq_gerrit_override,
check_tree_open=False, change_filter=self.ProcessChanges,
metadata=self._run.attrs.metadata)
|
bpsinc-native/src_third_party_chromite
|
cbuildbot/stages/sync_stages.py
|
Python
|
bsd-3-clause
| 41,804
|
#!/usr/bin/env python
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
import autoProcessTV
if len(sys.argv) < 4:
print "No folder supplied - is this being called from HellaVCR?"
sys.exit()
else:
autoProcessTV.processEpisode(sys.argv[3], sys.argv[2])
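# Argument order note (inferred from the call above, not verified against
# HellaVCR's documentation): sys.argv[3] is passed as the download folder and
# sys.argv[2] as the original file name expected by processEpisode.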
|
Branlala/docker-sickbeardfr
|
sickbeard/autoProcessTV/hellaToSickBeard.py
|
Python
|
mit
| 979
|
"""The test for the min/max sensor platform."""
import unittest
from homeassistant.bootstrap import setup_component
from homeassistant.const import (
STATE_UNKNOWN, ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from tests.common import get_test_home_assistant
class TestMinMaxSensor(unittest.TestCase):
"""Test the min/max sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.values = [17, 20, 15.2]
self.count = len(self.values)
self.min = min(self.values)
self.max = max(self.values)
self.mean = round(sum(self.values) / self.count, 2)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_min_sensor(self):
"""Test the min sensor."""
config = {
'sensor': {
'platform': 'min_max',
'name': 'test',
'type': 'min',
'entity_ids': [
'sensor.test_1',
'sensor.test_2',
'sensor.test_3',
]
}
}
assert setup_component(self.hass, 'sensor', config)
entity_ids = config['sensor']['entity_ids']
for entity_id, value in dict(zip(entity_ids, self.values)).items():
self.hass.states.set(entity_id, value)
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_min')
self.assertEqual(str(float(self.min)), state.state)
self.assertEqual(self.max, state.attributes.get('max_value'))
self.assertEqual(self.mean, state.attributes.get('mean'))
def test_max_sensor(self):
"""Test the max sensor."""
config = {
'sensor': {
'platform': 'min_max',
'name': 'test',
'type': 'max',
'entity_ids': [
'sensor.test_1',
'sensor.test_2',
'sensor.test_3',
]
}
}
assert setup_component(self.hass, 'sensor', config)
entity_ids = config['sensor']['entity_ids']
for entity_id, value in dict(zip(entity_ids, self.values)).items():
self.hass.states.set(entity_id, value)
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_max')
self.assertEqual(str(float(self.max)), state.state)
self.assertEqual(self.min, state.attributes.get('min_value'))
self.assertEqual(self.mean, state.attributes.get('mean'))
def test_not_enough_sensor_value(self):
"""Test that there is nothing done if not enough values available."""
config = {
'sensor': {
'platform': 'min_max',
'name': 'test',
'type': 'max',
'entity_ids': [
'sensor.test_1',
'sensor.test_2',
'sensor.test_3',
]
}
}
assert setup_component(self.hass, 'sensor', config)
entity_ids = config['sensor']['entity_ids']
self.hass.states.set(entity_ids[0], self.values[0])
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_max')
self.assertEqual(STATE_UNKNOWN, state.state)
self.hass.states.set(entity_ids[1], self.values[1])
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_max')
self.assertEqual(STATE_UNKNOWN, state.state)
self.hass.states.set(entity_ids[2], self.values[2])
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_max')
self.assertNotEqual(STATE_UNKNOWN, state.state)
def test_different_unit_of_measurement(self):
"""Test for different unit of measurement."""
config = {
'sensor': {
'platform': 'min_max',
'name': 'test',
'type': 'mean',
'entity_ids': [
'sensor.test_1',
'sensor.test_2',
'sensor.test_3',
]
}
}
assert setup_component(self.hass, 'sensor', config)
entity_ids = config['sensor']['entity_ids']
self.hass.states.set(entity_ids[0], self.values[0],
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS})
self.hass.block_till_done()
state = self.hass.states.get('sensor.test_mean')
self.assertEqual(STATE_UNKNOWN, state.state)
self.assertEqual('°C', state.attributes.get('unit_of_measurement'))
self.hass.states.set(entity_ids[1], self.values[1],
{ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT})
self.hass.block_till_done()
self.assertEqual(STATE_UNKNOWN, state.state)
self.assertEqual('°C', state.attributes.get('unit_of_measurement'))
self.hass.states.set(entity_ids[2], self.values[2],
{ATTR_UNIT_OF_MEASUREMENT: '%'})
self.hass.block_till_done()
self.assertEqual(STATE_UNKNOWN, state.state)
self.assertEqual('°C', state.attributes.get('unit_of_measurement'))
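# The assertions above capture the intended behaviour: once the entities report
# conflicting units (°C, °F, %), the mean sensor stays STATE_UNKNOWN while
# keeping the first unit of measurement it saw.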
|
srcLurker/home-assistant
|
tests/components/sensor/test_min_max.py
|
Python
|
mit
| 5,377
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import functools
import mock
from django.template.loader import render_to_string
from exam import fixture
from sentry.interfaces.base import InterfaceValidationError
from sentry.interfaces.stacktrace import (
Frame, Stacktrace, get_context, slim_frame_data
)
from sentry.models import Event
from sentry.testutils import TestCase
class GetContextTest(TestCase):
def test_works_with_empty_filename(self):
result = get_context(0, 'hello world')
assert result == [(0, 'hello world')]
class StacktraceTest(TestCase):
@fixture
def interface(self):
return Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/bar.py'
},
{
'filename': 'foo/baz.py',
'lineno': 1,
'in_app': True,
}
]))
def test_legacy_interface(self):
# Simple test to ensure legacy data works correctly with the ``Frame``
# objects
event = self.event
interface = Stacktrace.to_python(event.data['sentry.interfaces.Stacktrace'])
assert len(interface.frames) == 2
assert interface == event.interfaces['sentry.interfaces.Stacktrace']
def test_requires_filename(self):
with self.assertRaises(InterfaceValidationError):
Stacktrace.to_python(dict(frames=[{}]))
Stacktrace.to_python(dict(frames=[{
'filename': 'foo.py',
}]))
Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
}]))
def test_requires_frames(self):
with self.assertRaises(InterfaceValidationError):
Stacktrace.to_python({})
with self.assertRaises(InterfaceValidationError):
Stacktrace.to_python(dict(frames=[]))
with self.assertRaises(InterfaceValidationError):
Stacktrace.to_python(dict(frames=1))
def test_allows_abs_path_without_filename(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'abs_path': 'foo/bar/baz.py',
}]))
frame = interface.frames[0]
assert frame.filename == 'foo/bar/baz.py'
assert frame.abs_path == frame.filename
def test_coerces_url_filenames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'http://foo.com/foo.js',
}]))
frame = interface.frames[0]
assert frame.filename == '/foo.js'
assert frame.abs_path == 'http://foo.com/foo.js'
def test_does_not_overwrite_filename(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.js',
'abs_path': 'http://foo.com/foo.js',
}]))
frame = interface.frames[0]
assert frame.filename == 'foo.js'
assert frame.abs_path == 'http://foo.com/foo.js'
def test_ignores_results_with_empty_path(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'http://foo.com',
}]))
frame = interface.frames[0]
assert frame.filename == 'http://foo.com'
assert frame.abs_path == frame.filename
def test_serialize_returns_frames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
}]))
result = interface.to_json()
assert 'frames' in result
def test_hash_without_system_frames(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
'in_app': True,
}, {
'lineno': 1,
'filename': 'bar.py',
'in_app': None,
}]))
result = interface.get_hash(system_frames=False)
assert result == ['foo.py', 1]
result = interface.get_hash(system_frames=True)
assert result == ['foo.py', 1, 'bar.py', 1]
def test_compute_hashes(self):
interface = Stacktrace.to_python(dict(frames=[{
'lineno': 1,
'filename': 'foo.py',
'in_app': True,
}, {
'lineno': 1,
'filename': 'bar.py',
'in_app': None,
}]))
result = interface.compute_hashes('python')
assert result == [['foo.py', 1, 'bar.py', 1], ['foo.py', 1]]
def test_get_hash_with_minimal_app_frames(self):
frames = [{
'lineno': 1,
'filename': 'foo.py',
'in_app': True,
}] + [{
'lineno': 1,
'filename': 'bar.py',
'in_app': False,
} for _ in range(11)]
interface = Stacktrace.to_python(dict(frames=frames))
result = interface.get_hash(system_frames=False)
assert not result
def test_get_hash_with_only_required_vars(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 1])
def test_get_hash_sanitizes_block_functions(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': 'foo.py',
'function': 'block in _conditional_callback_around_233',
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'block'])
def test_get_hash_sanitizes_versioned_filenames(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': '/data/foo/releases/20140114151955/app/views/foo.html.erb',
'context_line': '<% if @hotels.size > 0 %>',
})
result = interface.get_hash()
self.assertEquals(result, [
'/data/foo/releases/<version>/app/views/foo.html.erb',
'<% if @hotels.size > 0 %>',
])
interface = Frame.to_python({
'filename': '20140114151955/app/views/foo.html.erb',
'context_line': '<% if @hotels.size > 0 %>',
})
result = interface.get_hash()
self.assertEquals(result, [
'<version>/app/views/foo.html.erb',
'<% if @hotels.size > 0 %>',
])
def test_get_hash_ignores_java8_lambda_module(self):
interface = Frame.to_python({
'module': 'foo.bar.Baz$$Lambda$40/1673859467',
'function': 'call',
})
result = interface.get_hash()
self.assertEquals(result, [
'<module>',
'call',
])
def test_get_hash_ignores_java8_lambda_function(self):
interface = Frame.to_python({
'module': 'foo.bar.Baz',
'function': 'lambda$work$1',
})
result = interface.get_hash()
self.assertEquals(result, [
'foo.bar.Baz',
'<function>',
])
def test_get_hash_ignores_ENHANCED_spring_classes(self):
interface = Frame.to_python({
'module': 'invalid.gruml.talkytalkyhub.common.config.'
'JipJipConfig$$EnhancerBySpringCGLIB$$1ebdddb0',
'function': 'jipJipManagementApplication'
})
result = interface.get_hash()
self.assertEquals(result, [
'invalid.gruml.talkytalkyhub.common.config.JipJipConfig'
'$$EnhancerBySpringCGLIB$$<auto>',
'jipJipManagementApplication',
])
def test_get_hash_ignores_extra_ENHANCED_spring_classes(self):
interface = Frame.to_python({
'module': 'invalid.gruml.talkytalkyhub.common.config.'
'JipJipConfig$$EnhancerBySpringCGLIB$$1ebdddb0'
'$$EnhancerBySpringCGLIB$$8219cd38'
'$$FastClassBySpringCGLIB$$6c0b35d1',
'function': 'jipJipManagementApplication'
})
result = interface.get_hash()
self.assertEquals(result, [
'invalid.gruml.talkytalkyhub.common.config.JipJipConfig'
'$$EnhancerBySpringCGLIB$$<auto>$$EnhancerBySpringCGLIB$$<auto>'
'$$FastClassBySpringCGLIB$$<auto>',
'jipJipManagementApplication',
])
def test_get_hash_ignores_sun_java_generated_methods(self):
interface = Frame.to_python({
'module': 'sun.reflect.GeneratedMethodAccessor12345',
'function': 'invoke',
})
result = interface.get_hash()
self.assertEquals(result, [
'sun.reflect.GeneratedMethodAccessor',
'invoke',
])
def test_get_hash_sanitizes_erb_templates(self):
# This is Ruby specific
interface = Frame.to_python({
'filename': 'foo.html.erb',
'function': '_foo_html_erb__3327151541118998292_70361296749460',
})
result = interface.get_hash()
self.assertEquals(result, [
'foo.html.erb', '_foo_html_erb__<anon>_<anon>',
])
def test_get_hash_ignores_filename_if_blob(self):
interface = Frame.to_python({
'filename': 'blob:http://example.com/7f7aaadf-a006-4217-9ed5-5fbf8585c6c0',
})
result = interface.get_hash()
self.assertEquals(result, [])
def test_get_hash_ignores_filename_if_http(self):
interface = Frame.to_python({
'context_line': 'hello world',
'filename': 'http://foo.com/foo.py',
'function': 'test',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_ignores_filename_if_https(self):
interface = Frame.to_python({
'context_line': 'hello world',
'filename': 'https://foo.com/foo.py',
'function': 'test',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_ignores_filename_if_abs_path_is_http(self):
interface = Frame.to_python({
'context_line': 'hello world',
'abs_path': 'https://foo.com/foo.py',
'function': 'test',
'filename': 'foo.py',
})
result = interface.get_hash()
self.assertEquals(result, ['hello world'])
def test_get_hash_uses_module_over_filename(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
'module': 'foo'
})
result = interface.get_hash()
self.assertEquals(result, ['foo', 1])
def test_get_hash_uses_function_over_lineno(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'foo.py',
'function': 'bar'
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'bar'])
def test_get_hash_uses_context_line_over_function(self):
interface = Frame.to_python({
'context_line': 'foo bar',
'lineno': 1,
'filename': 'foo.py',
'function': 'bar'
})
result = interface.get_hash()
self.assertEquals(result, ['foo.py', 'foo bar'])
def test_get_hash_discards_seemingly_useless_stack(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': '<HTML>',
'lineno': 1,
'abs_path': 'http://example.com/foo',
'filename': 'foo',
'function': '?',
}],
})
result = interface.get_hash()
assert result == []
def test_get_hash_does_not_discard_non_urls(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': '<HTML>',
'lineno': 1,
'abs_path': 'foo',
'filename': 'foo',
'function': '?',
}],
})
result = interface.get_hash()
assert result != []
def test_get_hash_excludes_single_frame_urls(self):
"""
Browser JS will often throw errors (from inlined code in an HTML page)
which contain only a single frame, no function name, and have the HTML
document as the filename.
In this case the hash is often not usable as the context cannot be
trusted and the URL is dynamic.
"""
interface = Stacktrace.to_python({
'frames': [{
'context_line': 'hello world',
'abs_path': 'http://foo.com/bar/',
'lineno': 107,
'filename': '/bar/',
'module': '<unknown module>',
}],
})
result = interface.get_hash()
assert result == []
def test_cocoa_culprit(self):
stacktrace = Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/baz.c',
'package': '/foo/bar/baz.dylib',
'lineno': 1,
'in_app': True,
'function': '-[CRLCrashAsyncSafeThread crash]',
}
]))
assert stacktrace.get_culprit_string(platform='cocoa') == '-[CRLCrashAsyncSafeThread crash]'
def test_emoji_culprit(self):
stacktrace = Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/baz.c',
'package': '/foo/bar/baz.dylib',
'module': u'\U0001f62d',
'lineno': 1,
'in_app': True,
'function': u'\U0001f60d',
}
]))
assert stacktrace.get_culprit_string(platform='javascript') == u'\U0001f60d(\U0001f62d)'
def test_exclude_libswiftCore_from_in_app(self):
stacktrace = Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/baz.c',
'package': '/foo/bar/libswiftCore.dylib',
'lineno': 1,
'in_app': True,
'function': 'fooBar',
}
]))
assert stacktrace.frames[0].in_app is False
def test_cocoa_strict_stacktrace(self):
stacktrace = Stacktrace.to_python(dict(frames=[
{
'filename': 'foo/baz.c',
'package': '/foo/bar/libswiftCore.dylib',
'lineno': 1,
'in_app': False,
'function': 'fooBar',
},
{
'package': '/foo/bar/MyApp',
'in_app': True,
'function': 'fooBar2',
},
{
'filename': 'Mycontroller.swift',
'package': '/foo/bar/MyApp',
'in_app': True,
'function': '-[CRLCrashAsyncSafeThread crash]',
}
]))
assert stacktrace.get_culprit_string(platform='cocoa') == '-[CRLCrashAsyncSafeThread crash]'
def test_get_hash_does_not_group_different_js_errors(self):
interface = Stacktrace.to_python({
'frames': [{
'context_line': '{snip}',
'lineno': 20,
'filename': 'https://foo.com/index.js',
'function': '?',
}],
})
result = interface.get_hash()
assert result == []
def test_get_hash_uses_symbol_instead_of_function(self):
interface = Frame.to_python({
'module': 'libfoo',
'function': 'int main()',
'symbol': '_main',
})
result = interface.get_hash()
self.assertEquals(result, [
'libfoo',
'_main',
])
def test_get_hash_skips_symbol_if_unknown(self):
interface = Frame.to_python({
'module': 'libfoo',
'function': 'main',
'symbol': '?',
})
result = interface.get_hash()
self.assertEquals(result, [
'libfoo',
'main',
])
@mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
def test_to_string_returns_stacktrace(self, get_stacktrace):
event = mock.Mock(spec=Event())
interface = Stacktrace(frames=[])
result = interface.to_string(event)
get_stacktrace.assert_called_once_with(event, system_frames=False, max_frames=10)
self.assertEquals(result, get_stacktrace.return_value)
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
@mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
def test_get_traceback_response(self, get_stacktrace):
event = mock.Mock(spec=Event())
event.message = 'foo'
get_stacktrace.return_value = 'bar'
interface = Stacktrace.to_python(dict(frames=[{'lineno': 1, 'filename': 'foo.py'}]))
result = interface.get_traceback(event)
get_stacktrace.assert_called_once_with(event, newest_first=None)
self.assertEquals(result, 'foo\n\nbar')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_only_filename(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo'}, {'filename': 'bar'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo"\n File "bar"')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_module(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'module': 'foo'}, {'module': 'bar'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n Module "foo"\n Module "bar"')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_and_function(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo', 'function': 'biz'}, {'filename': 'bar', 'function': 'baz'}]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", in biz\n File "bar", in baz')
@mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
def test_get_stacktrace_with_filename_function_lineno_and_context(self):
event = mock.Mock(spec=Event())
interface = Stacktrace.to_python(dict(frames=[
{'filename': 'foo', 'function': 'biz', 'lineno': 3, 'context_line': ' def foo(r):'},
{'filename': 'bar', 'function': 'baz', 'lineno': 5, 'context_line': ' return None'},
]))
result = interface.get_stacktrace(event)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", line 3, in biz\n def foo(r):\n File "bar", line 5, in baz\n return None')
def test_bad_input(self):
with self.assertRaises(InterfaceValidationError):
Frame.to_python({
'filename': 1,
})
with self.assertRaises(InterfaceValidationError):
Frame.to_python({
'filename': 'foo',
'abs_path': 1,
})
with self.assertRaises(InterfaceValidationError):
Frame.to_python({
'function': 1,
})
with self.assertRaises(InterfaceValidationError):
Frame.to_python({
'module': 1,
})
with self.assertRaises(InterfaceValidationError):
Frame.to_python({
'function': '?',
})
def test_context_with_nan(self):
self.assertEquals(
Frame.to_python({
'filename': 'x',
'vars': {'x': float('inf')},
}).vars,
{'x': '<inf>'},
)
self.assertEquals(
Frame.to_python({
'filename': 'x',
'vars': {'x': float('-inf')},
}).vars,
{'x': '<-inf>'},
)
self.assertEquals(
Frame.to_python({
'filename': 'x',
'vars': {'x': float('nan')},
}).vars,
{'x': '<nan>'},
)
def test_address_normalization(self):
interface = Frame.to_python({
'lineno': 1,
'filename': 'blah.c',
'function': 'main',
'instruction_addr': 123456,
'symbol_addr': '123450',
'image_addr': '0x0',
})
assert interface.instruction_addr == '0x1e240'
assert interface.symbol_addr == '0x1e23a'
assert interface.image_addr == '0x0'
class SlimFrameDataTest(TestCase):
def test_under_max(self):
interface = Stacktrace.to_python({'frames': [{'filename': 'foo'}]})
slim_frame_data(interface, 4)
assert len(interface.frames) == 1
assert not interface.frames_omitted
def test_over_max(self):
values = []
for n in range(5):
values.append({
'filename': 'frame %d' % n,
'vars': {'foo': 'bar'},
'context_line': 'b',
'pre_context': ['a'],
'post_context': ['c'],
})
interface = Stacktrace.to_python({'frames': values})
slim_frame_data(interface, 4)
assert len(interface.frames) == 5
for value, num in zip(interface.frames[:2], range(2)):
assert value.filename == 'frame %d' % num
assert value.vars is not None
assert value.pre_context is not None
assert value.post_context is not None
for value, num in zip(interface.frames[3:], range(3, 5)):
assert value.filename == 'frame %d' % num
assert value.vars is not None
assert value.pre_context is not None
assert value.post_context is not None
value = interface.frames[2]
assert value.filename == 'frame 2'
assert not value.vars
assert not value.pre_context
assert not value.post_context
def test_java_frame_rendering():
render = functools.partial(render_to_string, 'sentry/partial/frames/java.txt')
# This is the ideal case.
assert render({
'module': 'com.getsentry.example.Example',
'function': 'test',
'filename': 'Example.java',
'lineno': 1,
}).strip() == 'at com.getsentry.example.Example.test(Example.java:1)'
# Legacy support for frames without filename.
assert render({
'module': 'com.getsentry.example.Example',
'function': 'test',
'lineno': 1,
}).strip() == 'at com.getsentry.example.Example.test'
# (This shouldn't happen, but...)
assert render({
'module': 'com.getsentry.example.Example',
'function': 'test',
'filename': 'foo/bar/Example.java',
'lineno': 1,
}).strip() == 'at com.getsentry.example.Example.test(Example.java:1)'
# Native methods don't have line numbers.
assert render({
'function': 'test',
'filename': 'Example.java',
'lineno': -2,
}).strip() == 'at test(Example.java)'
assert render({
'function': 'test',
'filename': 'Example.java',
'lineno': 1,
}).strip() == 'at test(Example.java:1)'
|
JamesMura/sentry
|
tests/sentry/interfaces/test_stacktrace.py
|
Python
|
bsd-3-clause
| 23,486
|
#base1
import base64
print base64.b64encode('binary\x00string')
print base64.b64decode('YmluYXJ5AHN0cmluZw==')
print base64.urlsafe_b64encode('i\xb7\xfb\xef\xff')
print base64.urlsafe_b64decode('abcd--__')
# 'abcd' -> 'YWJjZA==' (the trailing '==' is padding)
print base64.b64decode('YWJjZA==')
def safe_b64decode(s):
    # tolerate missing '=' padding by restoring it before decoding
    return base64.b64decode(s + '=' * (-len(s) % 4))
print safe_b64decode('YWJjZA')
|
zengboming/python
|
base1.py
|
Python
|
apache-2.0
| 297
|
#!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic, Vladimir Prus
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# Added to guard against a bug causing targets to be used before they
# themselves have finished building. This used to happen for targets built by a
# multi-file action that got triggered by another target, except when the
# target triggering the action was the first one in the list of targets
# produced by that action.
#
# Example:
# When target A and target B were declared as created by a single action with
# A being the first one listed, and target B triggered running that action
# then, while the action was still running, target A was already reporting as
# being built causing other targets depending on target A to be built
# prematurely.
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0)
t.write("sleep.bat", """\
::@timeout /T %1 /NOBREAK >nul
@ping 127.0.0.1 -n 2 -w 1000 >nul
@ping 127.0.0.1 -n %1 -w 1000 >nul
@exit /B 0
""")
t.write("file.jam", """\
if $(NT)
{
SLEEP = @call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
actions link
{
$(SLEEP) 1
echo 001 - linked
}
link dll lib ;
actions install
{
echo 002 - installed
}
install installed_dll : dll ;
DEPENDS installed_dll : dll ;
DEPENDS all : lib installed_dll ;
""")
t.run_build_system(["-ffile.jam", "-j2"], stdout="""\
...found 4 targets...
...updating 3 targets...
link dll
001 - linked
install installed_dll
002 - installed
...updated 3 targets...
""")
t.cleanup()
|
davehorton/drachtio-server
|
deps/boost_1_77_0/tools/build/test/core_parallel_multifile_actions_2.py
|
Python
|
mit
| 1,621
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
import torch
from torch import optim
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
def test_optimizer_with_scheduling(tmpdir):
"""Verify that learning rate scheduling is working"""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2, val_check_interval=0.5
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
init_lr = 0.1
adjusted_lr = [pg["lr"] for pg in trainer.optimizers[0].param_groups]
assert len(trainer.lr_schedulers) == 1
assert all(a == adjusted_lr[0] for a in adjusted_lr)
assert init_lr * 0.1 == adjusted_lr[0]
def test_multi_optimizer_with_scheduling(tmpdir):
"""Verify that learning rate scheduling is working"""
class TestModel(BoringModel):
init_lr = 5e-4
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=self.init_lr)
optimizer2 = optim.Adam(self.parameters(), lr=self.init_lr)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, step_size=1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, step_size=1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = TestModel()
model.training_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
adjusted_lr1 = [pg["lr"] for pg in trainer.optimizers[0].param_groups]
adjusted_lr2 = [pg["lr"] for pg in trainer.optimizers[1].param_groups]
assert len(trainer.lr_schedulers) == 2
assert all(a == adjusted_lr1[0] for a in adjusted_lr1)
assert all(a == adjusted_lr2[0] for a in adjusted_lr2)
assert model.init_lr * 0.1 == adjusted_lr1[0]
assert model.init_lr * 0.1 == adjusted_lr2[0]
def test_reducelronplateau_with_no_monitor_raises(tmpdir):
"""
Test exception when a ReduceLROnPlateau is used with no monitor
"""
model = BoringModel()
optimizer = optim.Adam(model.parameters())
model.configure_optimizers = lambda: ([optimizer], [optim.lr_scheduler.ReduceLROnPlateau(optimizer)])
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(
MisconfigurationException, match="`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
):
trainer.fit(model)
def test_reducelronplateau_with_no_monitor_in_lr_scheduler_dict_raises(tmpdir):
"""
Test exception when lr_scheduler dict has a ReduceLROnPlateau with no monitor
"""
model = BoringModel()
optimizer = optim.Adam(model.parameters())
model.configure_optimizers = lambda: {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer)},
}
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match="must include a monitor when a `ReduceLROnPlateau`"):
trainer.fit(model)
def test_reducelronplateau_scheduling(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
self.log("foo", batch_idx)
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters())
return {
"optimizer": optimizer,
"lr_scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
"monitor": "foo",
}
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
lr_scheduler = trainer.lr_schedulers[0]
assert lr_scheduler == dict(
scheduler=lr_scheduler["scheduler"],
monitor="foo",
interval="epoch",
frequency=1,
reduce_on_plateau=True,
strict=True,
opt_idx=None,
name=None,
)
def test_optimizer_return_options(tmpdir):
trainer = Trainer(default_root_dir=tmpdir)
model = BoringModel()
# single optimizer
opt_a = optim.Adam(model.parameters(), lr=0.002)
opt_b = optim.SGD(model.parameters(), lr=0.002)
scheduler_a = optim.lr_scheduler.StepLR(opt_a, 10)
scheduler_b = optim.lr_scheduler.StepLR(opt_b, 10)
# single optimizer
model.configure_optimizers = lambda: opt_a
opt, lr_sched, freq = trainer.init_optimizers(model)
assert len(opt) == 1 and len(lr_sched) == len(freq) == 0
# opt tuple
model.configure_optimizers = lambda: (opt_a, opt_b)
opt, lr_sched, freq = trainer.init_optimizers(model)
assert opt == [opt_a, opt_b]
assert len(lr_sched) == len(freq) == 0
# opt list
model.configure_optimizers = lambda: [opt_a, opt_b]
opt, lr_sched, freq = trainer.init_optimizers(model)
assert opt == [opt_a, opt_b]
assert len(lr_sched) == len(freq) == 0
ref_lr_sched = dict(
scheduler=scheduler_a,
interval="epoch",
frequency=1,
reduce_on_plateau=False,
monitor=None,
strict=True,
name=None,
opt_idx=None,
)
# opt tuple of 2 lists
model.configure_optimizers = lambda: ([opt_a], [scheduler_a])
opt, lr_sched, freq = trainer.init_optimizers(model)
assert len(opt) == len(lr_sched) == 1
assert len(freq) == 0
assert opt[0] == opt_a
assert lr_sched[0] == ref_lr_sched
# opt tuple of 1 list
model.configure_optimizers = lambda: ([opt_a], scheduler_a)
opt, lr_sched, freq = trainer.init_optimizers(model)
assert len(opt) == len(lr_sched) == 1
assert len(freq) == 0
assert opt[0] == opt_a
assert lr_sched[0] == ref_lr_sched
# opt single dictionary
model.configure_optimizers = lambda: {"optimizer": opt_a, "lr_scheduler": scheduler_a}
opt, lr_sched, freq = trainer.init_optimizers(model)
assert len(opt) == len(lr_sched) == 1
assert len(freq) == 0
assert opt[0] == opt_a
assert lr_sched[0] == ref_lr_sched
# opt multiple dictionaries with frequencies
model.configure_optimizers = lambda: (
{"optimizer": opt_a, "lr_scheduler": scheduler_a, "frequency": 1},
{"optimizer": opt_b, "lr_scheduler": scheduler_b, "frequency": 5},
)
opt, lr_sched, freq = trainer.init_optimizers(model)
assert len(opt) == len(lr_sched) == len(freq) == 2
assert opt[0] == opt_a
ref_lr_sched["opt_idx"] = 0
assert lr_sched[0] == ref_lr_sched
ref_lr_sched["scheduler"] = scheduler_b
ref_lr_sched["opt_idx"] = 1
assert lr_sched[1] == ref_lr_sched
assert freq == [1, 5]
def test_none_optimizer(tmpdir):
model = BoringModel()
model.configure_optimizers = lambda: None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)
with pytest.warns(UserWarning, match="will run with no optimizer"):
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_configure_optimizer_from_dict(tmpdir):
"""Tests if `configure_optimizer` method could return a dictionary with `optimizer` field only."""
class TestModel(BoringModel):
def configure_optimizers(self):
config = {"optimizer": optim.SGD(params=self.parameters(), lr=1e-03)}
return config
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@pytest.mark.parametrize(
"schedulers, kwargs, intervals, frequencies, expected_steps, max_epochs",
[
(
(optim.lr_scheduler.OneCycleLR, optim.lr_scheduler.OneCycleLR),
(dict(max_lr=0.01, total_steps=3), dict(max_lr=0.01, total_steps=2)),
("step", "step"),
(3, 2),
(4, 3),
1,
),
(
(optim.lr_scheduler.OneCycleLR, optim.lr_scheduler.OneCycleLR),
(dict(max_lr=0.01, total_steps=5), dict(max_lr=0.01, total_steps=5)),
("step", "step"),
(None, None),
(6, 6),
1,
),
(
(optim.lr_scheduler.StepLR, optim.lr_scheduler.CosineAnnealingLR),
(dict(step_size=5), dict(T_max=2)),
("epoch", "epoch"),
(5, 10),
(2, 3),
3,
),
],
)
def test_step_scheduling_for_multiple_optimizers_with_frequency(
tmpdir, schedulers, kwargs, intervals, frequencies, expected_steps, max_epochs
):
"""
Test that step LR schedulers for multiple optimizers follow
the optimizer frequencies when corresponding frequency is set.
"""
class DummyModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def training_epoch_end(self, outputs) -> None:
pass
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=0.01)
optimizer2 = optim.Adam(self.parameters(), lr=0.01)
lr_scheduler_config_1 = {"scheduler": schedulers[0](optimizer1, **kwargs[0]), "interval": intervals[0]}
lr_scheduler_config_2 = {"scheduler": schedulers[1](optimizer2, **kwargs[1]), "interval": intervals[1]}
return [
{"optimizer": optimizer1, "frequency": frequencies[0], "lr_scheduler": lr_scheduler_config_1},
{"optimizer": optimizer2, "frequency": frequencies[1], "lr_scheduler": lr_scheduler_config_2},
]
model = DummyModel()
trainer = Trainer(default_root_dir=tmpdir, limit_val_batches=1, limit_train_batches=5, max_epochs=max_epochs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.lr_schedulers[0]["opt_idx"] == 0
assert trainer.lr_schedulers[1]["opt_idx"] == 1
# Step count is 1 greater than the expected value because scheduler.step() is called once during initialization
assert trainer.lr_schedulers[0]["scheduler"]._step_count == expected_steps[0]
assert trainer.lr_schedulers[1]["scheduler"]._step_count == expected_steps[1]
@pytest.mark.parametrize("fn", ("validate", "test"))
def test_init_optimizers_during_evaluation(tmpdir, fn):
"""
    Test that the optimizer, scheduler and frequency lists are empty during evaluation
"""
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=0.1)
optimizer2 = optim.Adam(self.parameters(), lr=0.1)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, step_size=1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, step_size=1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
trainer = Trainer(default_root_dir=tmpdir, limit_val_batches=10, limit_test_batches=10)
validate_or_test = getattr(trainer, fn)
validate_or_test(TestModel(), ckpt_path=None)
assert len(trainer.lr_schedulers) == 0
assert len(trainer.optimizers) == 0
assert len(trainer.optimizer_frequencies) == 0
def test_multiple_optimizers_callbacks(tmpdir):
"""
Tests that multiple optimizers can be used with callbacks
"""
class CB(Callback):
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
pass
def on_train_epoch_start(self, trainer, pl_module):
pass
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.layer_1 = torch.nn.Linear(32, 2)
self.layer_2 = torch.nn.Linear(32, 2)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
a = batch[0]
acc = self.layer_1(a)
else:
a = batch[0]
acc = self.layer_2(a)
acc = self.loss(acc, acc)
return acc
def configure_optimizers(self):
a = optim.RMSprop(self.layer_1.parameters(), 1e-2)
b = optim.RMSprop(self.layer_2.parameters(), 1e-2)
return a, b
model = TestModel()
model.training_epoch_end = None
trainer = Trainer(
callbacks=[CB()],
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=2,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
@pytest.mark.parametrize("complete_epoch", [True, False])
@mock.patch("torch.optim.lr_scheduler.ReduceLROnPlateau.step")
def test_lr_scheduler_strict(step_mock, tmpdir, complete_epoch):
"""
Test "strict" support in lr_scheduler dict
"""
model = BoringModel()
optimizer = optim.Adam(model.parameters())
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
max_epochs = 1 if complete_epoch else None
max_steps = None if complete_epoch else 1
trainer = Trainer(default_root_dir=tmpdir, max_epochs=max_epochs, max_steps=max_steps)
model.configure_optimizers = lambda: {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": scheduler, "monitor": "giraffe", "strict": True},
}
if complete_epoch:
with pytest.raises(
MisconfigurationException,
match=r"ReduceLROnPlateau conditioned on metric .* which is not available\. Available metrics are:",
):
trainer.fit(model)
else:
trainer.fit(model)
step_mock.assert_not_called()
model.configure_optimizers = lambda: {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": scheduler, "monitor": "giraffe", "strict": False},
}
if complete_epoch:
with pytest.warns(
RuntimeWarning, match=r"ReduceLROnPlateau conditioned on metric .* which is not available but strict"
):
trainer.fit(model)
step_mock.assert_not_called()
def test_unknown_configure_optimizers_raises(tmpdir):
"""
Test exception with an unsupported configure_optimizers return
"""
model = BoringModel()
model.configure_optimizers = lambda: 1
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match="Unknown configuration for model optimizers"):
trainer.fit(model)
def test_lr_scheduler_with_unknown_interval_raises(tmpdir):
"""
Test exception when lr_scheduler dict has unknown interval param value
"""
model = BoringModel()
optimizer = optim.Adam(model.parameters())
model.configure_optimizers = lambda: {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": optim.lr_scheduler.StepLR(optimizer, 1), "interval": "incorrect_unknown_value"},
}
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match=r'The "interval" key in lr scheduler dict must be'):
trainer.fit(model)
def test_lr_scheduler_with_extra_keys_warns(tmpdir):
"""
Test warning when lr_scheduler dict has extra keys
"""
model = BoringModel()
optimizer = optim.Adam(model.parameters())
model.configure_optimizers = lambda: {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": optim.lr_scheduler.StepLR(optimizer, 1), "foo": 1, "bar": 2},
}
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.warns(RuntimeWarning, match=r"Found unsupported keys in the lr scheduler dict: \[.+\]"):
trainer.fit(model)
def test_lr_scheduler_with_no_actual_scheduler_raises(tmpdir):
"""
Test exception when lr_scheduler dict has no scheduler
"""
model = BoringModel()
model.configure_optimizers = lambda: {"optimizer": optim.Adam(model.parameters()), "lr_scheduler": {}}
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match='The lr scheduler dict must have the key "scheduler"'):
trainer.fit(model)
def test_invalid_optimizer_in_scheduler(tmpdir):
"""
    Test exception when the optimizer attached to an lr_scheduler wasn't returned
"""
class InvalidOptimizerModel(BoringModel):
def configure_optimizers(self):
opt1 = optim.SGD(self.layer.parameters(), lr=0.1)
opt2 = optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = optim.lr_scheduler.StepLR(opt2, step_size=1)
return [opt1], [lr_scheduler]
model = InvalidOptimizerModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match="attatched with an optimizer that wasn't returned"):
trainer.fit(model)
def test_invalid_optimizer_dict_raises(tmpdir):
"""
    Test exception when configure_optimizers returns an invalid mix of a dict and a bare optimizer
"""
class DummyModel(BoringModel):
def configure_optimizers(self):
return [{"optimizer": optim.Adam(self.parameters())}, optim.Adam(self.parameters())]
model = DummyModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.raises(MisconfigurationException, match="Unknown configuration for model optimizers"):
trainer.fit(model)
def test_warn_invalid_scheduler_key_in_manual_optimization(tmpdir):
"""
Test warning when invalid scheduler keys are provided in manual optimization.
"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def configure_optimizers(self):
opt = optim.SGD(self.layer.parameters(), lr=0.1)
sch = optim.lr_scheduler.StepLR(opt, step_size=1)
return [opt], [{"scheduler": sch, "interval": "epoch"}]
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
with pytest.warns(RuntimeWarning, match="the keys will be ignored"):
trainer.fit(model)
@RunIf(min_gpus=2, special=True)
def test_optimizer_state_on_device(tmpdir):
"""Test that optimizers that create state initially at instantiation still end up with the state on the GPU."""
class TestModel(BoringModel):
def configure_optimizers(self):
# Adagrad creates state tensors immediately, model is not yet on GPU.
return optim.Adagrad(self.parameters())
def on_train_start(self, *args, **kwargs):
opt = self.optimizers()
_, state = next(iter(opt.state.items()))
assert state["sum"].device == torch.device("cuda", self.local_rank) == self.device
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, gpus=2, accelerator="ddp", fast_dev_run=True)
trainer.fit(model)
@pytest.mark.parametrize("check_val_every_n_epoch", [1, 2])
@mock.patch("torch.optim.lr_scheduler.StepLR.step")
def test_lr_scheduler_epoch_step_frequency(mocked_sched, check_val_every_n_epoch, tmpdir):
epochs = 4
expected_steps = epochs + 1 # every LRScheduler gets called once at init
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
check_val_every_n_epoch=check_val_every_n_epoch,
max_epochs=epochs,
)
trainer.fit(model)
assert mocked_sched.call_count == expected_steps
@pytest.mark.parametrize("every_n_train_steps, epoch_interval", [(None, True), (2, False), (2, True)])
def test_lr_scheduler_state_updated_before_saving(tmpdir, every_n_train_steps, epoch_interval):
batches = 2
max_epochs = 1
lr, gamma = 1, 10
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
logger=False,
max_epochs=max_epochs,
limit_train_batches=batches,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=every_n_train_steps)],
)
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
lr_scheduler_config = {"scheduler": lr_scheduler}
if not epoch_interval:
lr_scheduler_config["interval"] = "step"
return [optimizer], [lr_scheduler_config]
def on_save_checkpoint(self, checkpoint):
lr_scheduler_config = checkpoint["lr_schedulers"][0]
# 2 batches ran. since the lr_scheduler_config interval is `step`, the step count should be 2
assert self.trainer.global_step + 1 == batches # the global step hasn't been increased yet
compare_to = max_epochs if epoch_interval else batches
assert lr_scheduler_config["_step_count"] - 1 == compare_to # step count starts at 1
assert lr_scheduler_config["_last_lr"] == [lr * gamma ** compare_to]
self.on_save_checkpoint_called = True
model = TestModel()
trainer.fit(model)
assert model.on_save_checkpoint_called
@pytest.mark.parametrize("save_on_train_epoch_end", (False, True))
def test_plateau_scheduler_lr_step_interval_updated_after_saving(tmpdir, save_on_train_epoch_end):
batches = 4
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
logger=False,
max_epochs=1,
limit_train_batches=batches,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, save_on_train_epoch_end=save_on_train_epoch_end)],
)
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
self.log("foo", batch_idx)
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer_1 = torch.optim.Adam(self.parameters())
optimizer_2 = torch.optim.Adam(self.parameters())
lr_scheduler1 = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_1)
lr_scheduler_config_1 = {"scheduler": lr_scheduler1, "interval": "step", "monitor": "foo"}
lr_scheduler2 = torch.optim.lr_scheduler.StepLR(optimizer_2, step_size=1)
lr_scheduler_config_2 = {"scheduler": lr_scheduler2, "interval": "step"}
return [optimizer_1, optimizer_2], [lr_scheduler_config_1, lr_scheduler_config_2]
def on_save_checkpoint(self, checkpoint):
lr_scheduler_config_1 = checkpoint["lr_schedulers"][0]
last_epoch = lr_scheduler_config_1["last_epoch"]
assert last_epoch == batches - (not save_on_train_epoch_end) # last epoch starts at 0
lr_scheduler_config_2 = checkpoint["lr_schedulers"][1]
assert lr_scheduler_config_2["_step_count"] - 1 == batches # step count starts at 1
self.on_save_checkpoint_called = True
model = TestModel()
model.training_epoch_end = None
trainer.fit(model)
assert model.on_save_checkpoint_called
|
williamFalcon/pytorch-lightning
|
tests/trainer/optimization/test_optimizers.py
|
Python
|
apache-2.0
| 24,264
|
'''
Hugefiles urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urllib2
from lib import captcha_lib
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class HugefilesResolver(UrlResolver):
name = "hugefiles"
domains = ["hugefiles.net"]
pattern = '(?://|\.)(hugefiles\.net)/([0-9a-zA-Z/]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
common.log_utils.log_debug('HugeFiles: get_link: %s' % (web_url))
html = self.net.http_GET(web_url).content
r = re.findall('File Not Found', html)
if r:
raise ResolverError('File Not Found or removed')
# Grab data values
data = helpers.get_hidden(html)
data['method_free'] = 'Free Download'
data.update(captcha_lib.do_captcha(html))
common.log_utils.log_debug('HugeFiles - Requesting POST URL: %s with data: %s' % (web_url, data))
html = self.net.http_POST(web_url, data).content
# Re-grab data values
data = helpers.get_hidden(html)
data['referer'] = web_url
headers = {'User-Agent': common.IE_USER_AGENT}
common.log_utils.log_debug('HugeFiles - Requesting POST URL: %s with data: %s' % (web_url, data))
request = urllib2.Request(web_url, data=urllib.urlencode(data), headers=headers)
try: stream_url = urllib2.urlopen(request).geturl()
except: return
common.log_utils.log_debug('Hugefiles stream Found: %s' % stream_url)
return stream_url
def get_url(self, host, media_id):
return 'http://hugefiles.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
|
wndias/bc.repository
|
script.module.urlresolver/lib/urlresolver/plugins/hugefiles.py
|
Python
|
gpl-2.0
| 2,636
|
""" Overrides for Docker-based devstack. """
from openedx.stanford.lms.envs.devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
# Docker does not support the syslog socket at /dev/log. Rely on the console.
LOGGING['handlers']['local'] = LOGGING['handlers']['tracking'] = {
'class': 'logging.NullHandler',
}
LOGGING['loggers']['tracking']['handlers'] = ['console']
LMS_BASE = 'edx.devstack.lms:18000'
CMS_BASE = 'edx.devstack.studio:18010'
SITE_NAME = LMS_BASE
LMS_ROOT_URL = 'http://{}'.format(LMS_BASE)
LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL
ECOMMERCE_PUBLIC_URL_ROOT = 'http://localhost:18130'
ECOMMERCE_API_URL = 'http://edx.devstack.ecommerce:18130/api/v2'
COMMENTS_SERVICE_URL = 'http://edx.devstack.forum:4567'
ENTERPRISE_API_URL = '{}/enterprise/api/v1/'.format(LMS_INTERNAL_ROOT_URL)
CREDENTIALS_INTERNAL_SERVICE_URL = 'http://edx.devstack.credentials:18150'
CREDENTIALS_PUBLIC_SERVICE_URL = 'http://localhost:18150'
OAUTH_OIDC_ISSUER = '{}/oauth2'.format(LMS_ROOT_URL)
DEFAULT_JWT_ISSUER = {
'ISSUER': OAUTH_OIDC_ISSUER,
'SECRET_KEY': 'lms-secret',
'AUDIENCE': 'lms-key',
}
JWT_AUTH.update({
'JWT_ISSUER': DEFAULT_JWT_ISSUER['ISSUER'],
'JWT_AUDIENCE': DEFAULT_JWT_ISSUER['AUDIENCE'],
'JWT_ISSUERS': [
DEFAULT_JWT_ISSUER,
RESTRICTED_APPLICATION_JWT_ISSUER,
],
})
FEATURES.update({
'AUTOMATIC_AUTH_FOR_TESTING': True,
'ENABLE_COURSEWARE_SEARCH': False,
'ENABLE_COURSE_DISCOVERY': False,
'ENABLE_DASHBOARD_SEARCH': False,
'ENABLE_DISCUSSION_SERVICE': True,
'SHOW_HEADER_LANGUAGE_SELECTOR': True,
'ENABLE_ENTERPRISE_INTEGRATION': False,
})
ENABLE_MKTG_SITE = os.environ.get('ENABLE_MARKETING_SITE', False)
MARKETING_SITE_ROOT = os.environ.get('MARKETING_SITE_ROOT', 'http://localhost:8080')
MKTG_URLS = {
'ABOUT': '/about',
'ACCESSIBILITY': '/accessibility',
'AFFILIATES': '/affiliate-program',
'BLOG': '/blog',
'CAREERS': '/careers',
'CONTACT': '/support/contact_us',
'COURSES': '/course',
'DONATE': '/donate',
'ENTERPRISE': '/enterprise',
'FAQ': '/student-faq',
'HONOR': '/edx-terms-service',
'HOW_IT_WORKS': '/how-it-works',
'MEDIA_KIT': '/media-kit',
'NEWS': '/news-announcements',
'PRESS': '/press',
'PRIVACY': '/edx-privacy-policy',
'ROOT': MARKETING_SITE_ROOT,
'SCHOOLS': '/schools-partners',
'SITE_MAP': '/sitemap',
'TRADEMARKS': '/trademarks',
'TOS': '/edx-terms-service',
'TOS_AND_HONOR': '/edx-terms-service',
'WHAT_IS_VERIFIED_CERT': '/verified-certificate',
}
CREDENTIALS_SERVICE_USERNAME = 'credentials_worker'
COURSE_CATALOG_API_URL = 'http://edx.devstack.discovery:18381/api/v1/'
|
Stanford-Online/edx-platform
|
lms/envs/devstack_docker.py
|
Python
|
agpl-3.0
| 2,707
|
# sqlalchemy/ext/baked.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200):
"""Construct a new bakery."""
_bakery = util.LRUCache(size)
def call(initial_fn, *args):
return cls(_bakery, initial_fn, args)
return call
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full:
_spoil_point = self._clone()
_spoil_point._cache_key += ('_query_only', )
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
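    # Illustrative sketch (hypothetical names): when a criteria step depends on
    # a value that cannot form part of the cache key, spoil the query first so
    # that this and later steps are re-run on every invocation:
    #
    #     baked = bakery(lambda s: s.query(User))
    #     baked.spoil()
    #     baked += lambda q: q.filter(User.flag == compute_runtime_value())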
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._cache_key, None)
if query is None:
query = self._as_query(session)
self._bakery[self._cache_key] = query.with_session(None)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
'_correlate', '_from_obj', '_mapper_adapter_map',
'_joinpath', '_joinpoint'):
query.__dict__.pop(attr, None)
self._bakery[self._cache_key] = context
return context
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes['baked_queries'] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if 'subquery' in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(self, session, context, params):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
for k, cache_key, query in context.attributes["baked_queries"]:
bk = BakedQuery(self._bakery, lambda sess: query.with_session(sess))
bk._cache_key = cache_key
context.attributes[k] = bk.for_session(session).params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = 'bq', 'session', '_params'
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params.update(kw)
return self
def _as_query(self):
return self.bq._as_query(self.session).params(self._params)
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._cache_key, None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(self.session, context, self._params)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
return context.query.params(self._params).\
with_session(self.session)._execute_and_instances(context)
def first(self):
"""Return the first row.
Equivalent to :meth:`.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(bq.for_session(self.session).params(self._params))
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`.Query.one`.
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def all(self):
"""Return all rows.
Equivalent to :meth:`.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_ident)
def _load_on_ident(self, query, key):
"""Load the given identity key from the database."""
ident = key[1]
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
This operation should be safe for all lazy loaders, and will reduce
Python overhead for these operations.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
assert strategies.LazyLoader._strategy_keys
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class BakedLazyLoader(strategies.LazyLoader):
def _emit_lazyload(self, session, state, ident_key, passive):
q = BakedQuery(
self.mapper._compiled_cache,
lambda session: session.query(self.mapper))
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q:
q.select_from(self.mapper, self.parent_property.secondary))
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_path:
q.spoil()
q.add_criteria(
lambda q:
q._with_current_path(state.load_path[self.parent_property]))
if state.load_options:
q.spoil()
q.add_criteria(
lambda q: q._conditional_options(*state.load_options))
if self.use_get:
return q(session)._load_on_ident(
session.query(self.mapper), ident_key)
if self.parent_property.order_by:
q.add_criteria(
lambda q:
q.order_by(*util.to_list(self.parent_property.order_by)))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, strategies.LazyLoader):
q.add_criteria(
lambda q:
q.options(
strategy_options.Load(
rev.parent).baked_lazyload(rev.key)))
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if orm_util._none_set.intersection(params.values()):
return None
q.add_criteria(lambda q: q.filter(lazy_clause))
result = q(session).params(**params).all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {})
@baked_lazyload._add_unbound_all_fn
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {})
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
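# Illustrative per-query usage (hypothetical ``User``/``addresses`` mapping):
# instead of the systemwide switch above, the baked lazy loader can be applied
# as a loader option on individual queries:
#
#     session.query(User).options(baked_lazyload(User.addresses)).all()
#     session.query(User).options(baked_lazyload_all("orders.items")).all()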
bakery = BakedQuery.bakery
|
hsum/sqlalchemy
|
lib/sqlalchemy/ext/baked.py
|
Python
|
mit
| 16,735
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_banner
version_added: "2.4"
author:
- Trishna Guha (@trishnaguha)
- Kedar Kekan (@kedarX)
short_description: Manage multiline banners on Cisco IOS XR devices
description:
- This module will configure both exec and motd banners on remote device
running Cisco IOS XR. It allows playbooks to add or remove
banner text from the running configuration.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XRv 6.1.2
options:
banner:
description:
- Specifies the type of banner to configure on remote device.
required: true
choices: ['login', 'motd']
text:
description:
- Banner text to be configured. Accepts multiline string,
without empty lines. Requires I(state=present).
state:
description:
- Existential state of the configuration on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
iosxr_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
iosxr_banner:
banner: motd
state: absent
- name: Configure banner from file
iosxr_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands sent to device with transport C(cli)
returned: always (empty list when no commands to send)
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<banners xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg">
<banner xc:operation="merge">
<banner-name>motd</banner-name>
<banner-text>Ansible banner example</banner-text>
</banner>
</banners>
</config>'
"""
import re
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec
from ansible.module_utils.network.iosxr.iosxr import build_xml, is_cliconf
from ansible.module_utils.network.iosxr.iosxr import etree_find, is_netconf
class ConfigBase(object):
def __init__(self, module):
self._module = module
self._result = {'changed': False, 'warnings': []}
self._want = {}
self._have = {}
def map_params_to_obj(self):
text = self._module.params['text']
if text:
text = "{!r}".format(str(text).strip())
self._want.update({
'banner': self._module.params['banner'],
'text': text,
'state': self._module.params['state']
})
class CliConfiguration(ConfigBase):
def __init__(self, module):
super(CliConfiguration, self).__init__(module)
def map_obj_to_commands(self):
commands = list()
state = self._module.params['state']
if state == 'absent':
if self._have.get('state') != 'absent' and ('text' in self._have.keys() and self._have['text']):
commands.append('no banner {!s}'.format(self._module.params['banner']))
elif state == 'present':
if (self._want['text'] and
self._want['text'].encode().decode('unicode_escape').strip("'") != self._have.get('text')):
banner_cmd = 'banner {!s} '.format(self._module.params['banner'])
banner_cmd += self._want['text'].strip()
commands.append(banner_cmd)
self._result['commands'] = commands
if commands:
commit = not self._module.check_mode
diff = load_config(self._module, commands, commit=commit)
if diff:
self._result['diff'] = dict(prepared=diff)
self._result['changed'] = True
def map_config_to_obj(self):
cli_filter = 'banner {!s}'.format(self._module.params['banner'])
output = get_config(self._module, config_filter=cli_filter)
match = re.search(r'banner (\S+) (.*)', output, re.DOTALL)
if match:
text = match.group(2).strip("'")
else:
text = None
obj = {'banner': self._module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = text
obj['state'] = 'present'
self._have.update(obj)
def run(self):
self.map_params_to_obj()
self.map_config_to_obj()
self.map_obj_to_commands()
return self._result
class NCConfiguration(ConfigBase):
def __init__(self, module):
super(NCConfiguration, self).__init__(module)
self._banners_meta = collections.OrderedDict()
self._banners_meta.update([
('banner', {'xpath': 'banners/banner', 'tag': True, 'attrib': "operation"}),
('a:banner', {'xpath': 'banner/banner-name'}),
('a:text', {'xpath': 'banner/banner-text', 'operation': 'edit'})
])
def map_obj_to_xml_rpc(self):
state = self._module.params['state']
_get_filter = build_xml('banners', xmap=self._banners_meta, params=self._module.params, opcode="filter")
running = get_config(self._module, source='running', config_filter=_get_filter)
banner_name = None
banner_text = None
if etree_find(running, 'banner-text') is not None:
banner_name = etree_find(running, 'banner-name').text
banner_text = etree_find(running, 'banner-text').text
opcode = None
if state == 'absent' and banner_name == self._module.params['banner'] and len(banner_text):
opcode = "delete"
elif state == 'present':
opcode = 'merge'
self._result['xml'] = []
if opcode:
_edit_filter = build_xml('banners', xmap=self._banners_meta, params=self._module.params, opcode=opcode)
if _edit_filter is not None:
commit = not self._module.check_mode
diff = load_config(self._module, _edit_filter, commit=commit, running=running, nc_get_filter=_get_filter)
if diff:
self._result['xml'] = _edit_filter
if self._module._diff:
self._result['diff'] = dict(prepared=diff)
self._result['changed'] = True
def run(self):
self.map_params_to_obj()
self.map_obj_to_xml_rpc()
return self._result
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(iosxr_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
config_object = None
if is_cliconf(module):
module.deprecate(msg="cli support for 'iosxr_banner' is deprecated. Use transport netconf instead",
version="4 releases from v2.5")
config_object = CliConfiguration(module)
elif is_netconf(module):
config_object = NCConfiguration(module)
result = None
if config_object is not None:
result = config_object.run()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/iosxr/iosxr_banner.py
|
Python
|
gpl-3.0
| 8,287
|
from battlenet.community import Community
class Character(Community):
ENDPOINT = '/wow/character/%s/%s'
def __init__(self, *args, **kwargs):
super(Character, self).__init__(*args, **kwargs)
self.name = kwargs.get('name', None)
self.realm = kwargs.get('realm', None)
if kwargs.get('fields'):
if not isinstance(kwargs['fields'], list):
raise ValueError(
'fields argument must be a list. E.g. ["achievements", "appearance"].'
)
self.fields = kwargs['fields']
else:
self.fields = []
def get(self, name=None, realm=None, fields=[]):
if name:
self.name = name
else:
if not self.name:
raise ValueError('Character name required.')
if realm:
self.realm = realm
else:
if not self.realm:
raise ValueError('Character realm required.')
if len(fields) > 0:
if not isinstance(fields, list):
raise ValueError(
'fields argument must be a list. E.g. ["achievements", "appearance"].'
)
self.fields = fields
if len(self.fields) > 0:
self.add_params(
{
'fields': ','.join(self.fields)
}
)
return self.make_request(self.ENDPOINT % (self.realm, self.name))
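# Illustrative usage sketch (not part of the original module): the character
# name, realm and field names below are hypothetical, and it is assumed that
# the Community base class accepts these keyword arguments and handles
# authentication/region selection.
#
#   character = Character(name='Thrall', realm='Durotan',
#                         fields=['achievements', 'appearance'])
#   profile = character.get()
#
# get() also accepts explicit arguments, e.g.
#   character.get(name='Jaina', realm='Proudmoore', fields=['items'])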
|
elryndir/GuildPortal
|
battlenet/community/wow/characters.py
|
Python
|
mit
| 1,473
|
from pydsp import *
PyDSP().configure_traits(view=view1)
|
antiface/PyDSP-1
|
pydsp/__main__.py
|
Python
|
bsd-2-clause
| 59
|
"""
Model objects used to represent data from the JPER account system
"""
from flask.ext.login import UserMixin
from werkzeug import generate_password_hash, check_password_hash
from octopus.core import app
from service import dao
from octopus.lib import dataobj
class Account(dataobj.DataObj, dao.AccountDAO, UserMixin):
"""
Account model which mirrors the JPER account model, providing only the functions
we need within the sword depositor
"""
@property
def api_key(self):
"""
Get the API key for this account
:return: the account's api key
"""
return self._get_single("api_key", coerce=self._utf8_unicode())
@property
def packaging(self):
"""
Get the list of supported packaging formats for this account
:return: list of packaging formats
"""
return self._get_list("packaging", coerce=self._utf8_unicode())
def add_packaging(self, val):
"""
Add a packaging format to the list of supported formats
:param val: format identifier
:return:
"""
self._add_to_list("packaging", val, coerce=self._utf8_unicode(), unique=True)
def add_sword_credentials(self, username, password, collection):
"""
Add the sword credentials for the user
:param username: username to deposit to repository as
:param password: password of repository user account
:param collection: collection url to deposit to
:return:
"""
self._set_single("sword.username", username, coerce=self._utf8_unicode())
self._set_single("sword.password", password, coerce=self._utf8_unicode())
self._set_single("sword.collection", collection, coerce=self._utf8_unicode())
@property
def sword_collection(self):
"""
Get the url of the collection in the repository to deposit to
:return: collection url
"""
return self._get_single("sword.collection", coerce=self._utf8_unicode())
@property
def sword_username(self):
"""
Get the username of the repository account to deposit as
:return: username
"""
return self._get_single("sword.username", coerce=self._utf8_unicode())
@property
def sword_password(self):
"""
Get the password for the repository user to deposit as
:return: password
"""
return self._get_single("sword.password", coerce=self._utf8_unicode())
@property
def repository_software(self):
"""
Get the name of the repository software we are depositing to
:return: software name (e.g. eprints, dspace)
"""
return self._get_single("repository.software", coerce=self._utf8_unicode())
@repository_software.setter
def repository_software(self, val):
"""
Set the name of the repository software we are depositing to
:param val: software name
:return:
"""
self._set_single("repository.software", val, coerce=self._utf8_unicode())
|
JiscPER/jper-sword-out
|
service/models/account.py
|
Python
|
apache-2.0
| 3,096
|
#! /usr/bin/env python
# libraries
import os, sys, subprocess, time, threading
from PIL import Image
# own modules and packages
from packages import rmconfig, rmmedia, rmutil, rmnetwork
from packages.rmnetwork import udpserver, tcpfilesocket, udpbroadcaster, messages, GroupManager
from constants import *
config = {}
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def startMediaPlayer():
# set config and path for player and start it
# rmmedia.mediaplayer.main()
global config
config = rmconfig.configtool.readConfig()
rmmedia.mediaplayer.setMediaPath(mediaPath)
rmmedia.mediaplayer.identify = False
if config['autoplay']:
rmmedia.mediaplayer.play()
def startUdpServer():
udpserver.start()
def openFileSocket():
tcpfilesocket.openFileSocket()
def checkThumbnails():
print "Checking thumbnails..."
mediaPath = os.getcwd() + '/media/'
thumbPath = mediaPath + 'thumbs/'
if not os.path.isdir(thumbPath):
os.mkdir(thumbPath)
cnt = 0
files = rmmedia.mediaplayer.getImageFilelist()
for name in files:
oPath = os.path.join(mediaPath, name)
tPath = os.path.join(thumbPath, name)
if not os.path.isfile(tPath):
# no thumbnail for image present -> create and save thumbnail
img = Image.open(oPath)
w = img.size[0]
h = img.size[1]
newW = 200
newH = newW * h / w
img.thumbnail((newW,newH))
img.save(os.path.join(thumbPath, name))
cnt += 1
print "%d missing thumbnails created and saved." % cnt
def main():
global config, groupConfig, mediaPath
config = rmconfig.configtool.initConfig()
# default media path
mediaPath = os.getcwd() + '/media/'
#print "Media Path: " + mediaPath
proc = subprocess.Popen(['tty'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = proc.communicate()
print result
print "Launching player..."
# hide console text of local tty0 on hdmi
# os.system("sudo sh -c \"TERM=linux setterm -foreground black -clear >/dev/pts/0\"")
os.system("sudo sh -c \"TERM=linux setterm -foreground black -clear >/dev/tty0\"")
startUdpServer()
openFileSocket()
startMediaPlayer()
# send boot complete broadcast
msgData = messages.getMessage(PLAYER_BOOT_COMPLETE)
udpbroadcaster.sendBroadcast(msgData, True)
# initialize group manager with group configuration
groupConfig = rmconfig.configtool.readGroupConfig()
print "GROUP CONFIG: ", groupConfig
GroupManager.InitGroupManager(groupConfig)
time.sleep(2)
GroupManager.Schedule()
# check if thumbnails completely present
t = threading.Thread(target=checkThumbnails)
t.daemon = True
t.start()
# simple CLI to modify and quit program when debugging
print ""
print ""
print "Type commands any time -->"
print "-- \"start\" to start the UDP server"
print "-- \"stop\" to stop and close the UDP server"
print "-- \"quit\" to exit the program"
print ""
print ""
running = True
while running:
cmd = raw_input("")
if(cmd == "start"):
udpserver.start()
elif(cmd == "stop"):
udpserver.stop()
elif(cmd == "quit"):
running = False
else:
print "Unknown command: ", cmd
# bring back console text on tty0 on hdmi
os.system("sudo sh -c \"TERM=linux setterm -foreground white -clear >/dev/pts/0\"")
os.system("sudo sh -c \"TERM=linux setterm -foreground white -clear >/dev/tty0\"")
udpserver.stop()
rmutil.processtool.killProcesses('fbi')
# startup image
# subprocess.call(["sudo","fbi","--once","-a","-noverbose","-T","2", "./raspmedia.jpg"])
if __name__ == '__main__':
print ""
print ":::::::::::::::::::::::::::::::::::::::::::::::::"
print ":::::::::: WELCOME TO RASPMEDIA PLAYER ::::::::::"
print ":::::::::::::::::::::::::::::::::::::::::::::::::"
print ""
main()
|
xserty/piDS
|
Raspberry/rasp-mediaplayer.py
|
Python
|
apache-2.0
| 4,051
|
import matplotlib.mlab as mlab
import numpy as np
from .recursive import KWS, alias
from .plotclasses import (XYPlot, xyplot)
@xyplot.decorate()
def xcorr(plot, *args, **kwargs):
""" PlotFactory Wrapper of function xcorr
    Contrary to matplotlib.xcorr, xcorr returns a new xyplot-like instance
    ready to plot the result of the correlation.
    Altered Parameters:
        lags, corr: correlation result
xerr, yerr : set to None
data1, data2 : original data
lines : alias(lags),
min, max : 0 and alias("corr") for lines
if direction == "y"
x and y : alias of "lags" and "corr" for plot
ymin : set to 0
            ymax : set to the correlation result (to be used with vlines)
        if direction == "x"
y and x : alias of "lags" and "corr" for plot
xmin : set to 0
xmax : alias of "corr" (to be used with vlines/lines)
the matplotlib doc is copied below
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Plot the cross correlation between *data1* and *data2*.
Parameters
----------
data1 : sequence of scalars of length n (can be aliases to "x" for instance)
data2 : sequence of scalars of length n (can be aliases to "y" for instance)
hold : boolean, optional, default: True
detrend : callable, optional, default: `mlab.detrend_none`
x is detrended by the `detrend` callable. Default is no
normalization.
normed : boolean, optional, default: True
if True, normalize the data by the autocorrelation at the 0-th
lag.
usevlines : boolean, optional, default: True
if True, Axes.vlines is used to plot the vertical lines from the
origin to the acorr. Otherwise, Axes.plot is used.
maxlags : integer, optional, default: 10
number of lags to show. If None, will return all 2 * len(x) - 1
lags.
Returns
-------
(lags, c, line, b) : where:
      - `lags` are a length 2*maxlags+1 lag vector.
      - `c` is the 2*maxlags+1 auto correlation vector.
- `line` is a `~matplotlib.lines.Line2D` instance returned by
`plot`.
- `b` is the x-axis (none, if plot is used).
Other parameters
-----------------
linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
Only used if usevlines is False.
marker : string, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
`mode` = 2.
"""
plot.update(kwargs.pop(KWS, {}), **kwargs)
(x, y, normed,
detrend, maxlags) = plot.parseargs(args, "data1", "data2", "normed",
"detrend", "maxlags",
normed=True,
detrend=mlab.detrend_none,
maxlags=10)
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed:
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
        raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags, maxlags + 1)
c = c[Nx - 1 - maxlags:Nx + maxlags]
di, dd = plot._get_direction()
plot.update(
{di:alias("lags"), dd:alias("corr"),
dd+"min":0, dd+"max":alias("corr")},
min=0, max=alias("corr"),
data=alias("corr"),
xerr = None, yerr=None,
lags=lags, corr=c,
data1 =x, data2=y
)
plot.goifgo()
######
# add instances to XYPlot
XYPlot.xcorr = xcorr.derive(data1=alias("x"), data2=alias("y"))
|
SylvainGuieu/smartplotlib
|
correlations.py
|
Python
|
gpl-2.0
| 3,932
|
import os
import pygame
import sys
import wx
from wiggler.core.events import StageEvents
from wiggler.engine.stage import Stage
tilemap = dict()
class StagePane(wx.Control):
def __init__(self, parent, id, resources, events, **options):
wx.Control.__init__(*(self, parent, id), **options)
self.parent = parent
self.resources = resources
self.stage = Stage(self.resources)
self.events = events
self._initialized = 0
self._resized = 0
self._surface = None
self.__needsDrawing = 1
self.size = self.GetSizeTuple()
self.stageevents = StageEvents()
wx.EVT_SIZE(self, self.OnSize)
wx.EVT_IDLE(self, self.OnIdle)
self.timer = wx.Timer(self)
self.events.subscribe(self, ['projload', 'play', 'stop'])
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_TIMER, self.Update, self.timer)
self.Bind(self.events.EVT_NOTICE, self.notice_handler)
self.Bind(wx.EVT_KEY_DOWN, self.stageevents.translate_key)
self.Bind(wx.EVT_KEY_UP, self.stageevents.translate_key)
self.Bind(wx.EVT_MOUSE_EVENTS, self.stageevents.translate_mouse)
self.max_fps = 25.0
self.timespacing = 1000.0 / self.max_fps
self.timer.Start(self.timespacing, False)
self.default_backcolor = (255, 255, 255)
def notice_handler(self, event):
if event.notice == 'projload':
self.clear()
elif event.notice == 'play':
self.play()
elif event.notice == 'stop':
self.stop()
event.Skip()
def OnIdle(self, ev):
if not self._initialized or self._resized:
if not self._initialized:
hwnd = self.GetHandle()
os.environ['SDL_WINDOWID'] = str(hwnd)
if sys.platform == 'win32':
os.environ['SDL_VIDEODRIVER'] = 'windib'
self.stage.start()
self._initialized = 1
else:
self._resized = 0
def clear(self):
self.stage.sweep()
def OnPaint(self, ev):
self.Redraw()
def OnSize(self, ev):
self.size = self.GetSizeTuple()
def Kill(self, event):
# Make sure Pygame can't be asked to redraw /before/ quitting
# by unbinding all methods which call the Redraw() method
# (Otherwise wx seems to call Draw between quitting Pygame
# and destroying the frame)
self.Unbind(event=wx.EVT_PAINT, handler=self.OnPaint)
self.Unbind(event=wx.EVT_TIMER, handler=self.Update, source=self.timer)
pygame.quit()
def Update(self, event):
# loop = main_event_queue.handle_events()
self.Redraw()
def Redraw(self):
if not self.stage.screen:
return
self.stage.update()
def play(self):
# if self.code_status == "undef":
# self.events.send('play')
# else:
# TODO(panda): warn about errors in the code
# pass
self.stage.pause = False
self.stage.reset()
def stop(self):
self.stage.pause = True
|
ProgrammaBol/wiggler
|
wiggler/ui/stagepane.py
|
Python
|
gpl-3.0
| 3,150
|
from uaperrors import StepError
import sys
from abstract_step import *
import glob
import misc
import process_pool
import yaml
import os
from logging import getLogger
logger = getLogger('uap_logger')
class StringtieMerge(AbstractStep):
'''
    # stringtie --merge <gtf.list> > outputpath/outputname
StringTie is a fast and highly efficient assembler of RNA-Seq alignments into potential
    transcripts. merge is a mode of the StringTie tool that is used to assemble
    transcripts from multiple input files (assemblies). It generates a unified
    non-redundant set of isoforms.
NOTE: This step implements the merging part of stringtie. If you want
stringtie to assemble transcripts from multiple BAM files please use step stringtie!
https://ccb.jhu.edu/software/stringtie/
'''
def __init__(self, pipeline):
super(StringtieMerge, self).__init__(pipeline)
self.set_cores(2)
# all .gft assemblies from all samples that have been produced with
# stringtie
self.add_connection('in/features', format=['gtf', 'gff3'],
description='Feature annotations to be merged.')
self.add_connection(
'in/reference',
format=[
'gtf',
'gff3'],
optional=True,
description='Reference assembly. Can also be passed with option G '
'or left out for denovo assembling.')
# merged assembly 'merged.gft'
self.add_connection('out/features', format='gtf') # merged.gtf
self.add_connection('out/assemblies') # input assemblies txt file
self.add_connection('out/log_stderr')
self.require_tool('stringtie')
self.require_tool('printf')
self.require_tool('mkdir')
self.require_tool('mv')
self.add_option(
'G',
str,
optional=True,
description='reference annotation to include in the merging (GTF/GFF3)')
self.add_option(
'm',
int,
optional=True,
description='minimum input transcript length to include in the merge (default: 50)')
self.add_option(
'c',
int,
optional=True,
description='minimum input transcript coverage to include in the merge (default: 0)')
self.add_option(
'F',
float,
optional=True,
description='minimum input transcript FPKM to include in the merge (default: 1.0)')
self.add_option(
'T',
float,
optional=True,
description='minimum input transcript TPM to include in the merge (default: 1.0)')
self.add_option('f', float, optional=True,
description='minimum isoform fraction (default: 0.01)')
self.add_option(
'g',
int,
optional=True,
description='gap between transcripts to merge together (default: 250)')
self.add_option(
'i',
bool,
optional=True,
description='keep merged transcripts with retained introns; by default')
self.add_option(
'l',
str,
optional=True,
description='name prefix for output transcripts (default: MSTRG)')
self.add_option('p', int, optional=True,
default=2, description='Number of cores')
self.add_option(
'output_prefix',
str,
optional=True,
default="merge",
        description='Prefix used in the output directory.')
def runs(self, cc):
# reset cores to number of threads
self.set_cores(self.get_option('p'))
# compile list of options
options = ['m', 'c', 'F', 'T', 'f', 'g', 'i', 'l']
set_options = [option for option in options if
self.is_option_set_in_config(option)]
option_list = list()
for option in set_options:
if isinstance(self.get_option(option), bool):
if self.get_option(option):
option_list.append('-%s' % option)
else:
value = str(self.get_option(option))
option_list.append('-%s' % option)
option_list.append(value)
option_ref_assembly = self.get_option('G')
if option_ref_assembly is not None:
option_ref_assembly = os.path.abspath(option_ref_assembly)
ref_assembly = cc.look_for_unique('in/reference', option_ref_assembly)
if cc.all_runs_have_connection('in/reference'):
raise StepError(
                self, 'For stringtieMerge only one reference assembly can be used.')
input_files = []
if ref_assembly is not None:
option_list.extend(['-G', ref_assembly])
if option_ref_assembly is None:
# include dependency
input_files.append(ref_assembly)
# get all paths to the stringtie assemblies from each sample
stringtie_sample_gtf = []
assembling_runs = cc.get_runs_with_connections('in/features')
for run_id in assembling_runs:
stringtie_sample_gtf.append(cc[run_id]['in/features'][0])
run_id = self.get_option('output_prefix')
run = self.declare_run(run_id)
# create the filename of the assemblies.txt file
assemblies = [self.get_tool('printf'), '\n'.join(stringtie_sample_gtf)]
# print assemblies
input_files.extend(stringtie_sample_gtf)
assemblies_file = run.add_output_file(
'assemblies', '%s-stringtieMerge-assemblies.txt' %
run_id, input_files)
# 1. create assemblies file
with run.new_exec_group() as exec_group:
exec_group.add_command(assemblies, stdout_path=assemblies_file)
with exec_group.add_pipeline() as stringtie_pipe:
res = run.add_output_file('features',
'%s-stringtieMerge-merged.gtf' %
run_id, input_files)
log_err_file = run.add_output_file(
'log_stderr', '%s-stringtieMerge-log_stderr.txt' %
run_id, input_files)
stringtieMerge = [self.get_tool('stringtie'), '--merge']
stringtieMerge.extend(option_list)
stringtieMerge.append(assemblies_file)
stringtie_pipe.add_command(stringtieMerge,
stderr_path=log_err_file,
stdout_path=res)
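                # For reference, the exec group assembled above corresponds roughly to
                # the shell invocation (paths are placeholders and options depend on the
                # configured step options):
                #   stringtie --merge [-G reference.gtf] [options] \
                #       <prefix>-stringtieMerge-assemblies.txt \
                #       > <prefix>-stringtieMerge-merged.gtf \
                #       2> <prefix>-stringtieMerge-log_stderr.txt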
|
kmpf/uap
|
include/steps/stringtieMerge.py
|
Python
|
gpl-3.0
| 6,723
|
'''BADA Coefficient file loader
This module provides access to the performance data contained in the various
BADA data files.
The current implementation is based on the official documentation described in
report: EEC Technical/Scientific Report No. 14/04/24-44. This report can be obtained here:
https://www.eurocontrol.int/sites/default/files/field_tabs/content/documents/sesar/user-manual-bada-3-12.pdf
'''
from glob import glob
from os import path
import re
from bluesky.tools.fwparser import FixedWidthParser, ParseError
# File formats of BADA data files. Uses fortran-like notation
# Adapted from the BADA manual format lines. (page 61-81 in the BADA manual)
# Skip characters are indicated with nnX
# Variables are indicated with nnF, nnI, and nnS (float, int, string)
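# For illustration (our reading of the notation, not taken from the BADA manual):
# a line such as 'CD, 3X, 6S, 9X, 1I' presumably skips the 'CD' record tag,
# skips 3 columns, reads a 6-character string, skips 9 columns and then reads a
# 1-digit integer.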
syn_format = ['CD, 1X, 1S, 1X, 4S, 3X, 18S, 1X, 25S, 1X, 6S, 2X, 1S']
syn_parser = FixedWidthParser(syn_format)
opf_format = [
# aircraft type block (1 data line)
'CD, 3X, 6S, 9X, 1I, 12X, 9S, 17X, 1S',
# mass block (1 data line)
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F',
# flight envelope block (1 data line)
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F',
# aerodynamics block (12 data lines)
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 15X, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 15X, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 15X, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 15X, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 15X, 3X, 10F, 3X, 10F, 3X, 10F',
'CD 50X',
'CD 50X',
'CD 50X',
'CD, 31X, 10F',
'CD 50X',
'CD 50X',
# engine thrust block (3 data lines)
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F',
'CD, 2X, 3X, 10F, 3X, 10F',
# fuel consumption block (3 data lines)
'CD, 2X, 3X, 10F, 3X, 10F',
'CD, 2X, 3X, 10F, 3X, 10F',
'CD, 5X, 10F',
# ground movement block (1 data line)
'CD, 2X, 3X, 10F, 3X, 10F, 3X, 10F, 3X, 10F']
opf_parser = FixedWidthParser(opf_format)
apt_format = [
# company name (1 line)
'CD, 2X, 3S, 1X, 2S, 4X, 15S',
# profiles for climb, cruise, and descent (3 lines)
'CD, 25X, 3I, 1X, 3I, 1X, 2I, 10X, 3I, 1X, 3I, 1X, 2I, 2X, 2I, 1X, 3I, 1X, 3I',
'CD, 25X, 3I, 1X, 3I, 1X, 2I, 10X, 3I, 1X, 3I, 1X, 2I, 2X, 2I, 1X, 3I, 1X, 3I',
'CD, 25X, 3I, 1X, 3I, 1X, 2I, 10X, 3I, 1X, 3I, 1X, 2I, 2X, 2I, 1X, 3I, 1X, 3I']
apf_parser = FixedWidthParser(apt_format)
# The available aircraft are stored by type id in synonyms. The actual coefficient data are stored in accoeffs
synonyms = dict()
accoeffs = dict()
release_date = 'Unknown'
bada_version = 'Unknown'
def getCoefficients(actype):
''' Get a set of BADA coefficients for the given aircraft type.
This function looks for the given aircraft type in the synonym list, and
    when successful, retrieves the corresponding coefficient set.
This function returns the synonym object (which contains more detailed
information about the aircraft type) and the coefficient object'''
if actype not in synonyms:
return False, actype + ' is not found in BADA aircraft database. \
(Check the file SYNONYM.NEW in your BADA path if you spelled the id correctly)'
syn = synonyms[actype]
if syn.file not in accoeffs:
return False, actype + ' exists in BADA synonym database, but corresponding \
coefficient file (%s) could not be found.' % syn.file
coeff = accoeffs[syn.file]
return syn, coeff
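# Illustrative usage sketch (the BADA directory and aircraft code below are
# hypothetical; BADA data files are licensed separately and not bundled here):
#
#   if init('/path/to/BADA'):
#       syn, coeff = getCoefficients('A320')
#       if syn:
#           print(coeff.m_ref, coeff.VMO, coeff.h_MO)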
def init(bada_path=''):
''' init() loads the available BADA datafiles in the provided directory.'''
releasefile = path.join(path.normpath(bada_path), 'ReleaseSummary')
if path.isfile(releasefile):
global release_date, bada_version
        re_reldate = re.compile(r'Summary Date:\s+(.+(?<!\s))\s*', re.IGNORECASE)
        re_badaver = re.compile(r'\s*BADA Release:\s+([\d.]+)\s*', re.IGNORECASE)
with open(releasefile) as f:
for line in f:
if re_reldate.match(line):
release_date = re_reldate.findall(line)[0]
elif re_badaver.match(line):
bada_version = re_badaver.findall(line)[0]
if 'Unknown' not in (release_date, bada_version):
break
print('Found BADA version %s (release date %s)' % (bada_version, release_date))
else:
print('No BADA release summary found: can not determine version.')
synonymfile = path.join(path.normpath(bada_path), 'SYNONYM.NEW')
if not path.isfile(synonymfile):
print('SYNONYM.NEW not found in BADA path, could not load BADA.')
return False
try:
data = syn_parser.parse(synonymfile)
except ParseError as e:
print('Error reading synonym file {} on line {}'.format(e.fname, e.lineno))
return False
for line in data:
syn = Synonym(line)
synonyms[syn.accode] = syn
print('%d aircraft entries loaded' % len(synonyms))
# Load aircraft coefficient data
for fname in glob(path.join(path.normpath(bada_path), '*.OPF')):
ac = ACData()
try:
ac.setOPFData(opf_parser.parse(fname))
if path.isfile(fname[:-4] + '.APF'):
ac.setAPFData(apf_parser.parse(fname[:-4] + '.APF'))
except ParseError as e:
print('Error reading {} on line {}'.format(e.fname, e.lineno))
ac = None
if ac:
accoeffs[ac.actype] = ac
print('%d unique aircraft coefficient sets loaded' % len(accoeffs))
return (len(synonyms) > 0 and len(accoeffs) > 0)
class Synonym(object):
def __init__(self, data):
self.is_equiv = (data[0] == '*') # False if model is directly supported in bada, true if supported through equivalent model
self.accode = data[1] # Aircraft code
self.manufact = data[2] # Aircraft manufacturer
self.model = data[3] # Aircraft model
self.file = data[4] # Corresponding coefficient filename
self.icao = (data[5].upper() == 'Y') # designator for this aircraft type is in use according to ICAO Doc 8643 [RD2]
class ACData(object):
# minimum speed coefficients
CVmin = 1.3
CVmin_to = 1.2
# reduced power coefficients
Cred_turboprop = 0.25
Cred_jet = 0.15
Cred_piston = 0.0
# value from BADA.gpf file
gr_acc = 2.0
def setOPFData(self, data):
# aircraft type block: 1 line
self.actype, self.neng, \
self.engtype, self.weightcat = data[0]
# mass block: 1 line
self.m_ref, self.m_min, self.m_max, \
self.m_paymax, self.mass_grad = data[1]
# flight envelope block: 1 line
self.VMO, self.MMO, self.h_MO, \
self.h_max, self.temp_grad = data[2]
# aerodynamics block: 12 lines
self.S, self.Clbo, self.k, self.CM16 = data[3]
self.Vstall_cr, self.CD0_cr, self.CD2_cr = data[4]
self.Vstall_ic, self.CD0_ic, self.CD2_ic = data[5]
self.Vstall_to, self.CD0_to, self.CD2_to = data[6]
self.Vstall_ap, self.CD0_ap, self.CD2_ap = data[7]
self.Vstall_ld, self.CD0_ld, self.CD2_ld = data[8]
self.CD0_gear = data[12][0]
# engine thrust block: 3 lines
self.CTC = data[15]
self.CTdes_low, self.CTdes_high, \
self.Hp_des, self.CTdes_app, \
self.CTdes_land = data[16]
self.Vdes_ref, self.Mdes_ref = data[17]
# fuel consumption block: 3 lines
self.Cf1, self.Cf2 = data[18]
self.Cf3, self.Cf4 = data[19]
self.Cf_cruise = data[20][0]
# ground movements block: 1 line
self.TOL, self.LDL, \
self.wingspan, self.length = data[21]
def setAPFData(self, data):
# Minimum, average, and high reference speeds for climb, cruise,
# and descent. xx1=low mass, xx2=high mass
self.CAScl1, self.CAScl2, self.Mcl, \
self.CAScr1, self.CAScr2, self.Mcr, \
self.Mdes, self.CASdes2, self.CASdes1 = list(zip(*data[1:])) # swap rows/columns
# Mach numbers are multiplied by 100 in the BADA files
self.Mcl = [m / 100.0 for m in self.Mcl]
self.Mcr = [m / 100.0 for m in self.Mcr]
self.Mdes = [m / 100.0 for m in self.Mdes]
|
ethertricity/bluesky
|
bluesky/traffic/performance/legacy/coeff_bada.py
|
Python
|
gpl-3.0
| 9,017
|
# coding:utf-8
import url_manager, html_downloader, html_parser, html_outputer
import traceback
import iMessage
class Crawl(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_urls):
count = 1
for root_url in root_urls:
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print 'craw %d : %s' % (count, new_url)
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
count = count + 1
except Exception,e:
print 'craw failed!'
#print 'str(Exception):\t', str(Exception)
#print 'str(e):\t\t', str(e)
#print 'repr(e):\t', repr(e)
#print 'e.message:\t', e.message
#print 'traceback.print_exc():'; traceback.print_exc()
#print 'traceback.format_exc():\n%s' % traceback.format_exc()
datass = self.outputer.output_html()
News = ''
for datas in datass:
for data in datas:
News += datas[data]+'\n'
#print News
if News != '':
iMessage.send_Message(News, 'CQUT_News')
if __name__=="__main__":
root_urls = ["http://cs.cqut.edu.cn/Notice/NoticeStudentMore.aspx", "http://cs.cqut.edu.cn/Notice/NoticeMore.aspx?NtcCategoryID=5", "http://cs.cqut.edu.cn/News/NewsMore.aspx", "http://cs.cqut.edu.cn/Notice/NoticeEmpMore.aspx"]
obj_News_crawl = Crawl()
obj_News_crawl.craw(root_urls)
|
newbee-7/News_Crawl
|
crawl.py
|
Python
|
mit
| 1,994
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((7012.45, 8906.78, 353.735), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((6417.51, 8618.07, 2018.89), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((7458.5, 7323.32, 2982.37), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((8563.58, 8494.85, 1301.1), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((9710.43, 7550.6, 1333.24), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((9645.33, 5649.37, 2900.25), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((9328.23, 4051.25, 3404.98), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((9868.9, 4417.28, 2735.18), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((8146.46, 2855.63, 4140.08), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((7547.63, 1496.06, 3331.75), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((6237.88, 1271.75, 4681.53), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((5512.13, 2232.35, 4469.89), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4237.73, 3119.54, 4660.71), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((4509.33, 3635.01, 3222.68), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2473.09, 4189.49, 2368.27), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((595.44, 6539.43, 3045.54), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1635.26, 7438.49, 4426.86), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((1200.08, 6349.23, 5192.49), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((2156.96, 4924.09, 5108.43), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((1887.72, 3461.58, 4960.48), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((4232.63, 3377.06, 5478.48), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2319.58, 4133.34, 5381.12), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((2329.27, 4528.37, 5702.94), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((1834.7, 5631.89, 6195.15), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((1791.52, 6728.54, 5405.95), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((783.959, 7497.94, 4636.41), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((1597.08, 6195.98, 4972.46), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3500.13, 5175.83, 4509.7), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3443.15, 4538.04, 5496.39), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3962.55, 3446.17, 6036.72), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((3953.27, 3946.16, 6350.91), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4806.28, 2985.73, 5216.45), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3304.1, 2942.37, 6332.1), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3038.4, 4364.26, 6767.05), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((2157.86, 4931.48, 5964.32), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((945.582, 5199.38, 5309.28), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((3217.48, 4334.18, 5531.59), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1650.93, 5275.65, 6038.39), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2615.98, 5770.04, 6242.48), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1709.7, 4538.76, 6698.75), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((3172.31, 3823.59, 7221.69), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((3678.93, 2038.55, 7518.62), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((1766.36, 2264.18, 9313.97), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3355.2, 1548.2, 8556.25), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2921.15, 3033.78, 7919.23), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((4779.9, 3251.57, 7357.01), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((4975.68, 4270.21, 9028.42), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3684.78, 3163.82, 10188.5), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((5468.15, 3665.51, 9789.02), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6838.43, 3807.23, 8511.49), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((6288.51, 2947.45, 9096.85), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6808.82, 3070.6, 7524.23), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((7025.79, 3628.48, 5881.36), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((8212.41, 2679.08, 5568.34), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((8408.96, 2457.67, 6294.48), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((7350.34, 3823.5, 7477.17), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((8456, 5694.3, 7242.38), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((8565.94, 7915.09, 8420.51), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((8523.85, 8415.42, 8713.47), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((9026.49, 8665.79, 8123.95), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((8766.35, 7644.88, 8617.49), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((9537.09, 7719.35, 9052.13), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((8493.14, 6703.95, 7826.4), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((7375.64, 8298.24, 7609.61), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((6329.8, 9915.21, 7784.74), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((5620.51, 8394.92, 8472.47), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((5799.34, 9510.77, 9711.1), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((6920.93, 7901.07, 8461.16), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((7525.17, 10114, 8747.85), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((5971.6, 10179.4, 8939.21), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((6854.24, 9680.73, 9750.19), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/mtx1_models/SHH_WT_models9585.py
|
Python
|
gpl-3.0
| 17,572
|
import time
import os
import sys
import threading
import importlib
#import mttkinter as tkinter
import tkinter
from tkinter import ttk
import tkinter.font
KRCC_MODULE_DECLARATION = 'DECLARE_' + 'KRCC' + '_MODULE'
krcc_modules = []
for dirpath, _, filenames in os.walk(os.getcwd()):
for filename in filenames:
if not filename.endswith('.py'):
continue
with open(os.path.join(dirpath, filename), 'r') as f:
for line in f.readlines():
if KRCC_MODULE_DECLARATION in line:
krcc_modules.append(filename[:-3])
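# A file counts as a KRCC module when its source contains the marker string above.
# Minimal sketch of such a module (an assumption inferred from how the loader uses
# it below: load(root) must return an object exposing .name, .run() and a
# .terminate flag):
#
#   # mymodule.py -- contains the DECLARE_KRCC_MODULE marker
#   class MyModule:
#       def __init__(self, root):
#           self.name = 'MyModule'
#           self.terminate = False
#       def run(self):
#           while not self.terminate:
#               pass
#   def load(root):
#       return MyModule(root)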
def on_combobox_changed(event):
loader.start_module(combobox.get())
def on_button_clicked(event):
loader.reload_module()
class KRCCModuleLoader:
def __init__(self, root):
self._root = root
self._lock = threading.Lock()
self._py_module = None
self._module = None
self._module_name = None
self._module_thread = None
self._shutdown = False
self._thread = threading.Thread(target=self._execute_module, name='_thread')
self._thread.start()
self._file_thread = threading.Thread(target=self._watch_file,
name='_file_thread')
self._file_thread.start()
def _execute_module(self):
error = None
dots = 0
while True:
try:
self._lock.acquire()
if self._module_name is None and self._py_module is None:
continue
if self._py_module is None:
self._py_module = importlib.import_module(self._module_name)
else:
self._py_module = importlib.reload(self._py_module)
self._module_name = self._py_module.__name__.split('/')[-1]
self._module_name = self._module_name.split('.')[0]
self._module = self._py_module.load(self._root)
print('\nStarting thread with module: %s' % self._module.name)
self._module_thread = threading.Thread(target=self._module.run,
name='_module_thread')
self._module_thread.start()
error = None
self._lock.release()
self._module_thread.join()
self._lock.acquire()
print('\nModule %s finished executing.' % self._module.name)
if self._shutdown:
self._module_name = None
return
except Exception as e:
if error != e.args[0]:
error = e.args[0]
print('\n')
print(e)
self._module_name = None
sys.stdout.write('Retrying')
if dots > 80:
dots = 0
sys.stdout.write('\n')
sys.stdout.write('.')
dots += 1
sys.stdout.flush()
time.sleep(1)
finally:
self._lock.release()
def _watch_file(self):
watched_file = None
while watched_file is None:
with self._lock:
if self._shutdown:
return
if self._module_name is not watched_file:
watched_file = self._module_name
if watched_file is None:
continue
stats = os.stat(watched_file + '.py')
mtime = stats.st_mtime
while True:
time.sleep(3)
new_stats = os.stat(watched_file + '.py')
new_mtime = new_stats.st_mtime
if new_mtime > mtime:
self.reload_module()
mtime = new_mtime
with self._lock:
if self._shutdown:
return
if self._module_name is None:
watched_file = None
break
def shutdown(self):
with self._lock:
if self._shutdown:
return
self._shutdown = True
self.stop_module()
tk.update()
    while self._thread.is_alive():
self._thread.join(0.01)
tk.update()
    while self._file_thread.is_alive():
self._file_thread.join(0.01)
tk.update()
def start_module(self, name):
with self._lock:
if self._shutdown:
return
if self._module_name != name:
self._module_name = name
if self._module is not None:
self._module.terminate = True
self._py_module = None
def stop_module(self):
with self._lock:
if self._module_name is not None:
self._module.terminate = True
self._module_name = None
def reload_module(self):
with self._lock:
if self._shutdown:
return
name = self._module_name
self.stop_module()
#self.start_module(name)
tk = tkinter.Tk()
tk.title('KRCC')
tk.geometry('1200x450+2180+550')
s = ttk.Style()
s.theme_use('clam')
s.configure('TButton', padding=(0, 1, 0, 1))
for font in tkinter.font.names():
tkinter.font.nametofont(font).configure(family='Liberation Sans')
tkinter.font.nametofont('TkFixedFont').configure(family='Liberation Mono')
app = ttk.Frame(tk)
app.pack(side=tkinter.TOP, fill=tkinter.X)
button = ttk.Button(app)
button['text'] = "reload"
button.pack(side=tkinter.RIGHT)
button.bind('<Button-1>', on_button_clicked)
should_auto_reload = tkinter.BooleanVar()
should_auto_reload.set(True)
auto_reload_checkbutton = ttk.Checkbutton(app, var=should_auto_reload)
auto_reload_checkbutton['text'] = 'Automatically reload'
auto_reload_checkbutton.pack(side=tkinter.RIGHT)
combobox = ttk.Combobox(app)
combobox['state'] = 'readonly'
combobox['values'] = krcc_modules
combobox.set(krcc_modules[0])
combobox.pack(side=tkinter.RIGHT)
combobox.bind('<<ComboboxSelected>>', on_combobox_changed)
module_frame = ttk.Frame(tk)
module_frame.pack(fill=tkinter.BOTH, expand=1)
loader = KRCCModuleLoader(module_frame)
loader.start_module(krcc_modules[0])
button['command'] = loader.reload_module
def on_shutdown():
loader.shutdown()
tk.quit()
tk.protocol("WM_DELETE_WINDOW", on_shutdown)
try:
tk.mainloop()
except KeyboardInterrupt:
loader.shutdown()
print('Shutdown complete! Have a nice day.')
|
jsartisohn/krpc_scripts
|
main.py
|
Python
|
agpl-3.0
| 5,741
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Filter rule to match family with a particular citation.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hascitationbase import HasCitationBase
#-------------------------------------------------------------------------
#
# HasCitation
#
#-------------------------------------------------------------------------
class HasCitation(HasCitationBase):
"""Rule that checks for a family with a particular value"""
labels = [ _('Volume/Page:'),
_('Date:'),
_('Confidence level:')]
name = _('Place with the <citation>')
description = _("Matches places with a citation of a particular "
"value")
|
sam-m888/gramps
|
gramps/gen/filters/rules/place/_hascitation.py
|
Python
|
gpl-2.0
| 1,909
|
# -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.Anexo import Anexo
from apps.seguridad.models.Usuario import Usuario
from apps.seguridad.audit import audit
@audit
class AnexoCertificacionCarga(models.Model):
anexo = models.ForeignKey(Anexo, related_name='certificacion_carga')
anio = models.IntegerField()
fecha = models.DateField()
usuario = models.ForeignKey(Usuario)
class Meta:
app_label = 'registro'
unique_together = ('anexo', 'anio')
db_table = 'registro_anexo_certificacion_carga'
|
MERegistro/meregistro
|
meregistro/apps/registro/models/AnexoCertificacionCarga.py
|
Python
|
bsd-3-clause
| 571
|
vals = IN[0]
elementlist = []
for val in vals:
elementlist.append(hex(val))
OUT = elementlist
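# Illustrative example (hypothetical input): with IN[0] = [10, 255, 4096]
# the node outputs ['0xa', '0xff', '0x1000'].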
|
andydandy74/ClockworkForDynamo
|
nodes/0.9.x/python/Math.DecimalToHex.py
|
Python
|
mit
| 94
|
"""Test the TcEx Batch Module."""
# third-party
import pytest
# pylint: disable=no-self-use
class TestUtils:
"""Test the TcEx Batch Module."""
@pytest.mark.parametrize(
'variable,value',
[
('#App:0002:b1!Binary', b'bytes 1'),
('#App:0002:b2!Binary', b'bytes 2'),
('#App:0002:b3!Binary', b'bytes 3'),
('#App:0002:b4!Binary', b'bytes 4'),
],
)
def test_playbook_binary(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary(variable, value)
result = tcex.playbook.read_binary(variable)
assert result == value, f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value',
[
('#App:0002:b1!Binary', 'not binary 1'),
('#App:0002:b2!Binary', []),
('#App:0002:b3!Binary', {}),
('#App:0002:b3!WrongType', 'wrong type'),
],
)
def test_playbook_binary_fail(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
try:
tcex.playbook.create_binary(variable, value)
assert False, f'{value} is not a valid Binary value'
except RuntimeError:
assert True
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:ba1!BinaryArray', [b'bytes 1', b'bytes 1']),
('#App:0003:ba2!BinaryArray', [b'bytes 2', b'bytes 2']),
('#App:0003:ba3!BinaryArray', [b'bytes 3', b'bytes 3']),
('#App:0003:ba4!BinaryArray', [b'bytes 4', b'bytes 4']),
],
)
def test_playbook_binary_array(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary_array(variable, value)
result = tcex.playbook.read_binary_array(variable)
assert result == value, f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:ba1!BinaryArray', ['not binary 1', 'not binary 1']),
('#App:0002:b3!WrongType', 'wrong type'),
],
)
def test_playbook_binary_array_fail(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
try:
tcex.playbook.create_binary_array(variable, value)
assert False, f'{value} is not a valid Binary Array value'
except RuntimeError:
assert True
#
# Type specific
#
@pytest.mark.parametrize(
'variable,value',
[
('#App:0002:b1!Binary', b'bytes 1'),
('#App:0002:b2!Binary', b'bytes 2'),
('#App:0002:b3!Binary', b'bytes 3'),
('#App:0002:b4!Binary', b'bytes 4'),
],
)
def test_playbook_binary_decode(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary(variable, value)
result = tcex.playbook.read_binary(variable, decode=True)
assert result == value.decode('utf-8'), f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value,expected',
[
('#App:0002:b1!Binary', b'bytes 1', 'Ynl0ZXMgMQ=='),
('#App:0002:b2!Binary', b'bytes 2', 'Ynl0ZXMgMg=='),
('#App:0002:b3!Binary', b'bytes 3', 'Ynl0ZXMgMw=='),
('#App:0002:b4!Binary', b'bytes 4', 'Ynl0ZXMgNA=='),
],
)
def test_playbook_binary_no_b64decode(self, variable, value, expected, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary(variable, value)
result = tcex.playbook.read_binary(variable, b64decode=False)
assert result == expected, f'result of ({result}) for ({value}) does not match ({expected})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:ba1!BinaryArray', [b'bytes 1', b'bytes 1']),
('#App:0003:ba2!BinaryArray', [b'bytes 2', b'bytes 2']),
('#App:0003:ba3!BinaryArray', [b'bytes 3', b'bytes 3']),
('#App:0003:ba4!BinaryArray', [b'bytes 4', b'bytes 4']),
],
)
def test_playbook_binary_array_decode(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary_array(variable, value)
result = tcex.playbook.read_binary_array(variable, decode=True)
assert result == [
v.decode('utf-8') for v in value
], f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value,expected',
[
(
'#App:0003:ba1!BinaryArray',
[b'bytes 1', b'bytes 1'],
['Ynl0ZXMgMQ==', 'Ynl0ZXMgMQ=='],
),
(
'#App:0003:ba2!BinaryArray',
[b'bytes 2', b'bytes 2'],
['Ynl0ZXMgMg==', 'Ynl0ZXMgMg=='],
),
(
'#App:0003:ba3!BinaryArray',
[b'bytes 3', b'bytes 3'],
['Ynl0ZXMgMw==', 'Ynl0ZXMgMw=='],
),
(
'#App:0003:ba4!BinaryArray',
[b'bytes 4', b'bytes 4'],
['Ynl0ZXMgNA==', 'Ynl0ZXMgNA=='],
),
],
)
def test_playbook_binary_array_no_b64decode(self, variable, value, expected, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (list): The value to store in Key Value Store.
expected (list): The expected output of the read command.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_binary_array(variable, value)
result = tcex.playbook.read_binary_array(variable, b64decode=False)
assert result == expected, f'result of ({result}) for ({value}) does not match ({expected})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
|
kstilwell/tcex
|
tests/playbooks/test_playbook_binary_types.py
|
Python
|
apache-2.0
| 8,370
|
import unittest
import os
import logging
from osm2gtfs.tests.creators.creators_tests import CreatorsTestsAbstract
# Define logging level
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
class TestCreatorsNiManagua(CreatorsTestsAbstract):
def _get_selector(self):
return "ni_managua"
def _get_required_variables(self):
# Define required values for the tests of this provider
return {
'routes_count': 45,
'stops_count': 1450,
'stations_count': 547,
'stops_osm_count': 1997,
'route_id_to_check': 111,
'gtfs_files': [
"agency.txt", "calendar.txt", "routes.txt", "shapes.txt",
"stops.txt", "stop_times.txt", "trips.txt"
],
}
def _override_configuration(self):
# Overriding some of the configuration options
# Use local timetable.json
self.config.data['schedule_source'] = os.path.join(
self.standard_variables['fixture_dir'], "timetable.json")
# Use timeframe of reference GTFS
self.config.data['start_date'] = "201780101"
self.config.data['end_date'] = "20180201"
def load_tests(loader, tests, pattern):
# pylint: disable=unused-argument
test_cases = ['test_refresh_routes_cache', 'test_refresh_stops_cache', 'test_gtfs_from_cache']
suite = unittest.TestSuite(map(TestCreatorsNiManagua, test_cases))
return suite
if __name__ == '__main__':
unittest.main()
|
nlehuby/osm2gtfs
|
osm2gtfs/tests/creators/tests_ni_managua.py
|
Python
|
gpl-3.0
| 1,515
|
import sys, os
import pyentropy
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError
extension_build_failed = False
def ext_failed_warning(name):
print( ('*'*70+'\n')*3)
print("""WARNING: The %s extension module could not be
compiled. pyEntropy should run, but the features
present in that file will not be available.
Above is the output showing how the compilation
failed."""%name)
if sys.platform == 'win32':
print()
print("""I see you are using Windows. The default
compiler for this platform is the Microsoft Visual
Studio C compiler. However, a free alternative
compiler called mingw can be used instead.""")
print()
print( ('*'*70+'\n')*3)
global extension_build_failed
extension_build_failed = True
try:
from gsl_dist.gsl_Extension import gsl_Extension
except DistutilsExecError:
ext_failed_warning('gsl-based')
exts = []
wrap_sources = ['hist_c.c', 'sort_c.c', 'gen_c.c', 'entropy_c.c',
'entropy_nsb_c.cpp', 'entropy_bub_c.c', 'wrap.c']
statk_wrap_sources = [os.path.join('pyentropy','statk',x) for x in wrap_sources]
try:
statk_wrap = gsl_Extension("statk.wrap",
sources = statk_wrap_sources,
gsl_min_version=(1,),
python_min_version=(2,5)
)
exts.append(statk_wrap)
except:
pass
class build_ext_allow_fail( build_ext ):
# This class allows C extension building to fail.
# Taken from visionegg (LGPL)
# http://github.com/visionegg/visionegg/blob/master/setup.py
# http://www.visionegg.org/
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except CCompilerError as x:
ext_failed_warning(ext.name)
setup(name='pyentropy',
version=pyentropy.__version__,
description='Entropy and Information Theoretic Estimates',
author=pyentropy.__author__,
author_email='pyentropy@robince.net',
url='http://code.google.com/p/pyentropy',
packages=['pyentropy','pyentropy.tests','pyentropy.statk'],
ext_package='pyentropy',
ext_modules=exts,
cmdclass={'build_ext':build_ext_allow_fail}
)
if extension_build_failed:
print( ('*'*70+'\n')*3)
print ("""WARNING: Building of some extensions failed. Please
see the messages above for details.\n""")
print( ('*'*70+'\n')*3)
|
robince/pyentropy
|
setup.py
|
Python
|
gpl-2.0
| 2,767
|
from django.core.cache import cache
from airmozilla.starred.models import StarredEvent
def stars(request):
context = {}
if request.user.is_active:
context['star_ids'] = _get_star_ids(request.user)
return context
def _get_star_ids(user):
cache_key = 'star_ids%s' % user.id
as_string = cache.get(cache_key)
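    # The ids are cached as a comma-separated string (illustrative value:
    # '12,45,103') for one hour. An empty string is a legitimate cached value
    # for a user with no stars, which is why the check below is against None.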
if as_string is None:
ids = list(
StarredEvent.objects
.filter(user=user)
.values_list('event_id', flat=True)
.order_by('created')
)
as_string = ','.join(str(x) for x in ids)
cache.set(cache_key, as_string, 60 * 60)
return as_string
|
chirilo/airmozilla
|
airmozilla/starred/context_processors.py
|
Python
|
bsd-3-clause
| 659
|
#!/usr/bin/env python
# This file is part of ntdsxtract.
#
# ntdsxtract is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ntdsxtract is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ntdsxtract. If not, see <http://www.gnu.org/licenses/>.
'''
@author: Csaba Barta
@license: GNU General Public License 2.0 or later
@contact: csaba.barta@gmail.com
'''
import sys
from struct import *
from binascii import *
import ntds.version
from ntds.dstime import *
from ntds.lib.dump import *
import time
if len(sys.argv) < 2:
sys.stderr.write("\nDSFileInformation v" + str(ntds.version.version))
sys.stderr.write("\nExtracts information related to the NTDS.DIT database file")
sys.stderr.write("\n\nusage: %s <ntds.dit>\n" % sys.argv[0])
sys.stderr.write("\n\n options:")
sys.stderr.write("\n --debug")
sys.stderr.write("\n Turn on detailed error messages and stack trace")
sys.exit(1)
sys.stderr.write("\n[+] Started at: %s" % time.strftime(
"%a, %d %b %Y %H:%M:%S UTC",
time.gmtime()))
f = open(sys.argv[1], "rb", 0)
header = f.read(8192)
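# The unpack() calls below read unsigned 32-bit fields from fixed offsets of
# the NTDS.DIT file header (NTDS.DIT is an ESE database, so this is assumed to
# follow the ESE header layout); only a few well-known fields are decoded, and
# the first 672 bytes are hex-dumped at the end.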
(pagesize, ) = unpack('I', header[236:240])
(wmajorversion, ) = unpack('I', header[216:220])
(wminorversion, ) = unpack('I', header[220:224])
(wbuildnumber, ) = unpack('I', header[224:228])
(wservicepack, ) = unpack('I', header[228:232])
print "Header checksum: %s" % hexlify(header[:4][::-1])
print "Signature: %s" % hexlify(header[4:8][::-1])
print "File format version: %s" % hexlify(header[8:12][::-1])
print "File type: %s" % hexlify(header[12:16][::-1])
print "Page size: %d bytes" % pagesize
print "DB time: %s" % hexlify(header[16:24][::-1])
print "Windows version: %d.%d (%d) Service pack %d" % (
wmajorversion,
wminorversion,
wbuildnumber,
wservicepack
)
print "Creation time: %04d.%02d.%02d %02d:%02d:%02d" % dsGetDBLogTimeStampStr(header[24:52][4:12])
print "Attach time: %04d.%02d.%02d %02d:%02d:%02d" % dsGetDBLogTimeStampStr(header[72:80])
if unpack("B", header[88:96][:1]) == (0, ):
print "Detach time: database is in dirty state"
else:
print "Detach time: %04d.%02d.%02d %02d:%02d:%02d" % dsGetDBLogTimeStampStr(header[88:96])
print "Consistent time: %04d.%02d.%02d %02d:%02d:%02d" % dsGetDBLogTimeStampStr(header[64:72])
print "Recovery time: %04d.%02d.%02d %02d:%02d:%02d" % dsGetDBLogTimeStampStr(header[244:252])
print "Header dump (first 672 bytes):"
print dump(header[:672], 16, 4)
f.close()
|
csababarta/ntdsxtract
|
dsfileinformation.py
|
Python
|
gpl-3.0
| 3,281
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# BEGIN LICENSE
# Copyright (C) 2019, Wolf Vollprecht <w.vollprecht@gmail.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# END LICENSE
import re
import os
import telnetlib
from gettext import gettext as _
from urllib.parse import unquote
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("WebKit2", "4.0")
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib
from gi.repository import WebKit2
from uberwriter import latex_to_PNG, markup_regex
from uberwriter.settings import Settings
class DictAccessor:
reEndResponse = re.compile(br"^[2-5][0-58][0-9] .*\r\n$", re.DOTALL + re.MULTILINE)
reDefinition = re.compile(br"^151(.*?)^\.", re.DOTALL + re.MULTILINE)
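    # The two patterns above follow the DICT protocol (RFC 2229): a server
    # response ends with a 2xx-5xx status line, and each definition body is
    # introduced by a 151 line and terminated by a line containing a single '.'.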
def __init__(self, host="pan.alephnull.com", port=2628, timeout=60):
self.telnet = telnetlib.Telnet(host, port)
self.timeout = timeout
self.login_response = self.telnet.expect([self.reEndResponse], self.timeout)[2]
def run_command(self, cmd):
self.telnet.write(cmd.encode("utf-8") + b"\r\n")
return self.telnet.expect([self.reEndResponse], self.timeout)[2]
def get_matches(self, database, strategy, word):
if database in ["", "all"]:
d = "*"
else:
d = database
if strategy in ["", "default"]:
s = "."
else:
s = strategy
w = word.replace("\"", r"\\\"")
tsplit = self.run_command("MATCH {} {} \"{}\"".format(d, s, w)).splitlines()
mlist = list()
if tsplit[-1].startswith(b"250 ok") and tsplit[0].startswith(b"1"):
mlines = tsplit[1:-2]
for line in mlines:
lsplit = line.strip().split()
db = lsplit[0]
word = unquote(" ".join(lsplit[1:]))
mlist.append((db, word))
return mlist
def get_definition(self, database, word):
if database in ["", "all"]:
d = "*"
else:
d = database
w = word.replace("\"", r"\\\"")
dsplit = self.run_command("DEFINE {} \"{}\"".format(d, w)).splitlines(True)
dlist = list()
if dsplit[-1].startswith(b"250 ok") and dsplit[0].startswith(b"1"):
dlines = dsplit[1:-1]
dtext = b"".join(dlines)
dlist = [dtext]
return dlist
def close(self):
t = self.run_command("QUIT")
self.telnet.close()
return t
def parse_wordnet(self, response):
# consisting of group (n,v,adj,adv)
# number, description, examples, synonyms, antonyms
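        # Illustrative (assumed) shape of the value returned below, for a noun
        # with a single sense:
        #   [{'class': 'n',
        #     'defs': [{'num': '1',
        #               'description': ['a domesticated carnivorous mammal'],
        #               'examples': ['"the dog barked"'],
        #               'syn': ['domestic dog']}]}]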
lines = response.splitlines()
lines = lines[2:]
lines = " ".join(lines)
lines = re.sub(r"\s+", " ", lines).strip()
lines = re.split(r"( adv | adj | n | v |^adv |^adj |^n |^v )", lines)
res = []
act_res = {"defs": [], "class": "none", "num": "None"}
for l in lines:
l = l.strip()
if not l:
continue
if l in ["adv", "adj", "n", "v"]:
if act_res:
res.append(act_res.copy())
act_res = {"defs": [], "class": l}
else:
ll = re.split(r"(?: |^)(\d): ", l)
act_def = {}
for lll in ll:
if lll.strip().isdigit() or not lll.strip():
if "description" in act_def and act_def["description"]:
act_res["defs"].append(act_def.copy())
act_def = {"num": lll}
continue
a = re.findall(r"(\[(syn|ant): (.+?)\] ??)+", lll)
for n in a:
if n[1] == "syn":
act_def["syn"] = re.findall(r"{(.*?)}.*?", n[2])
else:
act_def["ant"] = re.findall(r"{(.*?)}.*?", n[2])
tbr = re.search(r"\[.+\]", lll)
if tbr:
lll = lll[:tbr.start()]
lll = lll.split(";")
act_def["examples"] = []
act_def["description"] = []
for llll in lll:
llll = llll.strip()
if llll.strip().startswith("\""):
act_def["examples"].append(llll)
else:
act_def["description"].append(llll)
if act_def and "description" in act_def:
act_res["defs"].append(act_def.copy())
res.append(act_res.copy())
return res
def get_dictionary(term):
da = DictAccessor()
output = da.get_definition("wn", term)
if output:
output = output[0]
else:
return None
return da.parse_wordnet(output.decode(encoding="UTF-8"))
class InlinePreview:
WIDTH = 400
HEIGHT = 300
def __init__(self, text_view):
self.settings = Settings.new()
self.text_view = text_view
self.text_view.connect("button-press-event", self.on_button_press_event)
self.text_buffer = text_view.get_buffer()
self.cursor_mark = self.text_buffer.create_mark(
"click", self.text_buffer.get_iter_at_mark(self.text_buffer.get_insert()))
self.latex_converter = latex_to_PNG.LatexToPNG()
self.characters_per_line = self.settings.get_int("characters-per-line")
self.popover = Gtk.Popover.new(self.text_view)
self.popover.get_style_context().add_class("quick-preview-popup")
self.popover.set_modal(True)
self.preview_fns = {
markup_regex.MATH: self.get_view_for_math,
markup_regex.IMAGE: self.get_view_for_image,
markup_regex.LINK: self.get_view_for_link,
markup_regex.LINK_ALT: self.get_view_for_link,
markup_regex.FOOTNOTE_ID: self.get_view_for_footnote,
re.compile(r"(?P<text>\w+)"): self.get_view_for_lexikon
}
def on_button_press_event(self, _text_view, event):
if event.button == 1 and event.state & Gdk.ModifierType.CONTROL_MASK:
x, y = self.text_view.window_to_buffer_coords(2, int(event.x), int(event.y))
self.text_buffer.move_mark(
self.cursor_mark, self.text_view.get_iter_at_location(x, y).iter)
self.open_popover(self.text_view)
def get_view_for_math(self, match):
success, result = self.latex_converter.generatepng(match.group("text"))
if success:
return Gtk.Image.new_from_file(result)
else:
error = _("Formula looks incorrect:")
error += "\n\n“{}”".format(result)
return Gtk.Label(label=error)
def get_view_for_image(self, match):
path = match.group("url")
if path.startswith(("https://", "http://", "www.")):
return self.get_view_for_link(match)
if path.startswith(("file://")):
path = path[7:]
if not path.startswith(("/", "file://")):
path = os.path.join(self.settings.get_string("open-file-path"), path)
path = unquote(path)
return Gtk.Image.new_from_pixbuf(
GdkPixbuf.Pixbuf.new_from_file_at_size(path, self.WIDTH, self.HEIGHT))
def get_view_for_link(self, match):
url = match.group("url")
web_view = WebKit2.WebView(zoom_level=0.3125) # ~1280x960
web_view.set_size_request(self.WIDTH, self.HEIGHT)
if GLib.uri_parse_scheme(url) is None:
url = "http://{}".format(url)
web_view.load_uri(url)
return web_view
def get_view_for_footnote(self, match):
footnote_id = match.group("id")
fn_matches = re.finditer(markup_regex.FOOTNOTE, self.text_buffer.props.text)
for fn_match in fn_matches:
if fn_match.group("id") == footnote_id:
if fn_match:
footnote = re.sub("\n[\t ]+", "\n", fn_match.group("text"))
else:
footnote = _("No matching footnote found")
label = Gtk.Label(label=footnote)
label.set_max_width_chars(self.characters_per_line)
label.set_line_wrap(True)
return label
return None
def get_view_for_lexikon(self, match):
term = match.group("text")
lexikon_dict = get_dictionary(term)
if lexikon_dict:
grid = Gtk.Grid.new()
grid.get_style_context().add_class("lexikon")
grid.set_row_spacing(2)
grid.set_column_spacing(4)
i = 0
for entry in lexikon_dict:
if not entry["defs"]:
continue
elif entry["class"].startswith("n"):
word_type = _("noun")
elif entry["class"].startswith("v"):
word_type = _("verb")
elif entry["class"].startswith("adj"):
word_type = _("adjective")
elif entry["class"].startswith("adv"):
word_type = _("adverb")
else:
continue
vocab_label = Gtk.Label.new(term + " ~ " + word_type)
vocab_label.get_style_context().add_class("header")
if i == 0:
vocab_label.get_style_context().add_class("first")
vocab_label.set_halign(Gtk.Align.START)
vocab_label.set_justify(Gtk.Justification.LEFT)
grid.attach(vocab_label, 0, i, 3, 1)
for definition in entry["defs"]:
i = i + 1
num_label = Gtk.Label.new(definition["num"] + ".")
num_label.get_style_context().add_class("number")
num_label.set_valign(Gtk.Align.START)
grid.attach(num_label, 0, i, 1, 1)
def_label = Gtk.Label(label=" ".join(definition["description"]))
def_label.get_style_context().add_class("description")
def_label.set_halign(Gtk.Align.START)
def_label.set_max_width_chars(self.characters_per_line)
def_label.set_line_wrap(True)
def_label.set_justify(Gtk.Justification.FILL)
grid.attach(def_label, 1, i, 1, 1)
i = i + 1
if i > 0:
return grid
return None
def open_popover(self, _editor, _data=None):
start_iter = self.text_buffer.get_iter_at_mark(self.cursor_mark)
line_offset = start_iter.get_line_offset()
end_iter = start_iter.copy()
start_iter.set_line_offset(0)
end_iter.forward_to_line_end()
text = self.text_buffer.get_text(start_iter, end_iter, False)
for regex, get_view_fn in self.preview_fns.items():
matches = re.finditer(regex, text)
for match in matches:
if match.start() <= line_offset <= match.end():
prev_view = self.popover.get_child()
if prev_view:
prev_view.destroy()
view = get_view_fn(match)
view.show_all()
self.popover.add(view)
rect = self.text_view.get_iter_location(
self.text_buffer.get_iter_at_mark(self.cursor_mark))
rect.x, rect.y = self.text_view.buffer_to_window_coords(
Gtk.TextWindowType.TEXT, rect.x, rect.y)
self.popover.set_pointing_to(rect)
GLib.idle_add(self.popover.popup) # TODO: It doesn't popup without idle_add.
return
|
wolfv/uberwriter
|
uberwriter/inline_preview.py
|
Python
|
gpl-3.0
| 12,369
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""These are job parameters that are common to every type of Jenkins job.
Example:
.. literalinclude:: /../../tests/yamlparser/fixtures/general-example-001.yaml
:Job Parameters:
* **project-type**:
Defaults to "freestyle", but "maven" as well as "multijob", "flow",
"workflow" or "externaljob" can also be specified.
* **defaults**:
Specifies a set of :ref:`defaults` to use for this job, defaults to
''global''. If you have values that are common to all of your jobs,
create a ``global`` :ref:`defaults` object to hold them, and no further
configuration of individual jobs is necessary. If some jobs
should not use the ``global`` defaults, use this field to specify a
different set of defaults.
* **description**:
The description for the job. By default, the description
"!-- Managed by Jenkins Job Builder" is applied.
* **disabled**:
Boolean value to set whether or not this job should be disabled in
Jenkins. Defaults to ``false`` (job will be enabled).
* **display-name**:
Optional name shown for the project throughout the Jenkins web GUI in
place of the actual job name. The jenkins_jobs tool cannot fully remove
this trait once it is set, so use caution when setting it. Setting it to
the same string as the job's name is an effective un-set workaround.
Alternately, the field can be cleared manually using the Jenkins web
interface.
* **concurrent**:
Boolean value to set whether or not Jenkins can run this job
concurrently. Defaults to ``false``.
* **workspace**:
Path for a custom workspace. Defaults to Jenkins default
configuration.
* **child-workspace**:
Path for a child custom workspace. Defaults to Jenkins default
configuration. This parameter is only valid for matrix type jobs.
* **quiet-period**:
Number of seconds to wait between consecutive runs of this job.
Defaults to ``0``.
* **block-downstream**:
Boolean value to set whether or not this job must block while
downstream jobs are running. Downstream jobs are determined
transitively. Defaults to ``false``.
* **block-upstream**:
Boolean value to set whether or not this job must block while
upstream jobs are running. Upstream jobs are determined
transitively. Defaults to ``false``.
* **auth-token**:
Specifies an authentication token that allows new builds to be
triggered by accessing a special predefined URL. Only those who
know the token will be able to trigger builds remotely.
* **retry-count**:
If a build fails to checkout from the repository, Jenkins will
retry the specified number of times before giving up.
* **node**:
Restrict where this job can be run. If there is a group of
machines that the job can be built on, you can specify that
label as the node to tie on, which will cause Jenkins to build the job on
any of the machines with that label. For matrix projects, this parameter
will only restrict where the parent job will run.
* **logrotate**:
The Logrotate section allows you to automatically remove old build
history. It adds the ``logrotate`` attribute to the :ref:`Job`
definition. All logrotate attributes default to "-1" (keep forever).
* **raw**:
If present, this section should contain a single **xml** entry. This XML
will be inserted at the top-level of the :ref:`Job` definition.
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.xml_config import remove_ignorable_whitespace
class General(jenkins_jobs.modules.base.Base):
sequence = 10
def gen_xml(self, parser, xml, data):
jdk = data.get('jdk', None)
if jdk:
XML.SubElement(xml, 'jdk').text = jdk
XML.SubElement(xml, 'actions')
desc_text = data.get('description', None)
if desc_text is not None:
description = XML.SubElement(xml, 'description')
description.text = desc_text
XML.SubElement(xml, 'keepDependencies').text = 'false'
disabled = data.get('disabled', None)
if disabled is not None:
if disabled:
XML.SubElement(xml, 'disabled').text = 'true'
else:
XML.SubElement(xml, 'disabled').text = 'false'
if 'display-name' in data:
XML.SubElement(xml, 'displayName').text = data['display-name']
if data.get('block-downstream'):
XML.SubElement(xml,
'blockBuildWhenDownstreamBuilding').text = 'true'
else:
XML.SubElement(xml,
'blockBuildWhenDownstreamBuilding').text = 'false'
if data.get('block-upstream'):
XML.SubElement(xml,
'blockBuildWhenUpstreamBuilding').text = 'true'
else:
XML.SubElement(xml,
'blockBuildWhenUpstreamBuilding').text = 'false'
if 'auth-token' in data:
XML.SubElement(xml, 'authToken').text = data['auth-token']
if data.get('concurrent'):
XML.SubElement(xml, 'concurrentBuild').text = 'true'
else:
XML.SubElement(xml, 'concurrentBuild').text = 'false'
if 'workspace' in data:
XML.SubElement(xml, 'customWorkspace').text = \
str(data['workspace'])
if (xml.tag == 'matrix-project') and ('child-workspace' in data):
XML.SubElement(xml, 'childCustomWorkspace').text = \
str(data['child-workspace'])
if 'quiet-period' in data:
XML.SubElement(xml, 'quietPeriod').text = str(data['quiet-period'])
node = data.get('node', None)
if node:
XML.SubElement(xml, 'assignedNode').text = node
XML.SubElement(xml, 'canRoam').text = 'false'
else:
XML.SubElement(xml, 'canRoam').text = 'true'
if 'retry-count' in data:
XML.SubElement(xml, 'scmCheckoutRetryCount').text = \
str(data['retry-count'])
if 'logrotate' in data:
lr_xml = XML.SubElement(xml, 'logRotator')
logrotate = data['logrotate']
lr_days = XML.SubElement(lr_xml, 'daysToKeep')
lr_days.text = str(logrotate.get('daysToKeep', -1))
lr_num = XML.SubElement(lr_xml, 'numToKeep')
lr_num.text = str(logrotate.get('numToKeep', -1))
lr_adays = XML.SubElement(lr_xml, 'artifactDaysToKeep')
lr_adays.text = str(logrotate.get('artifactDaysToKeep', -1))
lr_anum = XML.SubElement(lr_xml, 'artifactNumToKeep')
lr_anum.text = str(logrotate.get('artifactNumToKeep', -1))
if 'raw' in data:
raw(parser, xml, data['raw'])
def raw(parser, xml_parent, data):
    # Documented in definition.rst, since the includes/docs machinery does not
    # work well for cross-cutting methods like this one.
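    # Illustrative (assumed) input: data = {'xml': '<properties>...</properties>'}.
    # The XML string is parsed, ignorable whitespace is stripped, and the
    # resulting element is appended verbatim under xml_parent (the job root).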
root = XML.fromstring(data.get('xml'))
remove_ignorable_whitespace(root)
xml_parent.append(root)
|
mjeanson/jenkins-job-builder
|
jenkins_jobs/modules/general.py
|
Python
|
apache-2.0
| 7,787
|
# -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module reads an XML file defining zim pages.
For now the only XML tags which are supported are 'section' and 'page'. The
'section' tag serves as a container for multiple pages. The 'page' tag serves
as a container for the page content plus any sub-pages. Each page should have
an attribute 'name' giving its basename, so the file can look like this::
<section>
<page name="Foo">
Some text in page Foo
<page name="Bar">
This is text in page 'Foo:Bar'
</page>
</page>
</section>
We read the whole file to memory, which puts certain limits on
scalability.
'''
# FUTURE: This module does not support attachments in the xml data
import zim.stores.memory
# importing class from this module makes get_store() fail
from zim.formats import get_format, ElementTreeModule
from zim.notebook import Path
from zim.parsing import TextBuffer
class XMLStore(zim.stores.memory.MemoryStore):
properties = {
'read-only': True
}
def __init__(self, notebook, path, file=None):
zim.stores.memory.MemoryStore.__init__(self, notebook, path)
self.file = file
if not self.store_has_file():
			raise AssertionError, 'XML store needs file'
# not using assert here because it could be optimized away
self.format = get_format('wiki') # FIXME store format in XML header
if self.file.exists():
self.parse(self.file.read())
def store_page(self, page):
		zim.stores.memory.MemoryStore.store_page(self, page)
self.file.writelines(self.dump())
def parse(self, content):
if isinstance(content, list):
content = ''.join(content)
target = MemoryStoreTreeBuilder(self)
builder = ElementTreeModule.XMLTreeBuilder(target=target)
builder.feed(content)
builder.close()
def dump(self):
text = TextBuffer([
u'<?xml version="1.0" encoding="utf-8"?>\n',
u'<section>\n' ])
for node in self._nodetree:
text += self._dump_node(node)
text.append(u'</section>\n')
return text.get_lines()
def _dump_node(self, node):
text = [u'<page name="%s">\n' % node.basename]
if node.text:
text.append(node.text)
for n in node.children:
text += self._dump_node(n) # recurs
text.append('</page>\n')
return text
class MemoryStoreTreeBuilder(object):
def __init__(self, store):
self.store = store
self.path = Path(':')
self.stack = []
def start(self, tag, attrib):
if tag == 'section':
pass
elif tag == 'page':
assert 'name' in attrib
self.path = self.path + attrib['name']
node = self.store.get_node(self.path, vivificate=True)
self.stack.append(node)
else:
assert False, 'Unknown tag'
def data(self, data):
if self.stack:
node = self.stack[-1]
if node.text:
node.text += data
else:
node.text = data
def end(self, tag):
if tag == 'section':
pass
else:
assert self.stack
self.path = self.path.parent
node = self.stack.pop()
if node.text and node.text.isspace():
node.text = ''
elif node.text:
node.text = unicode(node.text.strip('\n') + '\n')
def close(self):
pass
|
fabricehong/zim-desktop
|
zim/stores/xml.py
|
Python
|
gpl-2.0
| 3,065
|
from rsf.proj import *
import dix
mig2cip = None
def velcon(data, # data name
nv, # continuation steps
v0, # initial velocity
dv, # velocity step
nx, # lateral dimension
nh, # number of offsets
padt, # time padding
padt2, # extra time padding
padx=None, # lateral padding
v1=None, # other velocity
n1=None, # time extent
dt=0.004, # time sampling
dx=0.01, # midpoint sampling
units='km', # lateral units
vslope=None, # semblance muting
vx0=0, # semblance muting
x0=0, # lateral origin
srect1=3, # semblance vertical smoothing
srect2=1, # semblance lateral smoothing
rect1=10, # vertical smoothing
rect2=10): # lateral smoothing
'''Velocity continuation'''
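    # Descriptive summary of the flow below: constant-velocity Kirchhoff
    # migration (preconstkirch), cosine transform (cosft) along a spatial axis,
    # velocity continuation in the transform domain (fourvc), semblance
    # estimation (divn) and velocity picking (pick), then slicing along the
    # picked velocity (slice) to produce the final time-migrated image.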
global mig2cip
vm = v0+0.5*nv*dv
mig=data+'-mig'
Flow(mig,data,
'''
halfint inv=y adj=y |
preconstkirch vel=%g
''' % v0,split=[4,nh])
if n1:
mig2cip = '''
transp plane=34 memsize=500 |
transp plane=23 | window n1=%d
''' % n1
else:
mig2cip = '''
transp plane=34 memsize=500 |
transp plane=23
'''
n1=100
cip=data+'-cip'
Flow(cip,mig,mig2cip)
if padx:
pad=data+'-pad'
Flow(pad,cip,'pad n3=%d' % padx)
else:
pad=cip
padx=nx
ckx=data+'-ckx'
vlf=data+'-vlf'
ckx2=data+'-ckx2'
vlf2=data+'-vlf2'
Flow(ckx,pad,'cosft sign3=1 | put o4=0')
Flow(ckx+'v',ckx,
'''
fourvc nv=%d dv=%g v0=%g pad=%d pad2=%d verb=y
''' % (nv,dv,v0,padt,padt2),
split=[3,padx])
Flow(vlf,ckx+'v',
'''
cosft sign3=-1 | window n3=%d
''' % nx)
Flow(ckx2,pad,
'''
halfint inv=y adj=y |
math output="input*input" |
halfint adj=y |
cosft sign3=1 | put o4=0
''')
Flow(ckx2+'v',ckx2,
'''
fourvc nv=%d dv=%g v0=%g pad=%d pad2=%d verb=y
''' % (nv,dv,v0,padt,padt2),
split=[3,padx])
Flow(vlf2,ckx2+'v',
'''
cosft sign3=-1 | window n3=%d | clip2 lower=0
''' % nx)
sem = data+'-sem'
Flow(sem,[vlf,vlf2],
'''
mul $SOURCE |
divn den=${SOURCES[1]} rect1=%d rect3=%d
''' % (srect1,srect2))
vlf1 = data+'-vlf1'
Flow(vlf1,pad,
'''
transp plane=23 memsize=1000 |
fourvc2 nv=%d dv=%g v0=%g pad=%d pad2=%d |
window n2=%d | transp plane=23 memsize=1000
''' % (nv,dv,v0,padt,padt2,nx))
if v1:
Flow(mig+'1',data,'preconstkirch vel=%g' % v1,split=[4,nh])
Flow(cip+'1',mig+'1',mig2cip)
migr = data+'-migr'
Flow(migr,cip,'stack norm=y')
Plot(migr,'grey title=Migration0')
Flow(migr+'1',cip+'1','stack norm=y')
Plot(migr+'1','grey title=Migration1')
vlfr = data+'-vlfr'
Flow(vlfr,vlf,'window n2=1 min2=%g' % v1)
Plot(vlfr,'grey title="Velocity Continuation 0 -> 1" ')
Result(migr,[migr,migr+'1',vlfr],'SideBySideAniso')
if vslope:
pick = '''
mutter x0=%g v0=%g half=n |
scale axis=2 | pick rect1=%d rect2=%d
''' % (vx0,vslope,rect1,rect2)
else:
pick = '''
scale axis=2 | pick rect1=%d rect2=%d
''' % (rect1,rect2)
npk = data+'-npk'
Flow(npk,sem,pick)
Plot(npk,
'''
grey pclip=100 color=j bias=%g
scalebar=y title="Picked Migration Velocity"
label1=Time unit1=s label2="Lateral Position" unit2=%s
barlabel=Velocity barunit="%s/s" barreverse=y
''' % (vm,units,units))
fmg = data+'-fmg'
Flow(fmg,[vlf,npk],'slice pick=${SOURCES[1]}')
Result(fmg,
'''
grey title=Slice label1=Time unit1=s
label2="Lateral Position" unit2=%s
''' % units)
agc = data+'-agc'
Flow(agc,fmg,'agc rect1=200')
Plot(fmg,agc,
'''
grey title="Time-Migrated Image" label1="Time"
unit1=s label2="Lateral Position" unit2=%s
''' % units)
Result(agc,
'''
grey title=Picked pclip=98 label1="Time"
unit1=s label2="Lateral Position" unit2=%s
''' % units)
Result(fmg+'2',[fmg,npk],'SideBySideAniso',vppen='txscale=1.2')
Flow(agc+'2',[sem,npk],'slice pick=${SOURCES[1]} | window')
|
zxtstarry/src
|
book/Recipes/velcon.py
|
Python
|
gpl-2.0
| 4,733
|
from pyblish import api
from pyblish_bumpybox import inventory
reload(inventory)
class ExtractConstructionHistory(api.InstancePlugin):
""" Option to extract the with/without construction history. """
order = inventory.get_order(__file__, "ExtractConstructionHistory")
families = ["mayaAscii", "mayaBinary"]
optional = True
label = "Remove Construction History"
hosts = ["maya"]
targets = ["process.local"]
def process(self, instance):
instance.data["constructionHistory"] = False
|
Bumpybox/pyblish-bumpybox
|
pyblish_bumpybox/plugins/maya/lookdev/extract_construction_history.py
|
Python
|
lgpl-3.0
| 525
|
# -*- coding: utf-8 -*-
import time
from odoo import api, models, _
from odoo.tools import float_is_zero
from datetime import datetime
from dateutil.relativedelta import relativedelta
class ReportAgedPartnerBalance(models.AbstractModel):
_name = 'report.account.report_agedpartnerbalance'
def _get_partner_move_lines(self, account_type, date_from, target_move, period_length):
periods = {}
start = datetime.strptime(date_from, "%Y-%m-%d")
for i in range(5)[::-1]:
stop = start - relativedelta(days=period_length)
periods[str(i)] = {
'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))),
'stop': start.strftime('%Y-%m-%d'),
'start': (i!=0 and stop.strftime('%Y-%m-%d') or False),
}
start = stop - relativedelta(days=1)
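        # Illustrative shape of `periods` (assuming period_length=30): key '4' is
        # the most recent bucket ('0-30' days before date_from), '3' is '30-60',
        # and so on down to key '0' ('+120'), whose 'start' is False (open ended).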
res = []
total = []
cr = self.env.cr
user_company = self.env.user.company_id.id
move_state = ['draft', 'posted']
if target_move == 'posted':
move_state = ['posted']
arg_list = (tuple(move_state), tuple(account_type))
#build the reconciliation clause to see what partner needs to be printed
reconciliation_clause = '(l.reconciled IS FALSE)'
cr.execute('SELECT debit_move_id, credit_move_id FROM account_partial_reconcile where create_date > %s', (date_from,))
reconciled_after_date = []
for row in cr.fetchall():
reconciled_after_date += [row[0], row[1]]
if reconciled_after_date:
reconciliation_clause = '(l.reconciled IS FALSE OR l.id IN %s)'
arg_list += (tuple(reconciled_after_date),)
arg_list += (date_from, user_company)
query = '''
SELECT DISTINCT l.partner_id, UPPER(res_partner.name)
FROM account_move_line AS l left join res_partner on l.partner_id = res_partner.id, account_account, account_move am
WHERE (l.account_id = account_account.id)
AND (l.move_id = am.id)
AND (am.state IN %s)
AND (account_account.internal_type IN %s)
AND ''' + reconciliation_clause + '''
AND (l.date <= %s)
AND l.company_id = %s
ORDER BY UPPER(res_partner.name)'''
cr.execute(query, arg_list)
partners = cr.dictfetchall()
# put a total of 0
for i in range(7):
total.append(0)
        # Build the list of partner ids, passed as a tuple parameter to the SQL queries below
partner_ids = [partner['partner_id'] for partner in partners if partner['partner_id']]
lines = dict([(partner['partner_id'], []) for partner in partners if partner['partner_id']])
if not partner_ids:
return [], [], []
# This dictionary will store the not due amount of all partners
undue_amounts = {}
query = '''SELECT l.id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id = am.id)
AND (am.state IN %s)
AND (account_account.internal_type IN %s)
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND ((l.partner_id IN %s) OR (l.partner_id IS NULL))
AND (l.date <= %s)
AND l.company_id = %s'''
cr.execute(query, (tuple(move_state), tuple(account_type), date_from, tuple(partner_ids), date_from, user_company))
aml_ids = cr.fetchall()
aml_ids = aml_ids and [x[0] for x in aml_ids] or []
for line in self.env['account.move.line'].browse(aml_ids):
partner_id = line.partner_id.id or None
if partner_id not in undue_amounts:
undue_amounts[partner_id] = 0.0
line_amount = line.balance
if line.balance == 0:
continue
for partial_line in line.matched_debit_ids:
if partial_line.create_date[:10] <= date_from:
line_amount += partial_line.amount
for partial_line in line.matched_credit_ids:
if partial_line.create_date[:10] <= date_from:
line_amount -= partial_line.amount
undue_amounts[partner_id] += line_amount
lines[partner_id].append({
'line': line,
'amount': line_amount,
'period': 6,
})
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(account_type), tuple(partner_ids),)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if periods[str(i)]['start'] and periods[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (periods[str(i)]['start'], periods[str(i)]['stop'])
elif periods[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (periods[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (periods[str(i)]['stop'],)
args_list += (date_from, user_company)
query = '''SELECT l.id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id = am.id)
AND (am.state IN %s)
AND (account_account.internal_type IN %s)
AND ((l.partner_id IN %s) OR (l.partner_id IS NULL))
AND ''' + dates_query + '''
AND (l.date <= %s)
AND l.company_id = %s'''
cr.execute(query, args_list)
partners_amount = {}
aml_ids = cr.fetchall()
aml_ids = aml_ids and [x[0] for x in aml_ids] or []
for line in self.env['account.move.line'].browse(aml_ids):
partner_id = line.partner_id.id or None
if partner_id not in partners_amount:
partners_amount[partner_id] = 0.0
line_amount = line.balance
if line.balance == 0:
continue
for partial_line in line.matched_debit_ids:
if partial_line.create_date[:10] <= date_from:
line_amount += partial_line.amount
for partial_line in line.matched_credit_ids:
if partial_line.create_date[:10] <= date_from:
line_amount -= partial_line.amount
partners_amount[partner_id] += line_amount
lines[partner_id].append({
'line': line,
'amount': line_amount,
'period': i + 1,
})
history.append(partners_amount)
for partner in partners:
at_least_one_amount = False
values = {}
undue_amt = 0.0
if partner['partner_id'] in undue_amounts: # Making sure this partner actually was found by the query
undue_amt = undue_amounts[partner['partner_id']]
total[6] = total[6] + undue_amt
values['direction'] = undue_amt
if not float_is_zero(values['direction'], precision_rounding=self.env.user.company_id.currency_id.rounding):
at_least_one_amount = True
for i in range(5):
during = False
if partner['partner_id'] in history[i]:
during = [history[i][partner['partner_id']]]
# Adding counter
total[(i)] = total[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
if not float_is_zero(values[str(i)], precision_rounding=self.env.user.company_id.currency_id.rounding):
at_least_one_amount = True
values['total'] = sum([values['direction']] + [values[str(i)] for i in range(5)])
## Add for total
total[(i + 1)] += values['total']
values['partner_id'] = partner['partner_id']
if partner['partner_id']:
browsed_partner = self.env['res.partner'].browse(partner['partner_id'])
values['name'] = browsed_partner.name and len(browsed_partner.name) >= 45 and browsed_partner.name[0:40] + '...' or browsed_partner.name
values['trust'] = browsed_partner.trust
else:
values['name'] = _('Unknown Partner')
values['trust'] = False
if at_least_one_amount:
res.append(values)
return res, total, lines
@api.model
def render_html(self, docids, data=None):
total = []
model = self.env.context.get('active_model')
docs = self.env[model].browse(self.env.context.get('active_id'))
target_move = data['form'].get('target_move', 'all')
date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if data['form']['result_selection'] == 'customer':
account_type = ['receivable']
elif data['form']['result_selection'] == 'supplier':
account_type = ['payable']
else:
account_type = ['payable', 'receivable']
movelines, total, dummy = self._get_partner_move_lines(account_type, date_from, target_move, data['form']['period_length'])
docargs = {
'doc_ids': self.ids,
'doc_model': model,
'data': data['form'],
'docs': docs,
'time': time,
'get_partner_lines': movelines,
'get_direction': total,
}
return self.env['report'].render('account.report_agedpartnerbalance', docargs)
|
ayepezv/GAD_ERP
|
addons/account/report/account_aged_partner_balance.py
|
Python
|
gpl-3.0
| 10,087
|
import datetime
import os
from django.conf import settings
from django.db import models
from django.db.models import Count
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.files import get_thumbnailer
from radpress.compat import User
from radpress.settings import MORE_TAG, DEFAULT_MARKUP
from radpress.readers import get_reader, get_markup_choices
class ThumbnailModelMixin(object):
def get_thumbnail(self, path, size):
thumbnailer = get_thumbnailer(path)
thumb = thumbnailer.get_thumbnail({'size': size, 'crop': True})
return thumb
def get_thumbnail_tag(self, image, size=None):
size = size or (50, 50)
thumb = self.get_thumbnail(image.path, size)
url = thumb.url.replace(
'%s/' % settings.MEDIA_ROOT, settings.MEDIA_URL)
res = '<a href="%s" target="_blank"><img src="%s" height="%s" /></a>'
return res % (image.url, url, size[1])
class TagManager(models.Manager):
def get_available_tags(self):
"""
        Return the list of available tags. A tag is available when it is used
        by at least one published article.
"""
return self.annotate(Count('article')).filter(
article__count__gt=0, article__is_published=True)
class Tag(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
objects = TagManager()
def __unicode__(self):
return unicode(self.name)
def save(self, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
return super(Tag, self).save(**kwargs)
class EntryImage(ThumbnailModelMixin, models.Model):
name = models.CharField(
max_length=100, blank=True,
help_text=_("A simple description about image."))
image = models.ImageField(upload_to='radpress/entry_images/')
class Meta:
verbose_name = _("Image")
verbose_name_plural = _("Images")
def __unicode__(self):
image_name = os.path.split(self.image.name)[1]
return u"%s - %s" % (self.name, image_name)
def thumbnail_tag(self):
if not self.image:
return ''
return self.get_thumbnail_tag(self.image)
thumbnail_tag.allow_tags = True
thumbnail_tag.short_description = ''
class EntryManager(models.Manager):
def all_published(self, **kwargs):
return self.filter(is_published=True, **kwargs)
class Entry(models.Model):
"""
    Radpress' main model. It holds the articles shown on the Radpress main page.
    The content body is filled automatically from the content value after it is
    converted to HTML from the markup source. The `is_published` flag prevents
    unpublished entries from appearing in the blog list page.
    `created_at` is set automatically when a new blog entry is saved, while
    `updated_at` is refreshed on every save.
"""
MARKUP_CHOICES = get_markup_choices()
title = models.CharField(max_length=500)
markup = models.CharField(
max_length=20, choices=MARKUP_CHOICES, default=DEFAULT_MARKUP)
slug = models.SlugField(unique=True)
content = models.TextField()
content_body = models.TextField(editable=False)
is_published = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(default=datetime.datetime.now)
objects = EntryManager()
class Meta:
abstract = True
ordering = ('-created_at', '-updated_at')
def __unicode__(self):
return unicode(self.title)
def save(self, **kwargs):
reader = get_reader(markup=self.markup)
content_body, metadata = reader(self.content).read()
if not self.content_body:
self.content_body = content_body
if not self.slug:
self.slug = slugify(self.title)
if not kwargs.pop('skip_updated_at', False):
self.updated_at = datetime.datetime.now()
super(Entry, self).save(**kwargs)
class Article(Entry):
author = models.ForeignKey(User, null=True, editable=False)
cover_image = models.ForeignKey(EntryImage, blank=True, null=True)
tags = models.ManyToManyField(
Tag, null=True, blank=True, through='ArticleTag')
@property
def content_by_more(self):
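        # Return only the teaser: the part of content_body before the first
        # MORE_TAG marker (the whole body if no marker is present).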
content_list = self.content_body.split(MORE_TAG, 1)
content = content_list[0].strip()
return content
@models.permalink
def get_absolute_url(self):
return 'radpress-article-detail', [self.slug]
class ArticleTag(models.Model):
tag = models.ForeignKey(Tag)
article = models.ForeignKey(Article)
def __unicode__(self):
return u"%s - %s" % (self.tag.name, self.article)
class Page(Entry):
@models.permalink
def get_absolute_url(self):
return 'radpress-page-detail', [self.slug]
class MenuManager(models.Manager):
def get_menu_context(self):
menus = []
for menu in Menu.objects.filter(page__is_published=True):
menus.append({
'url': menu.page.get_absolute_url(),
'title': menu.page.title
})
return menus
class Menu(models.Model):
order = models.PositiveSmallIntegerField(default=3)
page = models.ForeignKey(Page, unique=True)
objects = MenuManager()
class Meta:
unique_together = ('order', 'page')
def __unicode__(self):
return u'%s - %s' % (self.order, self.page.title)
|
ifearcompilererrors/fle_redesign
|
fle_redesign/apps/radpress/models.py
|
Python
|
mit
| 5,522
|
import re
def test_pattern(word):
pattern = r"\b{}\b".format(re.sub(r"([\.\^\$\*\+\?\{\}\[\]\|\(\)])", r'\\\1', r""+word+""))
print pattern
def test_pattern2(word):
pattern = r"{}".format(r"{}".format(word).replace(r'\\', r'\\\\'))
print pattern
test_pattern(r"i have a * in this string. This is a\new text")
test_pattern("i have a * in this string. This is a\new text")
test_pattern2(r"i have a * in this string. This is a\new text")
test_pattern2("i have a * in this string. This is a\new text")
|
RohitMetaCube/test_code
|
test_patterns.py
|
Python
|
gpl-3.0
| 525
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AnalyticalStorageConfiguration(msrest.serialization.Model):
"""Analytical storage specific properties.
:param schema_type: Describes the types of schema for analytical storage. Possible values
include: "WellDefined", "FullFidelity".
:type schema_type: str or ~azure.mgmt.cosmosdb.models.AnalyticalStorageSchemaType
"""
_attribute_map = {
'schema_type': {'key': 'schemaType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnalyticalStorageConfiguration, self).__init__(**kwargs)
self.schema_type = kwargs.get('schema_type', None)
class ApiProperties(msrest.serialization.Model):
"""ApiProperties.
    :param server_version: Describes the ServerVersion of a MongoDB account. Possible values
include: "3.2", "3.6", "4.0".
:type server_version: str or ~azure.mgmt.cosmosdb.models.ServerVersion
"""
_attribute_map = {
'server_version': {'key': 'serverVersion', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApiProperties, self).__init__(**kwargs)
self.server_version = kwargs.get('server_version', None)
class ARMProxyResource(msrest.serialization.Model):
"""The resource model definition for a ARM proxy resource. It will have everything other than required location and tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ARMProxyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ARMResourceProperties(msrest.serialization.Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ARMResourceProperties, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class AutoscaleSettings(msrest.serialization.Model):
"""AutoscaleSettings.
:param max_throughput: Represents maximum throughput, the resource can scale up to.
:type max_throughput: int
"""
_attribute_map = {
'max_throughput': {'key': 'maxThroughput', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AutoscaleSettings, self).__init__(**kwargs)
self.max_throughput = kwargs.get('max_throughput', None)
class AutoscaleSettingsResource(msrest.serialization.Model):
"""Cosmos DB provisioned throughput settings object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param max_throughput: Required. Represents maximum throughput container can scale up to.
:type max_throughput: int
:param auto_upgrade_policy: Cosmos DB resource auto-upgrade policy.
:type auto_upgrade_policy: ~azure.mgmt.cosmosdb.models.AutoUpgradePolicyResource
:ivar target_max_throughput: Represents target maximum throughput container can scale up to
once offer is no longer in pending state.
:vartype target_max_throughput: int
"""
_validation = {
'max_throughput': {'required': True},
'target_max_throughput': {'readonly': True},
}
_attribute_map = {
'max_throughput': {'key': 'maxThroughput', 'type': 'int'},
'auto_upgrade_policy': {'key': 'autoUpgradePolicy', 'type': 'AutoUpgradePolicyResource'},
'target_max_throughput': {'key': 'targetMaxThroughput', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AutoscaleSettingsResource, self).__init__(**kwargs)
self.max_throughput = kwargs['max_throughput']
self.auto_upgrade_policy = kwargs.get('auto_upgrade_policy', None)
self.target_max_throughput = None
class AutoUpgradePolicyResource(msrest.serialization.Model):
"""Cosmos DB resource auto-upgrade policy.
:param throughput_policy: Represents throughput policy which service must adhere to for
auto-upgrade.
:type throughput_policy: ~azure.mgmt.cosmosdb.models.ThroughputPolicyResource
"""
_attribute_map = {
'throughput_policy': {'key': 'throughputPolicy', 'type': 'ThroughputPolicyResource'},
}
def __init__(
self,
**kwargs
):
super(AutoUpgradePolicyResource, self).__init__(**kwargs)
self.throughput_policy = kwargs.get('throughput_policy', None)
class BackupInformation(msrest.serialization.Model):
"""Backup information of a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar continuous_backup_information: Information about the status of continuous backups.
:vartype continuous_backup_information: ~azure.mgmt.cosmosdb.models.ContinuousBackupInformation
"""
_validation = {
'continuous_backup_information': {'readonly': True},
}
_attribute_map = {
'continuous_backup_information': {'key': 'continuousBackupInformation', 'type': 'ContinuousBackupInformation'},
}
def __init__(
self,
**kwargs
):
super(BackupInformation, self).__init__(**kwargs)
self.continuous_backup_information = None
class BackupPolicy(msrest.serialization.Model):
"""The object representing the policy for taking backups on an account.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContinuousModeBackupPolicy, PeriodicModeBackupPolicy.
All required parameters must be populated in order to send to Azure.
:param type: Required. Describes the mode of backups.Constant filled by server. Possible
values include: "Periodic", "Continuous".
:type type: str or ~azure.mgmt.cosmosdb.models.BackupPolicyType
:param migration_state: The object representing the state of the migration between the backup
policies.
:type migration_state: ~azure.mgmt.cosmosdb.models.BackupPolicyMigrationState
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'migration_state': {'key': 'migrationState', 'type': 'BackupPolicyMigrationState'},
}
_subtype_map = {
'type': {'Continuous': 'ContinuousModeBackupPolicy', 'Periodic': 'PeriodicModeBackupPolicy'}
}
def __init__(
self,
**kwargs
):
super(BackupPolicy, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.migration_state = kwargs.get('migration_state', None)
class BackupPolicyMigrationState(msrest.serialization.Model):
"""The object representing the state of the migration between the backup policies.
:param status: Describes the status of migration between backup policy types. Possible values
include: "Invalid", "InProgress", "Completed", "Failed".
:type status: str or ~azure.mgmt.cosmosdb.models.BackupPolicyMigrationStatus
:param target_type: Describes the target backup policy type of the backup policy migration.
Possible values include: "Periodic", "Continuous".
:type target_type: str or ~azure.mgmt.cosmosdb.models.BackupPolicyType
:param start_time: Time at which the backup policy migration started (ISO-8601 format).
:type start_time: ~datetime.datetime
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'target_type': {'key': 'targetType', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(BackupPolicyMigrationState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.target_type = kwargs.get('target_type', None)
self.start_time = kwargs.get('start_time', None)
class Capability(msrest.serialization.Model):
"""Cosmos DB capability object.
:param name: Name of the Cosmos DB capability. For example, "name": "EnableCassandra". Current
values also include "EnableTable" and "EnableGremlin".
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Capability, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class Capacity(msrest.serialization.Model):
"""The object that represents all properties related to capacity enforcement on an account.
:param total_throughput_limit: The total throughput limit imposed on the account. A
totalThroughputLimit of 2000 imposes a strict limit of max throughput that can be provisioned
on that account to be 2000. A totalThroughputLimit of -1 indicates no limits on provisioning of
throughput.
:type total_throughput_limit: int
"""
_validation = {
'total_throughput_limit': {'minimum': -1},
}
_attribute_map = {
'total_throughput_limit': {'key': 'totalThroughputLimit', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Capacity, self).__init__(**kwargs)
self.total_throughput_limit = kwargs.get('total_throughput_limit', None)
class CassandraClusterPublicStatus(msrest.serialization.Model):
"""Properties of a managed Cassandra cluster public status.
:param e_tag:
:type e_tag: str
:param reaper_status:
:type reaper_status: ~azure.mgmt.cosmosdb.models.ManagedCassandraReaperStatus
:param connection_errors: List relevant information about any connection errors to the
Datacenters.
:type connection_errors: list[~azure.mgmt.cosmosdb.models.ConnectionError]
:param data_centers: List of the status of each datacenter in this cluster.
:type data_centers:
list[~azure.mgmt.cosmosdb.models.CassandraClusterPublicStatusDataCentersItem]
"""
_attribute_map = {
'e_tag': {'key': 'eTag', 'type': 'str'},
'reaper_status': {'key': 'reaperStatus', 'type': 'ManagedCassandraReaperStatus'},
'connection_errors': {'key': 'connectionErrors', 'type': '[ConnectionError]'},
'data_centers': {'key': 'dataCenters', 'type': '[CassandraClusterPublicStatusDataCentersItem]'},
}
def __init__(
self,
**kwargs
):
super(CassandraClusterPublicStatus, self).__init__(**kwargs)
self.e_tag = kwargs.get('e_tag', None)
self.reaper_status = kwargs.get('reaper_status', None)
self.connection_errors = kwargs.get('connection_errors', None)
self.data_centers = kwargs.get('data_centers', None)
class CassandraClusterPublicStatusDataCentersItem(msrest.serialization.Model):
"""CassandraClusterPublicStatusDataCentersItem.
:param name: The name of this Datacenter.
:type name: str
:param seed_nodes: A list of all seed nodes in the cluster, managed and unmanaged.
:type seed_nodes: list[str]
:param nodes:
:type nodes:
list[~azure.mgmt.cosmosdb.models.ComponentsM9L909SchemasCassandraclusterpublicstatusPropertiesDatacentersItemsPropertiesNodesItems]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'seed_nodes': {'key': 'seedNodes', 'type': '[str]'},
'nodes': {'key': 'nodes', 'type': '[ComponentsM9L909SchemasCassandraclusterpublicstatusPropertiesDatacentersItemsPropertiesNodesItems]'},
}
def __init__(
self,
**kwargs
):
super(CassandraClusterPublicStatusDataCentersItem, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.seed_nodes = kwargs.get('seed_nodes', None)
self.nodes = kwargs.get('nodes', None)
class CassandraKeyspaceCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB Cassandra keyspace.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a Cassandra keyspace.
:type resource: ~azure.mgmt.cosmosdb.models.CassandraKeyspaceResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'CassandraKeyspaceResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class OptionsResource(msrest.serialization.Model):
"""Cosmos DB options resource object.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(OptionsResource, self).__init__(**kwargs)
self.throughput = kwargs.get('throughput', None)
self.autoscale_settings = kwargs.get('autoscale_settings', None)
class CassandraKeyspaceGetPropertiesOptions(OptionsResource):
"""CassandraKeyspaceGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceGetPropertiesOptions, self).__init__(**kwargs)
class CassandraKeyspaceResource(msrest.serialization.Model):
"""Cosmos DB Cassandra keyspace resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Cassandra keyspace.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceResource, self).__init__(**kwargs)
self.id = kwargs['id']
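# Illustrative sketch (editor-added): combines CassandraKeyspaceResource with the
# create/update parameters class defined earlier. The keyspace name and location are
# hypothetical example values.
def _example_cassandra_keyspace_parameters():
    """Return create/update parameters for a keyspace named 'ks1' in West US."""
    return CassandraKeyspaceCreateUpdateParameters(
        location="West US",
        resource=CassandraKeyspaceResource(id="ks1"),
    )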
class ExtendedResourceProperties(msrest.serialization.Model):
"""The system generated resource properties associated with SQL databases, SQL containers, Gremlin databases and Gremlin graphs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtendedResourceProperties, self).__init__(**kwargs)
self.rid = None
self.ts = None
self.etag = None
class CassandraKeyspaceGetPropertiesResource(ExtendedResourceProperties, CassandraKeyspaceResource):
"""CassandraKeyspaceGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Cassandra keyspace.
:type id: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.rid = None
self.ts = None
self.etag = None
class CassandraKeyspaceGetResults(ARMResourceProperties):
"""An Azure Cosmos DB Cassandra keyspace.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'CassandraKeyspaceGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'CassandraKeyspaceGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class CassandraKeyspaceListResult(msrest.serialization.Model):
"""The List operation response, that contains the Cassandra keyspaces and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Cassandra keyspaces and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.CassandraKeyspaceGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CassandraKeyspaceGetResults]'},
}
def __init__(
self,
**kwargs
):
super(CassandraKeyspaceListResult, self).__init__(**kwargs)
self.value = None
class CassandraPartitionKey(msrest.serialization.Model):
"""Cosmos DB Cassandra table partition key.
:param name: Name of the Cosmos DB Cassandra table partition key.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CassandraPartitionKey, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class CassandraSchema(msrest.serialization.Model):
"""Cosmos DB Cassandra table schema.
:param columns: List of Cassandra table columns.
:type columns: list[~azure.mgmt.cosmosdb.models.Column]
:param partition_keys: List of partition key.
:type partition_keys: list[~azure.mgmt.cosmosdb.models.CassandraPartitionKey]
:param cluster_keys: List of cluster key.
:type cluster_keys: list[~azure.mgmt.cosmosdb.models.ClusterKey]
"""
_attribute_map = {
'columns': {'key': 'columns', 'type': '[Column]'},
'partition_keys': {'key': 'partitionKeys', 'type': '[CassandraPartitionKey]'},
'cluster_keys': {'key': 'clusterKeys', 'type': '[ClusterKey]'},
}
def __init__(
self,
**kwargs
):
super(CassandraSchema, self).__init__(**kwargs)
self.columns = kwargs.get('columns', None)
self.partition_keys = kwargs.get('partition_keys', None)
self.cluster_keys = kwargs.get('cluster_keys', None)
class CassandraTableCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB Cassandra table.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a Cassandra table.
:type resource: ~azure.mgmt.cosmosdb.models.CassandraTableResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'CassandraTableResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class CassandraTableGetPropertiesOptions(OptionsResource):
"""CassandraTableGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableGetPropertiesOptions, self).__init__(**kwargs)
class CassandraTableResource(msrest.serialization.Model):
"""Cosmos DB Cassandra table resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Cassandra table.
:type id: str
:param default_ttl: Time to live of the Cosmos DB Cassandra table.
:type default_ttl: int
:param schema: Schema of the Cosmos DB Cassandra table.
:type schema: ~azure.mgmt.cosmosdb.models.CassandraSchema
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: int
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'schema': {'key': 'schema', 'type': 'CassandraSchema'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.default_ttl = kwargs.get('default_ttl', None)
self.schema = kwargs.get('schema', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
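# Illustrative sketch (editor-added): builds a table resource with a minimal schema. Column,
# CassandraPartitionKey and ClusterKey are defined elsewhere in this module; the column names,
# types and TTL value are hypothetical.
def _example_cassandra_table_resource():
    """Return a CassandraTableResource for a hypothetical 'events' table."""
    schema = CassandraSchema(
        columns=[Column(name="id", type="uuid"), Column(name="ts", type="timestamp")],
        partition_keys=[CassandraPartitionKey(name="id")],
        cluster_keys=[ClusterKey(name="ts", order_by="Asc")],
    )
    return CassandraTableResource(id="events", default_ttl=86400, schema=schema)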
class CassandraTableGetPropertiesResource(ExtendedResourceProperties, CassandraTableResource):
"""CassandraTableGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Cassandra table.
:type id: str
:param default_ttl: Time to live of the Cosmos DB Cassandra table.
:type default_ttl: int
:param schema: Schema of the Cosmos DB Cassandra table.
:type schema: ~azure.mgmt.cosmosdb.models.CassandraSchema
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: int
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'schema': {'key': 'schema', 'type': 'CassandraSchema'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'int'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.default_ttl = kwargs.get('default_ttl', None)
self.schema = kwargs.get('schema', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
self.rid = None
self.ts = None
self.etag = None
class CassandraTableGetResults(ARMResourceProperties):
"""An Azure Cosmos DB Cassandra table.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.CassandraTableGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.CassandraTableGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'CassandraTableGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'CassandraTableGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class CassandraTableListResult(msrest.serialization.Model):
"""The List operation response, that contains the Cassandra tables and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Cassandra tables and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.CassandraTableGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CassandraTableGetResults]'},
}
def __init__(
self,
**kwargs
):
super(CassandraTableListResult, self).__init__(**kwargs)
self.value = None
class Certificate(msrest.serialization.Model):
"""Certificate.
:param pem: PEM formatted public key.
:type pem: str
"""
_attribute_map = {
'pem': {'key': 'pem', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Certificate, self).__init__(**kwargs)
self.pem = kwargs.get('pem', None)
class ClusterKey(msrest.serialization.Model):
"""Cosmos DB Cassandra table cluster key.
:param name: Name of the Cosmos DB Cassandra table cluster key.
:type name: str
:param order_by: Order of the Cosmos DB Cassandra table cluster key; only "Asc" and "Desc" are
supported.
:type order_by: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'order_by': {'key': 'orderBy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterKey, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.order_by = kwargs.get('order_by', None)
class ManagedCassandraARMResourceProperties(msrest.serialization.Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.cosmosdb.models.ManagedCassandraManagedServiceIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedCassandraManagedServiceIdentity'},
}
def __init__(
self,
**kwargs
):
super(ManagedCassandraARMResourceProperties, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
class ClusterResource(ManagedCassandraARMResourceProperties):
"""Representation of a managed Cassandra cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.cosmosdb.models.ManagedCassandraManagedServiceIdentity
:param properties: Properties of a managed Cassandra cluster.
:type properties: ~azure.mgmt.cosmosdb.models.ClusterResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedCassandraManagedServiceIdentity'},
'properties': {'key': 'properties', 'type': 'ClusterResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(ClusterResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class ClusterResourceProperties(msrest.serialization.Model):
"""Properties of a managed Cassandra cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:param provisioning_state: The status of the resource at the time the operation was called.
Possible values include: "Creating", "Updating", "Deleting", "Succeeded", "Failed", "Canceled".
:type provisioning_state: str or ~azure.mgmt.cosmosdb.models.ManagedCassandraProvisioningState
:param restore_from_backup_id: To create an empty cluster, omit this field or set it to null.
To restore a backup into a new cluster, set this field to the resource id of the backup.
:type restore_from_backup_id: str
:param delegated_management_subnet_id: Resource id of a subnet that this cluster's management
service should have its network interface attached to. The subnet must be routable to all
subnets that will be delegated to data centers. The resource id must be of the form
'/subscriptions/:code:`<subscription id>`/resourceGroups/:code:`<resource
group>`/providers/Microsoft.Network/virtualNetworks/:code:`<virtual
network>`/subnets/:code:`<subnet>`'.
:type delegated_management_subnet_id: str
:param cassandra_version: Which version of Cassandra should this cluster converge to running
(e.g., 3.11). When updated, the cluster may take some time to migrate to the new version.
:type cassandra_version: str
:param cluster_name_override: If you need to set the clusterName property in cassandra.yaml to
something besides the resource name of the cluster, set the value to use on this property.
:type cluster_name_override: str
:param authentication_method: Which authentication method Cassandra should use to authenticate
clients. 'None' turns off authentication, so should not be used except in emergencies.
'Cassandra' is the default password based authentication. The default is 'Cassandra'. Possible
values include: "None", "Cassandra".
:type authentication_method: str or ~azure.mgmt.cosmosdb.models.AuthenticationMethod
:param initial_cassandra_admin_password: Initial password for clients connecting as admin to
the cluster. Should be changed after cluster creation. Returns null on GET. This field only
applies when the authenticationMethod field is 'Cassandra'.
:type initial_cassandra_admin_password: str
:param prometheus_endpoint: Hostname or IP address where the Prometheus endpoint containing
data about the managed Cassandra nodes can be reached.
:type prometheus_endpoint: ~azure.mgmt.cosmosdb.models.SeedNode
:param repair_enabled: Should automatic repairs run on this cluster? If omitted, this is true,
and should stay true unless you are running a hybrid cluster where you are already doing your
own repairs.
:type repair_enabled: bool
:param client_certificates: List of TLS certificates used to authorize clients connecting to
the cluster. All connections are TLS encrypted whether clientCertificates is set or not, but if
clientCertificates is set, the managed Cassandra cluster will reject all connections not
bearing a TLS client certificate that can be validated from one or more of the public
certificates in this property.
:type client_certificates: list[~azure.mgmt.cosmosdb.models.Certificate]
:param external_gossip_certificates: List of TLS certificates used to authorize gossip from
unmanaged data centers. The TLS certificates of all nodes in unmanaged data centers must be
verifiable using one of the certificates provided in this property.
:type external_gossip_certificates: list[~azure.mgmt.cosmosdb.models.Certificate]
:ivar gossip_certificates: List of TLS certificates that unmanaged nodes must trust for gossip
with managed nodes. All managed nodes will present TLS client certificates that are verifiable
using one of the certificates provided in this property.
:vartype gossip_certificates: list[~azure.mgmt.cosmosdb.models.Certificate]
:param external_seed_nodes: List of IP addresses of seed nodes in unmanaged data centers. These
will be added to the seed node lists of all managed nodes.
:type external_seed_nodes: list[~azure.mgmt.cosmosdb.models.SeedNode]
:ivar seed_nodes: List of IP addresses of seed nodes in the managed data centers. These should
be added to the seed node lists of all unmanaged nodes.
:vartype seed_nodes: list[~azure.mgmt.cosmosdb.models.SeedNode]
:param hours_between_backups: Number of hours to wait between taking a backup of the cluster.
To disable backups, set this property to 0.
:type hours_between_backups: int
:param deallocated: Whether the cluster and associated data centers have been deallocated.
:type deallocated: bool
:param cassandra_audit_logging_enabled: Whether Cassandra audit logging is enabled.
:type cassandra_audit_logging_enabled: bool
"""
_validation = {
'gossip_certificates': {'readonly': True},
'seed_nodes': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'restore_from_backup_id': {'key': 'restoreFromBackupId', 'type': 'str'},
'delegated_management_subnet_id': {'key': 'delegatedManagementSubnetId', 'type': 'str'},
'cassandra_version': {'key': 'cassandraVersion', 'type': 'str'},
'cluster_name_override': {'key': 'clusterNameOverride', 'type': 'str'},
'authentication_method': {'key': 'authenticationMethod', 'type': 'str'},
'initial_cassandra_admin_password': {'key': 'initialCassandraAdminPassword', 'type': 'str'},
'prometheus_endpoint': {'key': 'prometheusEndpoint', 'type': 'SeedNode'},
'repair_enabled': {'key': 'repairEnabled', 'type': 'bool'},
'client_certificates': {'key': 'clientCertificates', 'type': '[Certificate]'},
'external_gossip_certificates': {'key': 'externalGossipCertificates', 'type': '[Certificate]'},
'gossip_certificates': {'key': 'gossipCertificates', 'type': '[Certificate]'},
'external_seed_nodes': {'key': 'externalSeedNodes', 'type': '[SeedNode]'},
'seed_nodes': {'key': 'seedNodes', 'type': '[SeedNode]'},
'hours_between_backups': {'key': 'hoursBetweenBackups', 'type': 'int'},
'deallocated': {'key': 'deallocated', 'type': 'bool'},
'cassandra_audit_logging_enabled': {'key': 'cassandraAuditLoggingEnabled', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ClusterResourceProperties, self).__init__(**kwargs)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.restore_from_backup_id = kwargs.get('restore_from_backup_id', None)
self.delegated_management_subnet_id = kwargs.get('delegated_management_subnet_id', None)
self.cassandra_version = kwargs.get('cassandra_version', None)
self.cluster_name_override = kwargs.get('cluster_name_override', None)
self.authentication_method = kwargs.get('authentication_method', None)
self.initial_cassandra_admin_password = kwargs.get('initial_cassandra_admin_password', None)
self.prometheus_endpoint = kwargs.get('prometheus_endpoint', None)
self.repair_enabled = kwargs.get('repair_enabled', None)
self.client_certificates = kwargs.get('client_certificates', None)
self.external_gossip_certificates = kwargs.get('external_gossip_certificates', None)
self.gossip_certificates = None
self.external_seed_nodes = kwargs.get('external_seed_nodes', None)
self.seed_nodes = None
self.hours_between_backups = kwargs.get('hours_between_backups', None)
self.deallocated = kwargs.get('deallocated', None)
self.cassandra_audit_logging_enabled = kwargs.get('cassandra_audit_logging_enabled', None)
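# Illustrative sketch (editor-added): a minimal managed Cassandra cluster definition. The
# subnet resource id is a placeholder that follows the form described in the docstring, and
# the Cassandra version, password and backup interval are hypothetical values.
def _example_cluster_resource():
    """Return a ClusterResource for a password-authenticated managed Cassandra cluster."""
    properties = ClusterResourceProperties(
        delegated_management_subnet_id=(
            "/subscriptions/<subscription id>/resourceGroups/<resource group>"
            "/providers/Microsoft.Network/virtualNetworks/<virtual network>/subnets/<subnet>"
        ),
        cassandra_version="3.11",
        authentication_method="Cassandra",
        initial_cassandra_admin_password="<admin password>",
        hours_between_backups=24,
    )
    return ClusterResource(location="West US", properties=properties)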
class Column(msrest.serialization.Model):
"""Cosmos DB Cassandra table column.
:param name: Name of the Cosmos DB Cassandra table column.
:type name: str
:param type: Type of the Cosmos DB Cassandra table column.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Column, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CommandOutput(msrest.serialization.Model):
"""Response of /command api.
:param command_output: Output of the command.
:type command_output: str
"""
_attribute_map = {
'command_output': {'key': 'commandOutput', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CommandOutput, self).__init__(**kwargs)
self.command_output = kwargs.get('command_output', None)
class CommandPostBody(msrest.serialization.Model):
"""Specification of which command to run where.
All required parameters must be populated in order to send to Azure.
:param command: Required. The command which should be run.
:type command: str
:param arguments: The arguments for the command to be run.
:type arguments: dict[str, str]
:param host: Required. IP address of the cassandra host to run the command on.
:type host: str
:param cassandra_stop_start: If true, stops cassandra before executing the command and then
starts it again.
:type cassandra_stop_start: bool
:param readwrite: If true, allows the command to *write* to the cassandra directory, otherwise
read-only.
:type readwrite: bool
"""
_validation = {
'command': {'required': True},
'host': {'required': True},
}
_attribute_map = {
'command': {'key': 'command', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': '{str}'},
'host': {'key': 'host', 'type': 'str'},
'cassandra_stop_start': {'key': 'cassandra-stop-start', 'type': 'bool'},
'readwrite': {'key': 'readwrite', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(CommandPostBody, self).__init__(**kwargs)
self.command = kwargs['command']
self.arguments = kwargs.get('arguments', None)
self.host = kwargs['host']
self.cassandra_stop_start = kwargs.get('cassandra_stop_start', None)
self.readwrite = kwargs.get('readwrite', None)
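# Illustrative sketch (editor-added): only the two required fields plus one optional flag are
# populated; the command string and host IP are hypothetical example values.
def _example_command_post_body():
    """Return a CommandPostBody running 'nodetool status' against a single Cassandra host."""
    return CommandPostBody(command="nodetool status", host="10.0.0.4", readwrite=False)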
class Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ComponentsM9L909SchemasCassandraclusterpublicstatusPropertiesDatacentersItemsPropertiesNodesItems(msrest.serialization.Model):
"""ComponentsM9L909SchemasCassandraclusterpublicstatusPropertiesDatacentersItemsPropertiesNodesItems.
:param address: The node's IP address.
:type address: str
:param state: The state of the node in Cassandra ring. Possible values include: "Normal",
"Leaving", "Joining", "Moving", "Stopped".
:type state: str or ~azure.mgmt.cosmosdb.models.NodeState
:param status:
:type status: str
:param load: The amount of file system data in the data directory (e.g., 47.66 kB), excluding
all content in the snapshots subdirectories. Because all SSTable data files are included, any
data that is not cleaned up (such as TTL-expired cells or tombstones) is counted.
:type load: str
:param tokens: List of tokens this node covers.
:type tokens: list[str]
:param size:
:type size: int
:param host_id: The network ID of the node.
:type host_id: str
:param rack: The rack this node is part of.
:type rack: str
:param timestamp: The timestamp when these statistics were captured.
:type timestamp: str
:param disk_used_kb: The amount of disk used, in kB, of the directory /var/lib/cassandra.
:type disk_used_kb: long
:param disk_free_kb: The amount of disk free, in kB, of the directory /var/lib/cassandra.
:type disk_free_kb: long
:param memory_used_kb: Used memory (calculated as total - free - buffers - cache), in kB.
:type memory_used_kb: long
:param memory_buffers_and_cached_kb: Memory used by kernel buffers (Buffers in /proc/meminfo)
and page cache and slabs (Cached and SReclaimable in /proc/meminfo), in kB.
:type memory_buffers_and_cached_kb: long
:param memory_free_kb: Unused memory (MemFree and SwapFree in /proc/meminfo), in kB.
:type memory_free_kb: long
:param memory_total_kb: Total installed memory (MemTotal and SwapTotal in /proc/meminfo), in
kB.
:type memory_total_kb: long
:param cpu_usage: A float representing the current system-wide CPU utilization as a percentage.
:type cpu_usage: float
"""
_attribute_map = {
'address': {'key': 'address', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'load': {'key': 'load', 'type': 'str'},
'tokens': {'key': 'tokens', 'type': '[str]'},
'size': {'key': 'size', 'type': 'int'},
'host_id': {'key': 'hostID', 'type': 'str'},
'rack': {'key': 'rack', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'str'},
'disk_used_kb': {'key': 'diskUsedKB', 'type': 'long'},
'disk_free_kb': {'key': 'diskFreeKB', 'type': 'long'},
'memory_used_kb': {'key': 'memoryUsedKB', 'type': 'long'},
'memory_buffers_and_cached_kb': {'key': 'memoryBuffersAndCachedKB', 'type': 'long'},
'memory_free_kb': {'key': 'memoryFreeKB', 'type': 'long'},
'memory_total_kb': {'key': 'memoryTotalKB', 'type': 'long'},
'cpu_usage': {'key': 'cpuUsage', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(ComponentsM9L909SchemasCassandraclusterpublicstatusPropertiesDatacentersItemsPropertiesNodesItems, self).__init__(**kwargs)
self.address = kwargs.get('address', None)
self.state = kwargs.get('state', None)
self.status = kwargs.get('status', None)
self.load = kwargs.get('load', None)
self.tokens = kwargs.get('tokens', None)
self.size = kwargs.get('size', None)
self.host_id = kwargs.get('host_id', None)
self.rack = kwargs.get('rack', None)
self.timestamp = kwargs.get('timestamp', None)
self.disk_used_kb = kwargs.get('disk_used_kb', None)
self.disk_free_kb = kwargs.get('disk_free_kb', None)
self.memory_used_kb = kwargs.get('memory_used_kb', None)
self.memory_buffers_and_cached_kb = kwargs.get('memory_buffers_and_cached_kb', None)
self.memory_free_kb = kwargs.get('memory_free_kb', None)
self.memory_total_kb = kwargs.get('memory_total_kb', None)
self.cpu_usage = kwargs.get('cpu_usage', None)
class CompositePath(msrest.serialization.Model):
"""CompositePath.
:param path: The path to which the indexing behavior applies. Index paths typically start with
root and end with wildcard (/path/*).
:type path: str
:param order: Sort order for composite paths. Possible values include: "ascending",
"descending".
:type order: str or ~azure.mgmt.cosmosdb.models.CompositePathSortOrder
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'order': {'key': 'order', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CompositePath, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self.order = kwargs.get('order', None)
class ConflictResolutionPolicy(msrest.serialization.Model):
"""The conflict resolution policy for the container.
:param mode: Indicates the conflict resolution mode. Possible values include: "LastWriterWins",
"Custom". Default value: "LastWriterWins".
:type mode: str or ~azure.mgmt.cosmosdb.models.ConflictResolutionMode
:param conflict_resolution_path: The conflict resolution path in the case of LastWriterWins
mode.
:type conflict_resolution_path: str
:param conflict_resolution_procedure: The procedure to resolve conflicts in the case of custom
mode.
:type conflict_resolution_procedure: str
"""
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'conflict_resolution_path': {'key': 'conflictResolutionPath', 'type': 'str'},
'conflict_resolution_procedure': {'key': 'conflictResolutionProcedure', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConflictResolutionPolicy, self).__init__(**kwargs)
self.mode = kwargs.get('mode', "LastWriterWins")
self.conflict_resolution_path = kwargs.get('conflict_resolution_path', None)
self.conflict_resolution_procedure = kwargs.get('conflict_resolution_procedure', None)
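# Illustrative sketch (editor-added): LastWriterWins resolution keyed on a timestamp path; the
# '/_ts' path is a conventional example, not a value enforced by this model.
def _example_conflict_resolution_policy():
    """Return a last-writer-wins policy resolved on the '/_ts' path."""
    return ConflictResolutionPolicy(mode="LastWriterWins", conflict_resolution_path="/_ts")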
class ConnectionError(msrest.serialization.Model):
"""ConnectionError.
:param connection_state: The kind of connection error that occurred. Possible values include:
"Unknown", "OK", "OperatorToDataCenterNetworkError", "DatacenterToDatacenterNetworkError",
"InternalOperatorToDataCenterCertificateError", "InternalError".
:type connection_state: str or ~azure.mgmt.cosmosdb.models.ConnectionState
:param i_p_from: The IP of host that originated the failed connection.
:type i_p_from: str
:param i_p_to: The IP that the connection attempted to reach.
:type i_p_to: str
:param port: The TCP port the connection was attempted on.
:type port: int
:param exception: Detailed error message about the failed connection.
:type exception: str
"""
_attribute_map = {
'connection_state': {'key': 'connectionState', 'type': 'str'},
'i_p_from': {'key': 'iPFrom', 'type': 'str'},
'i_p_to': {'key': 'iPTo', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'exception': {'key': 'exception', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConnectionError, self).__init__(**kwargs)
self.connection_state = kwargs.get('connection_state', None)
self.i_p_from = kwargs.get('i_p_from', None)
self.i_p_to = kwargs.get('i_p_to', None)
self.port = kwargs.get('port', None)
self.exception = kwargs.get('exception', None)
class ConsistencyPolicy(msrest.serialization.Model):
"""The consistency policy for the Cosmos DB database account.
All required parameters must be populated in order to send to Azure.
:param default_consistency_level: Required. The default consistency level and configuration
settings of the Cosmos DB account. Possible values include: "Eventual", "Session",
"BoundedStaleness", "Strong", "ConsistentPrefix".
:type default_consistency_level: str or ~azure.mgmt.cosmosdb.models.DefaultConsistencyLevel
:param max_staleness_prefix: When used with the Bounded Staleness consistency level, this value
represents the number of stale requests tolerated. Accepted range for this value is 1 –
2,147,483,647. Required when defaultConsistencyPolicy is set to 'BoundedStaleness'.
:type max_staleness_prefix: long
:param max_interval_in_seconds: When used with the Bounded Staleness consistency level, this
value represents the time amount of staleness (in seconds) tolerated. Accepted range for this
value is 5 - 86400. Required when defaultConsistencyPolicy is set to 'BoundedStaleness'.
:type max_interval_in_seconds: int
"""
_validation = {
'default_consistency_level': {'required': True},
'max_staleness_prefix': {'maximum': 2147483647, 'minimum': 1},
'max_interval_in_seconds': {'maximum': 86400, 'minimum': 5},
}
_attribute_map = {
'default_consistency_level': {'key': 'defaultConsistencyLevel', 'type': 'str'},
'max_staleness_prefix': {'key': 'maxStalenessPrefix', 'type': 'long'},
'max_interval_in_seconds': {'key': 'maxIntervalInSeconds', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ConsistencyPolicy, self).__init__(**kwargs)
self.default_consistency_level = kwargs['default_consistency_level']
self.max_staleness_prefix = kwargs.get('max_staleness_prefix', None)
self.max_interval_in_seconds = kwargs.get('max_interval_in_seconds', None)
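# Illustrative sketch (editor-added): BoundedStaleness requires both staleness bounds; the
# values below sit inside the validated ranges (1-2,147,483,647 and 5-86,400 respectively).
def _example_consistency_policy():
    """Return a BoundedStaleness policy tolerating 100 stale requests or 300 seconds."""
    return ConsistencyPolicy(
        default_consistency_level="BoundedStaleness",
        max_staleness_prefix=100,
        max_interval_in_seconds=300,
    )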
class ContainerPartitionKey(msrest.serialization.Model):
"""The configuration of the partition key to be used for partitioning data into multiple partitions.
Variables are only populated by the server, and will be ignored when sending a request.
:param paths: List of paths by which data within the container can be partitioned.
:type paths: list[str]
:param kind: Indicates the kind of algorithm used for partitioning. For MultiHash, multiple
partition keys (up to a maximum of three) are supported for container create. Possible values
include: "Hash", "Range", "MultiHash". Default value: "Hash".
:type kind: str or ~azure.mgmt.cosmosdb.models.PartitionKind
:param version: Indicates the version of the partition key definition.
:type version: int
:ivar system_key: Indicates if the container is using a system generated partition key.
:vartype system_key: bool
"""
_validation = {
'version': {'maximum': 2, 'minimum': 1},
'system_key': {'readonly': True},
}
_attribute_map = {
'paths': {'key': 'paths', 'type': '[str]'},
'kind': {'key': 'kind', 'type': 'str'},
'version': {'key': 'version', 'type': 'int'},
'system_key': {'key': 'systemKey', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ContainerPartitionKey, self).__init__(**kwargs)
self.paths = kwargs.get('paths', None)
self.kind = kwargs.get('kind', "Hash")
self.version = kwargs.get('version', None)
self.system_key = None
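# Illustrative sketch (editor-added): a single hash partition key; the '/id' path and
# version 2 (large partition key support) are example choices, not defaults of this model.
def _example_container_partition_key():
    """Return a Hash partition key over the '/id' path."""
    return ContainerPartitionKey(paths=["/id"], kind="Hash", version=2)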
class ContinuousBackupInformation(msrest.serialization.Model):
"""Information about the status of continuous backups.
:param latest_restorable_timestamp: The latest restorable timestamp for a resource.
:type latest_restorable_timestamp: str
"""
_attribute_map = {
'latest_restorable_timestamp': {'key': 'latestRestorableTimestamp', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContinuousBackupInformation, self).__init__(**kwargs)
self.latest_restorable_timestamp = kwargs.get('latest_restorable_timestamp', None)
class ContinuousBackupRestoreLocation(msrest.serialization.Model):
"""Properties of the regional restorable account.
:param location: The name of the continuous backup restore location.
:type location: str
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContinuousBackupRestoreLocation, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
class ContinuousModeBackupPolicy(BackupPolicy):
"""The object representing continuous mode backup policy.
All required parameters must be populated in order to send to Azure.
:param type: Required. Describes the mode of backups. Constant filled by server. Possible
values include: "Periodic", "Continuous".
:type type: str or ~azure.mgmt.cosmosdb.models.BackupPolicyType
:param migration_state: The object representing the state of the migration between the backup
policies.
:type migration_state: ~azure.mgmt.cosmosdb.models.BackupPolicyMigrationState
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'migration_state': {'key': 'migrationState', 'type': 'BackupPolicyMigrationState'},
}
def __init__(
self,
**kwargs
):
super(ContinuousModeBackupPolicy, self).__init__(**kwargs)
self.type = 'Continuous' # type: str
class CorsPolicy(msrest.serialization.Model):
"""The CORS policy for the Cosmos DB database account.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to make a request
against the service via CORS.
:type allowed_origins: str
:param allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a
CORS request.
:type allowed_methods: str
:param allowed_headers: The request headers that the origin domain may specify on the CORS
request.
:type allowed_headers: str
:param exposed_headers: The response headers that may be sent in the response to the CORS
request and exposed by the browser to the request issuer.
:type exposed_headers: str
:param max_age_in_seconds: The maximum amount of time that a browser should cache the preflight
OPTIONS request.
:type max_age_in_seconds: long
"""
_validation = {
'allowed_origins': {'required': True},
'max_age_in_seconds': {'maximum': 2147483647, 'minimum': 1},
}
_attribute_map = {
'allowed_origins': {'key': 'allowedOrigins', 'type': 'str'},
'allowed_methods': {'key': 'allowedMethods', 'type': 'str'},
'allowed_headers': {'key': 'allowedHeaders', 'type': 'str'},
'exposed_headers': {'key': 'exposedHeaders', 'type': 'str'},
'max_age_in_seconds': {'key': 'maxAgeInSeconds', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(CorsPolicy, self).__init__(**kwargs)
self.allowed_origins = kwargs['allowed_origins']
self.allowed_methods = kwargs.get('allowed_methods', None)
self.allowed_headers = kwargs.get('allowed_headers', None)
self.exposed_headers = kwargs.get('exposed_headers', None)
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
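# Illustrative sketch (editor-added): the origin, methods and max-age shown are hypothetical;
# only allowed_origins is required by the validation map above.
def _example_cors_policy():
    """Return a CORS policy admitting GET/POST requests from a single origin."""
    return CorsPolicy(
        allowed_origins="https://www.contoso.com",
        allowed_methods="GET,POST",
        max_age_in_seconds=3600,
    )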
class CreateUpdateOptions(msrest.serialization.Model):
"""CreateUpdateOptions are a list of key-value pairs that describe the resource. Supported keys are "If-Match", "If-None-Match", "Session-Token" and "Throughput".
:param throughput: Request Units per second. For example, "throughput": 10000.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(CreateUpdateOptions, self).__init__(**kwargs)
self.throughput = kwargs.get('throughput', None)
self.autoscale_settings = kwargs.get('autoscale_settings', None)
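# Illustrative sketch (editor-added): manual throughput of 400 RU/s is an example value;
# autoscale could be requested instead via the autoscale_settings field.
def _example_create_update_options():
    """Return request options provisioning 400 RU/s of manual throughput."""
    return CreateUpdateOptions(throughput=400)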
class DatabaseAccountConnectionString(msrest.serialization.Model):
"""Connection string for the Cosmos DB account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar connection_string: Value of the connection string.
:vartype connection_string: str
:ivar description: Description of the connection string.
:vartype description: str
"""
_validation = {
'connection_string': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountConnectionString, self).__init__(**kwargs)
self.connection_string = None
self.description = None
class DatabaseAccountCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB database accounts.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param kind: Indicates the type of database account. This can only be set at database account
creation. Possible values include: "GlobalDocumentDB", "MongoDB", "Parse".
:type kind: str or ~azure.mgmt.cosmosdb.models.DatabaseAccountKind
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.cosmosdb.models.ManagedServiceIdentity
:param consistency_policy: The consistency policy for the Cosmos DB account.
:type consistency_policy: ~azure.mgmt.cosmosdb.models.ConsistencyPolicy
:param locations: Required. An array that contains the georeplication locations enabled for the
Cosmos DB account.
:type locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar database_account_offer_type: The offer type for the database. Has constant value:
"Standard".
:vartype database_account_offer_type: str
:param ip_rules: List of IpRules.
:type ip_rules: list[~azure.mgmt.cosmosdb.models.IpAddressOrRange]
:param is_virtual_network_filter_enabled: Flag to indicate whether to enable/disable Virtual
Network ACL rules.
:type is_virtual_network_filter_enabled: bool
:param enable_automatic_failover: Enables automatic failover of the write region in the rare
event that the region is unavailable due to an outage. Automatic failover will result in a new
write region for the account and is chosen based on the failover priorities configured for the
account.
:type enable_automatic_failover: bool
:param capabilities: List of Cosmos DB capabilities for the account.
:type capabilities: list[~azure.mgmt.cosmosdb.models.Capability]
:param virtual_network_rules: List of Virtual Network ACL rules configured for the Cosmos DB
account.
:type virtual_network_rules: list[~azure.mgmt.cosmosdb.models.VirtualNetworkRule]
:param enable_multiple_write_locations: Enables the account to write in multiple locations.
:type enable_multiple_write_locations: bool
:param enable_cassandra_connector: Enables the cassandra connector on the Cosmos DB C* account.
:type enable_cassandra_connector: bool
:param connector_offer: The cassandra connector offer type for the Cosmos DB database C*
account. Possible values include: "Small".
:type connector_offer: str or ~azure.mgmt.cosmosdb.models.ConnectorOffer
:param disable_key_based_metadata_write_access: Disable write operations on metadata resources
(databases, containers, throughput) via account keys.
:type disable_key_based_metadata_write_access: bool
:param key_vault_key_uri: The URI of the key vault.
:type key_vault_key_uri: str
:param default_identity: The default identity for accessing key vault used in features like
customer managed keys. The default identity needs to be explicitly set by the users. It can be
"FirstPartyIdentity", "SystemAssignedIdentity" and more.
:type default_identity: str
:param public_network_access: Whether requests from Public Network are allowed. Possible values
include: "Enabled", "Disabled".
:type public_network_access: str or ~azure.mgmt.cosmosdb.models.PublicNetworkAccess
:param enable_free_tier: Flag to indicate whether Free Tier is enabled.
:type enable_free_tier: bool
:param api_properties: API specific properties. Currently, supported only for MongoDB API.
:type api_properties: ~azure.mgmt.cosmosdb.models.ApiProperties
:param enable_analytical_storage: Flag to indicate whether to enable storage analytics.
:type enable_analytical_storage: bool
:param analytical_storage_configuration: Analytical storage specific properties.
:type analytical_storage_configuration:
~azure.mgmt.cosmosdb.models.AnalyticalStorageConfiguration
:param create_mode: Enum to indicate the mode of account creation. Possible values include:
"Default", "Restore". Default value: "Default".
:type create_mode: str or ~azure.mgmt.cosmosdb.models.CreateMode
:param backup_policy: The object representing the policy for taking backups on an account.
:type backup_policy: ~azure.mgmt.cosmosdb.models.BackupPolicy
:param cors: The CORS policy for the Cosmos DB database account.
:type cors: list[~azure.mgmt.cosmosdb.models.CorsPolicy]
:param network_acl_bypass: Indicates what services are allowed to bypass firewall checks.
Possible values include: "None", "AzureServices".
:type network_acl_bypass: str or ~azure.mgmt.cosmosdb.models.NetworkAclBypass
:param network_acl_bypass_resource_ids: An array that contains the Resource Ids for Network Acl
Bypass for the Cosmos DB account.
:type network_acl_bypass_resource_ids: list[str]
:param disable_local_auth: Opt out of local authentication and ensure that only MSI and AAD can
be used for authentication.
:type disable_local_auth: bool
:param restore_parameters: Parameters to indicate the information about the restore.
:type restore_parameters: ~azure.mgmt.cosmosdb.models.RestoreParameters
:param capacity: The object that represents all properties related to capacity enforcement on
an account.
:type capacity: ~azure.mgmt.cosmosdb.models.Capacity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'locations': {'required': True},
'database_account_offer_type': {'required': True, 'constant': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'consistency_policy': {'key': 'properties.consistencyPolicy', 'type': 'ConsistencyPolicy'},
'locations': {'key': 'properties.locations', 'type': '[Location]'},
'database_account_offer_type': {'key': 'properties.databaseAccountOfferType', 'type': 'str'},
'ip_rules': {'key': 'properties.ipRules', 'type': '[IpAddressOrRange]'},
'is_virtual_network_filter_enabled': {'key': 'properties.isVirtualNetworkFilterEnabled', 'type': 'bool'},
'enable_automatic_failover': {'key': 'properties.enableAutomaticFailover', 'type': 'bool'},
'capabilities': {'key': 'properties.capabilities', 'type': '[Capability]'},
'virtual_network_rules': {'key': 'properties.virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
'enable_multiple_write_locations': {'key': 'properties.enableMultipleWriteLocations', 'type': 'bool'},
'enable_cassandra_connector': {'key': 'properties.enableCassandraConnector', 'type': 'bool'},
'connector_offer': {'key': 'properties.connectorOffer', 'type': 'str'},
'disable_key_based_metadata_write_access': {'key': 'properties.disableKeyBasedMetadataWriteAccess', 'type': 'bool'},
'key_vault_key_uri': {'key': 'properties.keyVaultKeyUri', 'type': 'str'},
'default_identity': {'key': 'properties.defaultIdentity', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'enable_free_tier': {'key': 'properties.enableFreeTier', 'type': 'bool'},
'api_properties': {'key': 'properties.apiProperties', 'type': 'ApiProperties'},
'enable_analytical_storage': {'key': 'properties.enableAnalyticalStorage', 'type': 'bool'},
'analytical_storage_configuration': {'key': 'properties.analyticalStorageConfiguration', 'type': 'AnalyticalStorageConfiguration'},
'create_mode': {'key': 'properties.createMode', 'type': 'str'},
'backup_policy': {'key': 'properties.backupPolicy', 'type': 'BackupPolicy'},
'cors': {'key': 'properties.cors', 'type': '[CorsPolicy]'},
'network_acl_bypass': {'key': 'properties.networkAclBypass', 'type': 'str'},
'network_acl_bypass_resource_ids': {'key': 'properties.networkAclBypassResourceIds', 'type': '[str]'},
'disable_local_auth': {'key': 'properties.disableLocalAuth', 'type': 'bool'},
'restore_parameters': {'key': 'properties.restoreParameters', 'type': 'RestoreParameters'},
'capacity': {'key': 'properties.capacity', 'type': 'Capacity'},
}
database_account_offer_type = "Standard"
def __init__(
self,
**kwargs
):
super(DatabaseAccountCreateUpdateParameters, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.identity = kwargs.get('identity', None)
self.consistency_policy = kwargs.get('consistency_policy', None)
self.locations = kwargs['locations']
self.ip_rules = kwargs.get('ip_rules', None)
self.is_virtual_network_filter_enabled = kwargs.get('is_virtual_network_filter_enabled', None)
self.enable_automatic_failover = kwargs.get('enable_automatic_failover', None)
self.capabilities = kwargs.get('capabilities', None)
self.virtual_network_rules = kwargs.get('virtual_network_rules', None)
self.enable_multiple_write_locations = kwargs.get('enable_multiple_write_locations', None)
self.enable_cassandra_connector = kwargs.get('enable_cassandra_connector', None)
self.connector_offer = kwargs.get('connector_offer', None)
self.disable_key_based_metadata_write_access = kwargs.get('disable_key_based_metadata_write_access', None)
self.key_vault_key_uri = kwargs.get('key_vault_key_uri', None)
self.default_identity = kwargs.get('default_identity', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.enable_free_tier = kwargs.get('enable_free_tier', None)
self.api_properties = kwargs.get('api_properties', None)
self.enable_analytical_storage = kwargs.get('enable_analytical_storage', None)
self.analytical_storage_configuration = kwargs.get('analytical_storage_configuration', None)
self.create_mode = kwargs.get('create_mode', "Default")
self.backup_policy = kwargs.get('backup_policy', None)
self.cors = kwargs.get('cors', None)
self.network_acl_bypass = kwargs.get('network_acl_bypass', None)
self.network_acl_bypass_resource_ids = kwargs.get('network_acl_bypass_resource_ids', None)
self.disable_local_auth = kwargs.get('disable_local_auth', None)
self.restore_parameters = kwargs.get('restore_parameters', None)
self.capacity = kwargs.get('capacity', None)
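# Editor's note: illustrative usage sketch, not part of the generated model set. It shows how
# the documented kwargs map onto a minimal create/update payload. 'Location' is assumed to be
# the model defined elsewhere in this module (with 'location_name' and 'failover_priority'
# kwargs), as referenced by the docstring above; the region name is hypothetical.
def _example_database_account_create_parameters():
    return DatabaseAccountCreateUpdateParameters(
        location="West US",  # hypothetical region
        locations=[Location(location_name="West US", failover_priority=0)],  # required kwarg
        enable_free_tier=True,
        public_network_access="Enabled",
    )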
class DatabaseAccountGetResults(ARMResourceProperties):
"""An Azure Cosmos DB database account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param kind: Indicates the type of database account. This can only be set at database account
creation. Possible values include: "GlobalDocumentDB", "MongoDB", "Parse".
:type kind: str or ~azure.mgmt.cosmosdb.models.DatabaseAccountKind
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.cosmosdb.models.ManagedServiceIdentity
:ivar system_data: The system meta data relating to this resource.
:vartype system_data: ~azure.mgmt.cosmosdb.models.SystemData
:ivar provisioning_state: The status of the Cosmos DB account at the time the operation was
called. The status can be one of following. 'Creating' – the Cosmos DB account is being
created. When an account is in Creating state, only properties that are specified as input for
the Create Cosmos DB account operation are returned. 'Succeeded' – the Cosmos DB account is
active for use. 'Updating' – the Cosmos DB account is being updated. 'Deleting' – the Cosmos DB
account is being deleted. 'Failed' – the Cosmos DB account failed creation. 'DeletionFailed' –
the Cosmos DB account deletion failed.
:vartype provisioning_state: str
:ivar document_endpoint: The connection endpoint for the Cosmos DB database account.
:vartype document_endpoint: str
:ivar database_account_offer_type: The offer type for the Cosmos DB database account. This
 property is read-only; the only supported offer type is "Standard".
:vartype database_account_offer_type: str
:param ip_rules: List of IpRules.
:type ip_rules: list[~azure.mgmt.cosmosdb.models.IpAddressOrRange]
:param is_virtual_network_filter_enabled: Flag to indicate whether to enable/disable Virtual
Network ACL rules.
:type is_virtual_network_filter_enabled: bool
:param enable_automatic_failover: Enables automatic failover of the write region in the rare
event that the region is unavailable due to an outage. Automatic failover will result in a new
write region for the account and is chosen based on the failover priorities configured for the
account.
:type enable_automatic_failover: bool
:param consistency_policy: The consistency policy for the Cosmos DB database account.
:type consistency_policy: ~azure.mgmt.cosmosdb.models.ConsistencyPolicy
:param capabilities: List of Cosmos DB capabilities for the account.
:type capabilities: list[~azure.mgmt.cosmosdb.models.Capability]
:ivar write_locations: An array that contains the write location for the Cosmos DB account.
:vartype write_locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar read_locations: An array that contains the read locations enabled for the Cosmos DB
 account.
:vartype read_locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar locations: An array that contains all of the locations enabled for the Cosmos DB account.
:vartype locations: list[~azure.mgmt.cosmosdb.models.Location]
:ivar failover_policies: An array that contains the regions ordered by their failover
priorities.
:vartype failover_policies: list[~azure.mgmt.cosmosdb.models.FailoverPolicy]
:param virtual_network_rules: List of Virtual Network ACL rules configured for the Cosmos DB
account.
:type virtual_network_rules: list[~azure.mgmt.cosmosdb.models.VirtualNetworkRule]
:ivar private_endpoint_connections: List of Private Endpoint Connections configured for the
Cosmos DB account.
:vartype private_endpoint_connections:
list[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
:param enable_multiple_write_locations: Enables the account to write in multiple locations.
:type enable_multiple_write_locations: bool
:param enable_cassandra_connector: Enables the cassandra connector on the Cosmos DB C* account.
:type enable_cassandra_connector: bool
:param connector_offer: The cassandra connector offer type for the Cosmos DB database C*
account. Possible values include: "Small".
:type connector_offer: str or ~azure.mgmt.cosmosdb.models.ConnectorOffer
:param disable_key_based_metadata_write_access: Disable write operations on metadata resources
(databases, containers, throughput) via account keys.
:type disable_key_based_metadata_write_access: bool
:param key_vault_key_uri: The URI of the key vault.
:type key_vault_key_uri: str
:param default_identity: The default identity for accessing key vault used in features like
customer managed keys. The default identity needs to be explicitly set by the users. It can be
"FirstPartyIdentity", "SystemAssignedIdentity" and more.
:type default_identity: str
:param public_network_access: Whether requests from Public Network are allowed. Possible values
include: "Enabled", "Disabled".
:type public_network_access: str or ~azure.mgmt.cosmosdb.models.PublicNetworkAccess
:param enable_free_tier: Flag to indicate whether Free Tier is enabled.
:type enable_free_tier: bool
:param api_properties: API specific properties.
:type api_properties: ~azure.mgmt.cosmosdb.models.ApiProperties
:param enable_analytical_storage: Flag to indicate whether to enable storage analytics.
:type enable_analytical_storage: bool
:param analytical_storage_configuration: Analytical storage specific properties.
:type analytical_storage_configuration:
~azure.mgmt.cosmosdb.models.AnalyticalStorageConfiguration
:ivar instance_id: A unique identifier assigned to the database account.
:vartype instance_id: str
:param create_mode: Enum to indicate the mode of account creation. Possible values include:
"Default", "Restore". Default value: "Default".
:type create_mode: str or ~azure.mgmt.cosmosdb.models.CreateMode
:param restore_parameters: Parameters to indicate the information about the restore.
:type restore_parameters: ~azure.mgmt.cosmosdb.models.RestoreParameters
:param backup_policy: The object representing the policy for taking backups on an account.
:type backup_policy: ~azure.mgmt.cosmosdb.models.BackupPolicy
:param cors: The CORS policy for the Cosmos DB database account.
:type cors: list[~azure.mgmt.cosmosdb.models.CorsPolicy]
:param network_acl_bypass: Indicates what services are allowed to bypass firewall checks.
Possible values include: "None", "AzureServices".
:type network_acl_bypass: str or ~azure.mgmt.cosmosdb.models.NetworkAclBypass
:param network_acl_bypass_resource_ids: An array that contains the Resource Ids for Network Acl
Bypass for the Cosmos DB account.
:type network_acl_bypass_resource_ids: list[str]
:param disable_local_auth: Opt out of local authentication, ensuring that only MSI and AAD can
 be used for authentication.
:type disable_local_auth: bool
:param capacity: The object that represents all properties related to capacity enforcement on
an account.
:type capacity: ~azure.mgmt.cosmosdb.models.Capacity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'document_endpoint': {'readonly': True},
'database_account_offer_type': {'readonly': True},
'write_locations': {'readonly': True},
'read_locations': {'readonly': True},
'locations': {'readonly': True},
'failover_policies': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'instance_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'document_endpoint': {'key': 'properties.documentEndpoint', 'type': 'str'},
'database_account_offer_type': {'key': 'properties.databaseAccountOfferType', 'type': 'str'},
'ip_rules': {'key': 'properties.ipRules', 'type': '[IpAddressOrRange]'},
'is_virtual_network_filter_enabled': {'key': 'properties.isVirtualNetworkFilterEnabled', 'type': 'bool'},
'enable_automatic_failover': {'key': 'properties.enableAutomaticFailover', 'type': 'bool'},
'consistency_policy': {'key': 'properties.consistencyPolicy', 'type': 'ConsistencyPolicy'},
'capabilities': {'key': 'properties.capabilities', 'type': '[Capability]'},
'write_locations': {'key': 'properties.writeLocations', 'type': '[Location]'},
'read_locations': {'key': 'properties.readLocations', 'type': '[Location]'},
'locations': {'key': 'properties.locations', 'type': '[Location]'},
'failover_policies': {'key': 'properties.failoverPolicies', 'type': '[FailoverPolicy]'},
'virtual_network_rules': {'key': 'properties.virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'enable_multiple_write_locations': {'key': 'properties.enableMultipleWriteLocations', 'type': 'bool'},
'enable_cassandra_connector': {'key': 'properties.enableCassandraConnector', 'type': 'bool'},
'connector_offer': {'key': 'properties.connectorOffer', 'type': 'str'},
'disable_key_based_metadata_write_access': {'key': 'properties.disableKeyBasedMetadataWriteAccess', 'type': 'bool'},
'key_vault_key_uri': {'key': 'properties.keyVaultKeyUri', 'type': 'str'},
'default_identity': {'key': 'properties.defaultIdentity', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'enable_free_tier': {'key': 'properties.enableFreeTier', 'type': 'bool'},
'api_properties': {'key': 'properties.apiProperties', 'type': 'ApiProperties'},
'enable_analytical_storage': {'key': 'properties.enableAnalyticalStorage', 'type': 'bool'},
'analytical_storage_configuration': {'key': 'properties.analyticalStorageConfiguration', 'type': 'AnalyticalStorageConfiguration'},
'instance_id': {'key': 'properties.instanceId', 'type': 'str'},
'create_mode': {'key': 'properties.createMode', 'type': 'str'},
'restore_parameters': {'key': 'properties.restoreParameters', 'type': 'RestoreParameters'},
'backup_policy': {'key': 'properties.backupPolicy', 'type': 'BackupPolicy'},
'cors': {'key': 'properties.cors', 'type': '[CorsPolicy]'},
'network_acl_bypass': {'key': 'properties.networkAclBypass', 'type': 'str'},
'network_acl_bypass_resource_ids': {'key': 'properties.networkAclBypassResourceIds', 'type': '[str]'},
'disable_local_auth': {'key': 'properties.disableLocalAuth', 'type': 'bool'},
'capacity': {'key': 'properties.capacity', 'type': 'Capacity'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountGetResults, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.identity = kwargs.get('identity', None)
self.system_data = None
self.provisioning_state = None
self.document_endpoint = None
self.database_account_offer_type = None
self.ip_rules = kwargs.get('ip_rules', None)
self.is_virtual_network_filter_enabled = kwargs.get('is_virtual_network_filter_enabled', None)
self.enable_automatic_failover = kwargs.get('enable_automatic_failover', None)
self.consistency_policy = kwargs.get('consistency_policy', None)
self.capabilities = kwargs.get('capabilities', None)
self.write_locations = None
self.read_locations = None
self.locations = None
self.failover_policies = None
self.virtual_network_rules = kwargs.get('virtual_network_rules', None)
self.private_endpoint_connections = None
self.enable_multiple_write_locations = kwargs.get('enable_multiple_write_locations', None)
self.enable_cassandra_connector = kwargs.get('enable_cassandra_connector', None)
self.connector_offer = kwargs.get('connector_offer', None)
self.disable_key_based_metadata_write_access = kwargs.get('disable_key_based_metadata_write_access', None)
self.key_vault_key_uri = kwargs.get('key_vault_key_uri', None)
self.default_identity = kwargs.get('default_identity', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.enable_free_tier = kwargs.get('enable_free_tier', None)
self.api_properties = kwargs.get('api_properties', None)
self.enable_analytical_storage = kwargs.get('enable_analytical_storage', None)
self.analytical_storage_configuration = kwargs.get('analytical_storage_configuration', None)
self.instance_id = None
self.create_mode = kwargs.get('create_mode', "Default")
self.restore_parameters = kwargs.get('restore_parameters', None)
self.backup_policy = kwargs.get('backup_policy', None)
self.cors = kwargs.get('cors', None)
self.network_acl_bypass = kwargs.get('network_acl_bypass', None)
self.network_acl_bypass_resource_ids = kwargs.get('network_acl_bypass_resource_ids', None)
self.disable_local_auth = kwargs.get('disable_local_auth', None)
self.capacity = kwargs.get('capacity', None)
class DatabaseAccountListConnectionStringsResult(msrest.serialization.Model):
"""The connection strings for the given database account.
:param connection_strings: An array that contains the connection strings for the Cosmos DB
account.
:type connection_strings: list[~azure.mgmt.cosmosdb.models.DatabaseAccountConnectionString]
"""
_attribute_map = {
'connection_strings': {'key': 'connectionStrings', 'type': '[DatabaseAccountConnectionString]'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountListConnectionStringsResult, self).__init__(**kwargs)
self.connection_strings = kwargs.get('connection_strings', None)
class DatabaseAccountListReadOnlyKeysResult(msrest.serialization.Model):
"""The read-only access keys for the given database account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar primary_readonly_master_key: Base 64 encoded value of the primary read-only key.
:vartype primary_readonly_master_key: str
:ivar secondary_readonly_master_key: Base 64 encoded value of the secondary read-only key.
:vartype secondary_readonly_master_key: str
"""
_validation = {
'primary_readonly_master_key': {'readonly': True},
'secondary_readonly_master_key': {'readonly': True},
}
_attribute_map = {
'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},
'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountListReadOnlyKeysResult, self).__init__(**kwargs)
self.primary_readonly_master_key = None
self.secondary_readonly_master_key = None
class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult):
"""The access keys for the given database account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar primary_readonly_master_key: Base 64 encoded value of the primary read-only key.
:vartype primary_readonly_master_key: str
:ivar secondary_readonly_master_key: Base 64 encoded value of the secondary read-only key.
:vartype secondary_readonly_master_key: str
:ivar primary_master_key: Base 64 encoded value of the primary read-write key.
:vartype primary_master_key: str
:ivar secondary_master_key: Base 64 encoded value of the secondary read-write key.
:vartype secondary_master_key: str
"""
_validation = {
'primary_readonly_master_key': {'readonly': True},
'secondary_readonly_master_key': {'readonly': True},
'primary_master_key': {'readonly': True},
'secondary_master_key': {'readonly': True},
}
_attribute_map = {
'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},
'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},
'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'},
'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountListKeysResult, self).__init__(**kwargs)
self.primary_master_key = None
self.secondary_master_key = None
class DatabaseAccountRegenerateKeyParameters(msrest.serialization.Model):
"""Parameters to regenerate the keys within the database account.
All required parameters must be populated in order to send to Azure.
:param key_kind: Required. The access key to regenerate. Possible values include: "primary",
"secondary", "primaryReadonly", "secondaryReadonly".
:type key_kind: str or ~azure.mgmt.cosmosdb.models.KeyKind
"""
_validation = {
'key_kind': {'required': True},
}
_attribute_map = {
'key_kind': {'key': 'keyKind', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountRegenerateKeyParameters, self).__init__(**kwargs)
self.key_kind = kwargs['key_kind']
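# Editor's note: illustrative usage sketch, not part of the generated model set. 'key_kind' is
# required and must be one of the documented KeyKind values ("primary", "secondary",
# "primaryReadonly", "secondaryReadonly").
def _example_regenerate_key_parameters():
    return DatabaseAccountRegenerateKeyParameters(key_kind="primary")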
class DatabaseAccountsListResult(msrest.serialization.Model):
"""The List operation response, that contains the database accounts and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of database accounts and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.DatabaseAccountGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseAccountGetResults]'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountsListResult, self).__init__(**kwargs)
self.value = None
class DatabaseAccountUpdateParameters(msrest.serialization.Model):
"""Parameters for patching Azure Cosmos DB database account properties.
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.cosmosdb.models.ManagedServiceIdentity
:param consistency_policy: The consistency policy for the Cosmos DB account.
:type consistency_policy: ~azure.mgmt.cosmosdb.models.ConsistencyPolicy
:param locations: An array that contains the geo-replication locations enabled for the Cosmos DB
 account.
:type locations: list[~azure.mgmt.cosmosdb.models.Location]
:param ip_rules: List of IpRules.
:type ip_rules: list[~azure.mgmt.cosmosdb.models.IpAddressOrRange]
:param is_virtual_network_filter_enabled: Flag to indicate whether to enable/disable Virtual
Network ACL rules.
:type is_virtual_network_filter_enabled: bool
:param enable_automatic_failover: Enables automatic failover of the write region in the rare
event that the region is unavailable due to an outage. Automatic failover will result in a new
write region for the account and is chosen based on the failover priorities configured for the
account.
:type enable_automatic_failover: bool
:param capabilities: List of Cosmos DB capabilities for the account.
:type capabilities: list[~azure.mgmt.cosmosdb.models.Capability]
:param virtual_network_rules: List of Virtual Network ACL rules configured for the Cosmos DB
account.
:type virtual_network_rules: list[~azure.mgmt.cosmosdb.models.VirtualNetworkRule]
:param enable_multiple_write_locations: Enables the account to write in multiple locations.
:type enable_multiple_write_locations: bool
:param enable_cassandra_connector: Enables the cassandra connector on the Cosmos DB C* account.
:type enable_cassandra_connector: bool
:param connector_offer: The cassandra connector offer type for the Cosmos DB database C*
account. Possible values include: "Small".
:type connector_offer: str or ~azure.mgmt.cosmosdb.models.ConnectorOffer
:param disable_key_based_metadata_write_access: Disable write operations on metadata resources
(databases, containers, throughput) via account keys.
:type disable_key_based_metadata_write_access: bool
:param key_vault_key_uri: The URI of the key vault.
:type key_vault_key_uri: str
:param default_identity: The default identity for accessing key vault used in features like
customer managed keys. The default identity needs to be explicitly set by the users. It can be
"FirstPartyIdentity", "SystemAssignedIdentity" and more.
:type default_identity: str
:param public_network_access: Whether requests from Public Network are allowed. Possible values
include: "Enabled", "Disabled".
:type public_network_access: str or ~azure.mgmt.cosmosdb.models.PublicNetworkAccess
:param enable_free_tier: Flag to indicate whether Free Tier is enabled.
:type enable_free_tier: bool
:param api_properties: API specific properties. Currently, supported only for MongoDB API.
:type api_properties: ~azure.mgmt.cosmosdb.models.ApiProperties
:param enable_analytical_storage: Flag to indicate whether to enable storage analytics.
:type enable_analytical_storage: bool
:param analytical_storage_configuration: Analytical storage specific properties.
:type analytical_storage_configuration:
~azure.mgmt.cosmosdb.models.AnalyticalStorageConfiguration
:param backup_policy: The object representing the policy for taking backups on an account.
:type backup_policy: ~azure.mgmt.cosmosdb.models.BackupPolicy
:param cors: The CORS policy for the Cosmos DB database account.
:type cors: list[~azure.mgmt.cosmosdb.models.CorsPolicy]
:param network_acl_bypass: Indicates what services are allowed to bypass firewall checks.
Possible values include: "None", "AzureServices".
:type network_acl_bypass: str or ~azure.mgmt.cosmosdb.models.NetworkAclBypass
:param network_acl_bypass_resource_ids: An array that contains the Resource Ids for Network Acl
Bypass for the Cosmos DB account.
:type network_acl_bypass_resource_ids: list[str]
:param disable_local_auth: Opt out of local authentication, ensuring that only MSI and AAD can
 be used for authentication.
:type disable_local_auth: bool
:param capacity: The object that represents all properties related to capacity enforcement on
an account.
:type capacity: ~azure.mgmt.cosmosdb.models.Capacity
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'consistency_policy': {'key': 'properties.consistencyPolicy', 'type': 'ConsistencyPolicy'},
'locations': {'key': 'properties.locations', 'type': '[Location]'},
'ip_rules': {'key': 'properties.ipRules', 'type': '[IpAddressOrRange]'},
'is_virtual_network_filter_enabled': {'key': 'properties.isVirtualNetworkFilterEnabled', 'type': 'bool'},
'enable_automatic_failover': {'key': 'properties.enableAutomaticFailover', 'type': 'bool'},
'capabilities': {'key': 'properties.capabilities', 'type': '[Capability]'},
'virtual_network_rules': {'key': 'properties.virtualNetworkRules', 'type': '[VirtualNetworkRule]'},
'enable_multiple_write_locations': {'key': 'properties.enableMultipleWriteLocations', 'type': 'bool'},
'enable_cassandra_connector': {'key': 'properties.enableCassandraConnector', 'type': 'bool'},
'connector_offer': {'key': 'properties.connectorOffer', 'type': 'str'},
'disable_key_based_metadata_write_access': {'key': 'properties.disableKeyBasedMetadataWriteAccess', 'type': 'bool'},
'key_vault_key_uri': {'key': 'properties.keyVaultKeyUri', 'type': 'str'},
'default_identity': {'key': 'properties.defaultIdentity', 'type': 'str'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'enable_free_tier': {'key': 'properties.enableFreeTier', 'type': 'bool'},
'api_properties': {'key': 'properties.apiProperties', 'type': 'ApiProperties'},
'enable_analytical_storage': {'key': 'properties.enableAnalyticalStorage', 'type': 'bool'},
'analytical_storage_configuration': {'key': 'properties.analyticalStorageConfiguration', 'type': 'AnalyticalStorageConfiguration'},
'backup_policy': {'key': 'properties.backupPolicy', 'type': 'BackupPolicy'},
'cors': {'key': 'properties.cors', 'type': '[CorsPolicy]'},
'network_acl_bypass': {'key': 'properties.networkAclBypass', 'type': 'str'},
'network_acl_bypass_resource_ids': {'key': 'properties.networkAclBypassResourceIds', 'type': '[str]'},
'disable_local_auth': {'key': 'properties.disableLocalAuth', 'type': 'bool'},
'capacity': {'key': 'properties.capacity', 'type': 'Capacity'},
}
def __init__(
self,
**kwargs
):
super(DatabaseAccountUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
self.identity = kwargs.get('identity', None)
self.consistency_policy = kwargs.get('consistency_policy', None)
self.locations = kwargs.get('locations', None)
self.ip_rules = kwargs.get('ip_rules', None)
self.is_virtual_network_filter_enabled = kwargs.get('is_virtual_network_filter_enabled', None)
self.enable_automatic_failover = kwargs.get('enable_automatic_failover', None)
self.capabilities = kwargs.get('capabilities', None)
self.virtual_network_rules = kwargs.get('virtual_network_rules', None)
self.enable_multiple_write_locations = kwargs.get('enable_multiple_write_locations', None)
self.enable_cassandra_connector = kwargs.get('enable_cassandra_connector', None)
self.connector_offer = kwargs.get('connector_offer', None)
self.disable_key_based_metadata_write_access = kwargs.get('disable_key_based_metadata_write_access', None)
self.key_vault_key_uri = kwargs.get('key_vault_key_uri', None)
self.default_identity = kwargs.get('default_identity', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.enable_free_tier = kwargs.get('enable_free_tier', None)
self.api_properties = kwargs.get('api_properties', None)
self.enable_analytical_storage = kwargs.get('enable_analytical_storage', None)
self.analytical_storage_configuration = kwargs.get('analytical_storage_configuration', None)
self.backup_policy = kwargs.get('backup_policy', None)
self.cors = kwargs.get('cors', None)
self.network_acl_bypass = kwargs.get('network_acl_bypass', None)
self.network_acl_bypass_resource_ids = kwargs.get('network_acl_bypass_resource_ids', None)
self.disable_local_auth = kwargs.get('disable_local_auth', None)
self.capacity = kwargs.get('capacity', None)
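# Editor's note: illustrative usage sketch, not part of the generated model set. It shows a
# patch payload that tightens network access. Only kwargs documented in this file are used;
# IpAddressOrRange is defined later in this module and is resolved when the function is called.
# The IP value and tags are hypothetical.
def _example_database_account_update_parameters():
    return DatabaseAccountUpdateParameters(
        tags={"env": "dev"},
        is_virtual_network_filter_enabled=True,
        ip_rules=[IpAddressOrRange(ip_address_or_range="23.40.210.245")],
        public_network_access="Enabled",
    )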
class DatabaseRestoreResource(msrest.serialization.Model):
"""Specific Databases to restore.
:param database_name: The name of the database available for restore.
:type database_name: str
:param collection_names: The names of the collections available for restore.
:type collection_names: list[str]
"""
_attribute_map = {
'database_name': {'key': 'databaseName', 'type': 'str'},
'collection_names': {'key': 'collectionNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(DatabaseRestoreResource, self).__init__(**kwargs)
self.database_name = kwargs.get('database_name', None)
self.collection_names = kwargs.get('collection_names', None)
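# Editor's note: illustrative usage sketch, not part of the generated model set. It scopes a
# restore to one database and two of its collections; the names are hypothetical. This object
# would normally be attached to RestoreParameters, which is documented above but not defined in
# this excerpt.
def _example_database_restore_resource():
    return DatabaseRestoreResource(
        database_name="inventory-db",
        collection_names=["products", "orders"],
    )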
class DataCenterResource(ARMProxyResource):
"""A managed Cassandra data center.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param properties: Properties of a managed Cassandra data center.
:type properties: ~azure.mgmt.cosmosdb.models.DataCenterResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DataCenterResourceProperties'},
}
def __init__(
self,
**kwargs
):
super(DataCenterResource, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class DataCenterResourceProperties(msrest.serialization.Model):
"""Properties of a managed Cassandra data center.
Variables are only populated by the server, and will be ignored when sending a request.
:param provisioning_state: The status of the resource at the time the operation was called.
Possible values include: "Creating", "Updating", "Deleting", "Succeeded", "Failed", "Canceled".
:type provisioning_state: str or ~azure.mgmt.cosmosdb.models.ManagedCassandraProvisioningState
:param data_center_location: The region this data center should be created in.
:type data_center_location: str
:param delegated_subnet_id: Resource id of a subnet the nodes in this data center should have
their network interfaces connected to. The subnet must be in the same region specified in
'dataCenterLocation' and must be able to route to the subnet specified in the cluster's
'delegatedManagementSubnetId' property. This resource id will be of the form
'/subscriptions/:code:`<subscription id>`/resourceGroups/:code:`<resource
group>`/providers/Microsoft.Network/virtualNetworks/:code:`<virtual
network>`/subnets/:code:`<subnet>`'.
:type delegated_subnet_id: str
:param node_count: The number of nodes the data center should have. This is the desired number.
After it is set, it may take some time for the data center to be scaled to match. To monitor
the number of nodes and their status, use the fetchNodeStatus method on the cluster.
:type node_count: int
:ivar seed_nodes: IP addresses for seed nodes in this data center. This is for reference.
Generally you will want to use the seedNodes property on the cluster, which aggregates the seed
nodes from all data centers in the cluster.
:vartype seed_nodes: list[~azure.mgmt.cosmosdb.models.SeedNode]
:param base64_encoded_cassandra_yaml_fragment: A fragment of a cassandra.yaml configuration
file to be included in the cassandra.yaml for all nodes in this data center. The fragment
should be Base64 encoded, and only a subset of keys are allowed.
:type base64_encoded_cassandra_yaml_fragment: str
:param managed_disk_customer_key_uri: Key URI to use for encryption of managed disks. Ensure
 that the system-assigned identity of the cluster has been granted the appropriate permissions
 (key get/wrap/unwrap) on the key.
:type managed_disk_customer_key_uri: str
:param backup_storage_customer_key_uri: Indicates the Key Uri of the customer key to use for
encryption of the backup storage account.
:type backup_storage_customer_key_uri: str
:param sku: Virtual Machine SKU used for data centers. Default value is Standard_DS14_v2.
:type sku: str
:param disk_sku: Disk SKU used for data centers. Default value is P30.
:type disk_sku: str
:param disk_capacity: Number of disks used for data centers. Default value is 4.
:type disk_capacity: int
:param availability_zone: If the Azure data center has Availability Zone support, apply it to
 the Virtual Machine Scale Set that hosts the Cassandra data center virtual machines.
:type availability_zone: bool
"""
_validation = {
'seed_nodes': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'data_center_location': {'key': 'dataCenterLocation', 'type': 'str'},
'delegated_subnet_id': {'key': 'delegatedSubnetId', 'type': 'str'},
'node_count': {'key': 'nodeCount', 'type': 'int'},
'seed_nodes': {'key': 'seedNodes', 'type': '[SeedNode]'},
'base64_encoded_cassandra_yaml_fragment': {'key': 'base64EncodedCassandraYamlFragment', 'type': 'str'},
'managed_disk_customer_key_uri': {'key': 'managedDiskCustomerKeyUri', 'type': 'str'},
'backup_storage_customer_key_uri': {'key': 'backupStorageCustomerKeyUri', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'disk_sku': {'key': 'diskSku', 'type': 'str'},
'disk_capacity': {'key': 'diskCapacity', 'type': 'int'},
'availability_zone': {'key': 'availabilityZone', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(DataCenterResourceProperties, self).__init__(**kwargs)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.data_center_location = kwargs.get('data_center_location', None)
self.delegated_subnet_id = kwargs.get('delegated_subnet_id', None)
self.node_count = kwargs.get('node_count', None)
self.seed_nodes = None
self.base64_encoded_cassandra_yaml_fragment = kwargs.get('base64_encoded_cassandra_yaml_fragment', None)
self.managed_disk_customer_key_uri = kwargs.get('managed_disk_customer_key_uri', None)
self.backup_storage_customer_key_uri = kwargs.get('backup_storage_customer_key_uri', None)
self.sku = kwargs.get('sku', None)
self.disk_sku = kwargs.get('disk_sku', None)
self.disk_capacity = kwargs.get('disk_capacity', None)
self.availability_zone = kwargs.get('availability_zone', None)
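# Editor's note: illustrative usage sketch, not part of the generated model set. It shows
# desired-state properties for a managed Cassandra data center using only kwargs documented in
# this file. The subscription/resource-group segments of the subnet id are placeholders, and the
# SKU/disk values mirror the documented defaults.
def _example_data_center_resource_properties():
    return DataCenterResourceProperties(
        data_center_location="West US",  # hypothetical region
        delegated_subnet_id=(
            "/subscriptions/<subscription id>/resourceGroups/<resource group>"
            "/providers/Microsoft.Network/virtualNetworks/<virtual network>/subnets/<subnet>"
        ),
        node_count=3,
        sku="Standard_DS14_v2",
        disk_sku="P30",
        disk_capacity=4,
    )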
class ErrorResponse(msrest.serialization.Model):
"""Error Response.
:param code: Error code.
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class ExcludedPath(msrest.serialization.Model):
"""ExcludedPath.
:param path: The path to which the indexing behavior applies. Index paths typically start
 with the root and end with a wildcard (/path/*).
:type path: str
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExcludedPath, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
class FailoverPolicies(msrest.serialization.Model):
"""The list of new failover policies for the failover priority change.
All required parameters must be populated in order to send to Azure.
:param failover_policies: Required. List of failover policies.
:type failover_policies: list[~azure.mgmt.cosmosdb.models.FailoverPolicy]
"""
_validation = {
'failover_policies': {'required': True},
}
_attribute_map = {
'failover_policies': {'key': 'failoverPolicies', 'type': '[FailoverPolicy]'},
}
def __init__(
self,
**kwargs
):
super(FailoverPolicies, self).__init__(**kwargs)
self.failover_policies = kwargs['failover_policies']
class FailoverPolicy(msrest.serialization.Model):
"""The failover policy for a given region of a database account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique identifier of the region in which the database account replicates to.
Example: <accountName>-<locationName>.
:vartype id: str
:param location_name: The name of the region in which the database account exists.
:type location_name: str
:param failover_priority: The failover priority of the region. A failover priority of 0
indicates a write region. The maximum value for a failover priority = (total number of regions
- 1). Failover priority values must be unique for each of the regions in which the database
account exists.
:type failover_priority: int
"""
_validation = {
'id': {'readonly': True},
'failover_priority': {'minimum': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'location_name': {'key': 'locationName', 'type': 'str'},
'failover_priority': {'key': 'failoverPriority', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FailoverPolicy, self).__init__(**kwargs)
self.id = None
self.location_name = kwargs.get('location_name', None)
self.failover_priority = kwargs.get('failover_priority', None)
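# Editor's note: illustrative usage sketch, not part of the generated model set. It shows a
# failover priority change: priority 0 marks the write region, and priorities must be unique per
# region, as documented above. The region names are hypothetical.
def _example_failover_policies():
    return FailoverPolicies(
        failover_policies=[
            FailoverPolicy(location_name="West US", failover_priority=0),
            FailoverPolicy(location_name="East US", failover_priority=1),
        ]
    )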
class GremlinDatabaseCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB Gremlin database.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a Gremlin database.
:type resource: ~azure.mgmt.cosmosdb.models.GremlinDatabaseResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'GremlinDatabaseResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class GremlinDatabaseGetPropertiesOptions(OptionsResource):
"""GremlinDatabaseGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseGetPropertiesOptions, self).__init__(**kwargs)
class GremlinDatabaseResource(msrest.serialization.Model):
"""Cosmos DB Gremlin database resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Gremlin database.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseResource, self).__init__(**kwargs)
self.id = kwargs['id']
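# Editor's note: illustrative usage sketch, not part of the generated model set. It wires a
# Gremlin database resource into its create/update parameters; 'id' is the database name and
# 'resource' is required. The 'options' kwarg is omitted because CreateUpdateOptions is not
# defined in this excerpt, and the region name is hypothetical.
def _example_gremlin_database_create_parameters():
    return GremlinDatabaseCreateUpdateParameters(
        location="West US",  # hypothetical region
        resource=GremlinDatabaseResource(id="graph-db"),
    )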
class GremlinDatabaseGetPropertiesResource(ExtendedResourceProperties, GremlinDatabaseResource):
"""GremlinDatabaseGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Gremlin database.
:type id: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.rid = None
self.ts = None
self.etag = None
class GremlinDatabaseGetResults(ARMResourceProperties):
"""An Azure Cosmos DB Gremlin database.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.GremlinDatabaseGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.GremlinDatabaseGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'GremlinDatabaseGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'GremlinDatabaseGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class GremlinDatabaseListResult(msrest.serialization.Model):
"""The List operation response, that contains the Gremlin databases and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Gremlin databases and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[GremlinDatabaseGetResults]'},
}
def __init__(
self,
**kwargs
):
super(GremlinDatabaseListResult, self).__init__(**kwargs)
self.value = None
class GremlinGraphCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB Gremlin graph.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a Gremlin graph.
:type resource: ~azure.mgmt.cosmosdb.models.GremlinGraphResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'GremlinGraphResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class GremlinGraphGetPropertiesOptions(OptionsResource):
"""GremlinGraphGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphGetPropertiesOptions, self).__init__(**kwargs)
class GremlinGraphResource(msrest.serialization.Model):
"""Cosmos DB Gremlin graph resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Gremlin graph.
:type id: str
:param indexing_policy: The configuration of the indexing policy. By default, the indexing is
automatic for all document paths within the graph.
:type indexing_policy: ~azure.mgmt.cosmosdb.models.IndexingPolicy
:param partition_key: The configuration of the partition key to be used for partitioning data
into multiple partitions.
:type partition_key: ~azure.mgmt.cosmosdb.models.ContainerPartitionKey
:param default_ttl: Default time to live.
:type default_ttl: int
:param unique_key_policy: The unique key policy configuration for specifying uniqueness
constraints on documents in the collection in the Azure Cosmos DB service.
:type unique_key_policy: ~azure.mgmt.cosmosdb.models.UniqueKeyPolicy
:param conflict_resolution_policy: The conflict resolution policy for the graph.
:type conflict_resolution_policy: ~azure.mgmt.cosmosdb.models.ConflictResolutionPolicy
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'indexing_policy': {'key': 'indexingPolicy', 'type': 'IndexingPolicy'},
'partition_key': {'key': 'partitionKey', 'type': 'ContainerPartitionKey'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'unique_key_policy': {'key': 'uniqueKeyPolicy', 'type': 'UniqueKeyPolicy'},
'conflict_resolution_policy': {'key': 'conflictResolutionPolicy', 'type': 'ConflictResolutionPolicy'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.indexing_policy = kwargs.get('indexing_policy', None)
self.partition_key = kwargs.get('partition_key', None)
self.default_ttl = kwargs.get('default_ttl', None)
self.unique_key_policy = kwargs.get('unique_key_policy', None)
self.conflict_resolution_policy = kwargs.get('conflict_resolution_policy', None)
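# Editor's note: illustrative usage sketch, not part of the generated model set. It shows a
# Gremlin graph resource with a default TTL (assumed to be expressed in seconds) and an explicit
# indexing policy. IndexingPolicy is defined later in this module and is resolved when the
# function is called; the partition key is omitted because ContainerPartitionKey is not defined
# in this excerpt, and the graph name is hypothetical.
def _example_gremlin_graph_resource():
    return GremlinGraphResource(
        id="people-graph",
        default_ttl=604800,  # one week, assuming seconds
        indexing_policy=IndexingPolicy(automatic=True, indexing_mode="consistent"),
    )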
class GremlinGraphGetPropertiesResource(ExtendedResourceProperties, GremlinGraphResource):
"""GremlinGraphGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB Gremlin graph.
:type id: str
:param indexing_policy: The configuration of the indexing policy. By default, the indexing is
automatic for all document paths within the graph.
:type indexing_policy: ~azure.mgmt.cosmosdb.models.IndexingPolicy
:param partition_key: The configuration of the partition key to be used for partitioning data
into multiple partitions.
:type partition_key: ~azure.mgmt.cosmosdb.models.ContainerPartitionKey
:param default_ttl: Default time to live.
:type default_ttl: int
:param unique_key_policy: The unique key policy configuration for specifying uniqueness
constraints on documents in the collection in the Azure Cosmos DB service.
:type unique_key_policy: ~azure.mgmt.cosmosdb.models.UniqueKeyPolicy
:param conflict_resolution_policy: The conflict resolution policy for the graph.
:type conflict_resolution_policy: ~azure.mgmt.cosmosdb.models.ConflictResolutionPolicy
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'indexing_policy': {'key': 'indexingPolicy', 'type': 'IndexingPolicy'},
'partition_key': {'key': 'partitionKey', 'type': 'ContainerPartitionKey'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'unique_key_policy': {'key': 'uniqueKeyPolicy', 'type': 'UniqueKeyPolicy'},
'conflict_resolution_policy': {'key': 'conflictResolutionPolicy', 'type': 'ConflictResolutionPolicy'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.indexing_policy = kwargs.get('indexing_policy', None)
self.partition_key = kwargs.get('partition_key', None)
self.default_ttl = kwargs.get('default_ttl', None)
self.unique_key_policy = kwargs.get('unique_key_policy', None)
self.conflict_resolution_policy = kwargs.get('conflict_resolution_policy', None)
self.rid = None
self.ts = None
self.etag = None
class GremlinGraphGetResults(ARMResourceProperties):
"""An Azure Cosmos DB Gremlin graph.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.GremlinGraphGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.GremlinGraphGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'GremlinGraphGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'GremlinGraphGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class GremlinGraphListResult(msrest.serialization.Model):
"""The List operation response, that contains the graphs and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of graphs and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.GremlinGraphGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[GremlinGraphGetResults]'},
}
def __init__(
self,
**kwargs
):
super(GremlinGraphListResult, self).__init__(**kwargs)
self.value = None
class IncludedPath(msrest.serialization.Model):
"""The paths that are included in indexing.
:param path: The path to which the indexing behavior applies. Index paths typically start
 with the root and end with a wildcard (/path/*).
:type path: str
:param indexes: List of indexes for this path.
:type indexes: list[~azure.mgmt.cosmosdb.models.Indexes]
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'indexes': {'key': 'indexes', 'type': '[Indexes]'},
}
def __init__(
self,
**kwargs
):
super(IncludedPath, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self.indexes = kwargs.get('indexes', None)
class Indexes(msrest.serialization.Model):
"""The indexes for the path.
:param data_type: The datatype to which the indexing behavior is applied. Possible values
include: "String", "Number", "Point", "Polygon", "LineString", "MultiPolygon". Default value:
"String".
:type data_type: str or ~azure.mgmt.cosmosdb.models.DataType
:param precision: The precision of the index. -1 is maximum precision.
:type precision: int
:param kind: Indicates the type of index. Possible values include: "Hash", "Range", "Spatial".
Default value: "Hash".
:type kind: str or ~azure.mgmt.cosmosdb.models.IndexKind
"""
_attribute_map = {
'data_type': {'key': 'dataType', 'type': 'str'},
'precision': {'key': 'precision', 'type': 'int'},
'kind': {'key': 'kind', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Indexes, self).__init__(**kwargs)
self.data_type = kwargs.get('data_type', "String")
self.precision = kwargs.get('precision', None)
self.kind = kwargs.get('kind', "Hash")
class IndexingPolicy(msrest.serialization.Model):
"""Cosmos DB indexing policy.
:param automatic: Indicates if the indexing policy is automatic.
:type automatic: bool
:param indexing_mode: Indicates the indexing mode. Possible values include: "consistent",
"lazy", "none". Default value: "consistent".
:type indexing_mode: str or ~azure.mgmt.cosmosdb.models.IndexingMode
:param included_paths: List of paths to include in the indexing.
:type included_paths: list[~azure.mgmt.cosmosdb.models.IncludedPath]
:param excluded_paths: List of paths to exclude from indexing.
:type excluded_paths: list[~azure.mgmt.cosmosdb.models.ExcludedPath]
:param composite_indexes: List of composite path list.
:type composite_indexes: list[list[~azure.mgmt.cosmosdb.models.CompositePath]]
:param spatial_indexes: List of spatial specifics.
:type spatial_indexes: list[~azure.mgmt.cosmosdb.models.SpatialSpec]
"""
_attribute_map = {
'automatic': {'key': 'automatic', 'type': 'bool'},
'indexing_mode': {'key': 'indexingMode', 'type': 'str'},
'included_paths': {'key': 'includedPaths', 'type': '[IncludedPath]'},
'excluded_paths': {'key': 'excludedPaths', 'type': '[ExcludedPath]'},
'composite_indexes': {'key': 'compositeIndexes', 'type': '[[CompositePath]]'},
'spatial_indexes': {'key': 'spatialIndexes', 'type': '[SpatialSpec]'},
}
def __init__(
self,
**kwargs
):
super(IndexingPolicy, self).__init__(**kwargs)
self.automatic = kwargs.get('automatic', None)
self.indexing_mode = kwargs.get('indexing_mode', "consistent")
self.included_paths = kwargs.get('included_paths', None)
self.excluded_paths = kwargs.get('excluded_paths', None)
self.composite_indexes = kwargs.get('composite_indexes', None)
self.spatial_indexes = kwargs.get('spatial_indexes', None)
class IpAddressOrRange(msrest.serialization.Model):
"""IpAddressOrRange object.
:param ip_address_or_range: A single IPv4 address or a single IPv4 address range in CIDR
format. Provided IPs must be well-formatted and cannot be contained in one of the following
ranges: 10.0.0.0/8, 100.64.0.0/10, 172.16.0.0/12, 192.168.0.0/16, since these are not
enforceable by the IP address filter. Examples of valid inputs: “23.40.210.245” or
“23.40.210.0/8”.
:type ip_address_or_range: str
"""
_attribute_map = {
'ip_address_or_range': {'key': 'ipAddressOrRange', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IpAddressOrRange, self).__init__(**kwargs)
self.ip_address_or_range = kwargs.get('ip_address_or_range', None)
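# Illustrative sketch (hypothetical address): allowing a single client IP
# through the account-level IP filter.
#
#     ip_rule = IpAddressOrRange(ip_address_or_range="23.40.210.245")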
class ListClusters(msrest.serialization.Model):
"""List of managed Cassandra clusters.
:param value: Container for the array of clusters.
:type value: list[~azure.mgmt.cosmosdb.models.ClusterResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ClusterResource]'},
}
def __init__(
self,
**kwargs
):
super(ListClusters, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class ListDataCenters(msrest.serialization.Model):
"""List of managed Cassandra data centers and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Container for array of data centers.
:vartype value: list[~azure.mgmt.cosmosdb.models.DataCenterResource]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCenterResource]'},
}
def __init__(
self,
**kwargs
):
super(ListDataCenters, self).__init__(**kwargs)
self.value = None
class Location(msrest.serialization.Model):
"""A region in which the Azure Cosmos DB database account is deployed.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique identifier of the region within the database account. Example:
<accountName>-<locationName>.
:vartype id: str
:param location_name: The name of the region.
:type location_name: str
:ivar document_endpoint: The connection endpoint for the specific region. Example:
https://<accountName>-<locationName>.documents.azure.com:443/.
:vartype document_endpoint: str
:ivar provisioning_state: The status of the Cosmos DB account at the time the operation was
called. The status can be one of following. 'Creating' – the Cosmos DB account is being
created. When an account is in Creating state, only properties that are specified as input for
the Create Cosmos DB account operation are returned. 'Succeeded' – the Cosmos DB account is
active for use. 'Updating' – the Cosmos DB account is being updated. 'Deleting' – the Cosmos DB
account is being deleted. 'Failed' – the Cosmos DB account failed creation. 'DeletionFailed' –
the Cosmos DB account deletion failed.
:vartype provisioning_state: str
:param failover_priority: The failover priority of the region. A failover priority of 0
indicates a write region. The maximum value for a failover priority = (total number of regions
- 1). Failover priority values must be unique for each of the regions in which the database
account exists.
:type failover_priority: int
:param is_zone_redundant: Flag to indicate whether or not this region is an AvailabilityZone
region.
:type is_zone_redundant: bool
"""
_validation = {
'id': {'readonly': True},
'document_endpoint': {'readonly': True},
'provisioning_state': {'readonly': True},
'failover_priority': {'minimum': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'location_name': {'key': 'locationName', 'type': 'str'},
'document_endpoint': {'key': 'documentEndpoint', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'failover_priority': {'key': 'failoverPriority', 'type': 'int'},
'is_zone_redundant': {'key': 'isZoneRedundant', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(Location, self).__init__(**kwargs)
self.id = None
self.location_name = kwargs.get('location_name', None)
self.document_endpoint = None
self.provisioning_state = None
self.failover_priority = kwargs.get('failover_priority', None)
self.is_zone_redundant = kwargs.get('is_zone_redundant', None)
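# Illustrative sketch (hypothetical values): declaring a region for an account;
# failover_priority 0 marks the write region, per the docstring above.
#
#     write_region = Location(
#         location_name="West US 2",
#         failover_priority=0,
#         is_zone_redundant=False,
#     )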
class LocationGetResult(ARMProxyResource):
"""Cosmos DB location get result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param properties: Cosmos DB location metadata.
:type properties: ~azure.mgmt.cosmosdb.models.LocationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'LocationProperties'},
}
def __init__(
self,
**kwargs
):
super(LocationGetResult, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None)
class LocationListResult(msrest.serialization.Model):
"""The List operation response, that contains Cosmos DB locations and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Cosmos DB locations and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.LocationGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[LocationGetResult]'},
}
def __init__(
self,
**kwargs
):
super(LocationListResult, self).__init__(**kwargs)
self.value = None
class LocationProperties(msrest.serialization.Model):
"""Cosmos DB location metadata.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar supports_availability_zone: Flag indicating whether the location supports availability
zones or not.
:vartype supports_availability_zone: bool
:ivar is_residency_restricted: Flag indicating whether the location is residency sensitive.
:vartype is_residency_restricted: bool
:ivar backup_storage_redundancies: The properties of available backup storage redundancies.
:vartype backup_storage_redundancies: list[str or
~azure.mgmt.cosmosdb.models.BackupStorageRedundancy]
"""
_validation = {
'supports_availability_zone': {'readonly': True},
'is_residency_restricted': {'readonly': True},
'backup_storage_redundancies': {'readonly': True},
}
_attribute_map = {
'supports_availability_zone': {'key': 'supportsAvailabilityZone', 'type': 'bool'},
'is_residency_restricted': {'key': 'isResidencyRestricted', 'type': 'bool'},
'backup_storage_redundancies': {'key': 'backupStorageRedundancies', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(LocationProperties, self).__init__(**kwargs)
self.supports_availability_zone = None
self.is_residency_restricted = None
self.backup_storage_redundancies = None
class ManagedCassandraManagedServiceIdentity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The object id of the identity resource.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the resource.
:vartype tenant_id: str
:param type: The type of the resource. Possible values include: "SystemAssigned", "None".
:type type: str or ~azure.mgmt.cosmosdb.models.ManagedCassandraResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedCassandraManagedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
class ManagedCassandraReaperStatus(msrest.serialization.Model):
"""ManagedCassandraReaperStatus.
:param healthy:
:type healthy: bool
:param repair_run_ids: Dictionary of :code:`<string>`.
:type repair_run_ids: dict[str, str]
:param repair_schedules: Dictionary of :code:`<string>`.
:type repair_schedules: dict[str, str]
"""
_attribute_map = {
'healthy': {'key': 'healthy', 'type': 'bool'},
'repair_run_ids': {'key': 'repairRunIds', 'type': '{str}'},
'repair_schedules': {'key': 'repairSchedules', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ManagedCassandraReaperStatus, self).__init__(**kwargs)
self.healthy = kwargs.get('healthy', None)
self.repair_run_ids = kwargs.get('repair_run_ids', None)
self.repair_schedules = kwargs.get('repair_schedules', None)
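# Illustrative sketch (hypothetical values): the reaper status is normally
# populated from a service response, but the model can be built directly, e.g.
# for tests.
#
#     reaper = ManagedCassandraReaperStatus(
#         healthy=True,
#         repair_run_ids={"run-1": "RUNNING"},
#     )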
class ManagedServiceIdentity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the system assigned identity. This property will only
be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the system assigned identity. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:param type: The type of identity used for the resource. The type 'SystemAssigned,UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. The type
'None' will remove any identities from the service. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned", "None".
:type type: str or ~azure.mgmt.cosmosdb.models.ResourceIdentityType
:param user_assigned_identities: The list of user identities associated with the resource. The user
identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.cosmosdb.models.Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
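# Illustrative sketch: requesting a system-assigned identity. principal_id and
# tenant_id are read-only and filled in by the server on the response.
#
#     identity = ManagedServiceIdentity(type="SystemAssigned")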
class Metric(msrest.serialization.Model):
"""Metric data.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar start_time: The start time for the metric (ISO-8601 format).
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time for the metric (ISO-8601 format).
:vartype end_time: ~datetime.datetime
:ivar time_grain: The time grain to be used to summarize the metric values.
:vartype time_grain: str
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
:ivar metric_values: The metric values for the specified time window and timestep.
:vartype metric_values: list[~azure.mgmt.cosmosdb.models.MetricValue]
"""
_validation = {
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'time_grain': {'readonly': True},
'unit': {'readonly': True},
'name': {'readonly': True},
'metric_values': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'metric_values': {'key': 'metricValues', 'type': '[MetricValue]'},
}
def __init__(
self,
**kwargs
):
super(Metric, self).__init__(**kwargs)
self.start_time = None
self.end_time = None
self.time_grain = None
self.unit = None
self.name = None
self.metric_values = None
class MetricAvailability(msrest.serialization.Model):
"""The availability of the metric.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar time_grain: The time grain to be used to summarize the metric values.
:vartype time_grain: str
:ivar retention: The retention for the metric values.
:vartype retention: str
"""
_validation = {
'time_grain': {'readonly': True},
'retention': {'readonly': True},
}
_attribute_map = {
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'retention': {'key': 'retention', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricAvailability, self).__init__(**kwargs)
self.time_grain = None
self.retention = None
class MetricDefinition(msrest.serialization.Model):
"""The definition of a metric.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar metric_availabilities: The list of metric availabilities for the account.
:vartype metric_availabilities: list[~azure.mgmt.cosmosdb.models.MetricAvailability]
:ivar primary_aggregation_type: The primary aggregation type of the metric. Possible values
include: "None", "Average", "Total", "Minimum", "Maximum", "Last".
:vartype primary_aggregation_type: str or ~azure.mgmt.cosmosdb.models.PrimaryAggregationType
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar resource_uri: The resource uri of the database.
:vartype resource_uri: str
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
"""
_validation = {
'metric_availabilities': {'readonly': True},
'primary_aggregation_type': {'readonly': True},
'unit': {'readonly': True},
'resource_uri': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
}
def __init__(
self,
**kwargs
):
super(MetricDefinition, self).__init__(**kwargs)
self.metric_availabilities = None
self.primary_aggregation_type = None
self.unit = None
self.resource_uri = None
self.name = None
class MetricDefinitionsListResult(msrest.serialization.Model):
"""The response to a list metric definitions request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of metric definitions for the account.
:vartype value: list[~azure.mgmt.cosmosdb.models.MetricDefinition]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MetricDefinition]'},
}
def __init__(
self,
**kwargs
):
super(MetricDefinitionsListResult, self).__init__(**kwargs)
self.value = None
class MetricListResult(msrest.serialization.Model):
"""The response to a list metrics request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of metrics for the account.
:vartype value: list[~azure.mgmt.cosmosdb.models.Metric]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Metric]'},
}
def __init__(
self,
**kwargs
):
super(MetricListResult, self).__init__(**kwargs)
self.value = None
class MetricName(msrest.serialization.Model):
"""A metric name.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the metric.
:vartype value: str
:ivar localized_value: The friendly name of the metric.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MetricName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class MetricValue(msrest.serialization.Model):
"""Represents metrics values.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar count: The number of values for the metric.
:vartype count: int
:ivar average: The average value of the metric.
:vartype average: float
:ivar maximum: The max value of the metric.
:vartype maximum: float
:ivar minimum: The min value of the metric.
:vartype minimum: float
:ivar timestamp: The metric timestamp (ISO-8601 format).
:vartype timestamp: ~datetime.datetime
:ivar total: The total value of the metric.
:vartype total: float
"""
_validation = {
'count': {'readonly': True},
'average': {'readonly': True},
'maximum': {'readonly': True},
'minimum': {'readonly': True},
'timestamp': {'readonly': True},
'total': {'readonly': True},
}
_attribute_map = {
'count': {'key': '_count', 'type': 'int'},
'average': {'key': 'average', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'total': {'key': 'total', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(MetricValue, self).__init__(**kwargs)
self.count = None
self.average = None
self.maximum = None
self.minimum = None
self.timestamp = None
self.total = None
class MongoDBCollectionCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB MongoDB collection.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a MongoDB collection.
:type resource: ~azure.mgmt.cosmosdb.models.MongoDBCollectionResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'MongoDBCollectionResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class MongoDBCollectionGetPropertiesOptions(OptionsResource):
"""MongoDBCollectionGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionGetPropertiesOptions, self).__init__(**kwargs)
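# Illustrative sketch (hypothetical value): manual throughput as it appears on
# the options of a Get response. This assumes the OptionsResource base class
# accepts the throughput keyword, as the documented params above indicate.
#
#     options = MongoDBCollectionGetPropertiesOptions(throughput=400)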
class MongoDBCollectionResource(msrest.serialization.Model):
"""Cosmos DB MongoDB collection resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB MongoDB collection.
:type id: str
:param shard_key: A key-value pair of shard keys to be applied for the request.
:type shard_key: dict[str, str]
:param indexes: List of index keys.
:type indexes: list[~azure.mgmt.cosmosdb.models.MongoIndex]
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: int
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'shard_key': {'key': 'shardKey', 'type': '{str}'},
'indexes': {'key': 'indexes', 'type': '[MongoIndex]'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.shard_key = kwargs.get('shard_key', None)
self.indexes = kwargs.get('indexes', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
class MongoDBCollectionGetPropertiesResource(ExtendedResourceProperties, MongoDBCollectionResource):
"""MongoDBCollectionGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB MongoDB collection.
:type id: str
:param shard_key: A key-value pair of shard keys to be applied for the request.
:type shard_key: dict[str, str]
:param indexes: List of index keys.
:type indexes: list[~azure.mgmt.cosmosdb.models.MongoIndex]
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: int
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'shard_key': {'key': 'shardKey', 'type': '{str}'},
'indexes': {'key': 'indexes', 'type': '[MongoIndex]'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'int'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.shard_key = kwargs.get('shard_key', None)
self.indexes = kwargs.get('indexes', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
self.rid = None
self.ts = None
self.etag = None
class MongoDBCollectionGetResults(ARMResourceProperties):
"""An Azure Cosmos DB MongoDB collection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.MongoDBCollectionGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.MongoDBCollectionGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'MongoDBCollectionGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'MongoDBCollectionGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class MongoDBCollectionListResult(msrest.serialization.Model):
"""The List operation response, that contains the MongoDB collections and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of MongoDB collections and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.MongoDBCollectionGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MongoDBCollectionGetResults]'},
}
def __init__(
self,
**kwargs
):
super(MongoDBCollectionListResult, self).__init__(**kwargs)
self.value = None
class MongoDBDatabaseCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB MongoDB database.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a MongoDB database.
:type resource: ~azure.mgmt.cosmosdb.models.MongoDBDatabaseResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'MongoDBDatabaseResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class MongoDBDatabaseGetPropertiesOptions(OptionsResource):
"""MongoDBDatabaseGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseGetPropertiesOptions, self).__init__(**kwargs)
class MongoDBDatabaseResource(msrest.serialization.Model):
"""Cosmos DB MongoDB database resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB MongoDB database.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseResource, self).__init__(**kwargs)
self.id = kwargs['id']
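# Illustrative sketch (hypothetical names): the payload for creating a MongoDB
# database, pairing the required resource with optional request options.
# CreateUpdateOptions is defined elsewhere in this module, and location is
# handled by the ARMResourceProperties base class.
#
#     create_params = MongoDBDatabaseCreateUpdateParameters(
#         location="West US 2",
#         resource=MongoDBDatabaseResource(id="mydatabase"),
#         options=CreateUpdateOptions(throughput=400),
#     )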
class MongoDBDatabaseGetPropertiesResource(ExtendedResourceProperties, MongoDBDatabaseResource):
"""MongoDBDatabaseGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB MongoDB database.
:type id: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.rid = None
self.ts = None
self.etag = None
class MongoDBDatabaseGetResults(ARMResourceProperties):
"""An Azure Cosmos DB MongoDB database.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.MongoDBDatabaseGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.MongoDBDatabaseGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'MongoDBDatabaseGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'MongoDBDatabaseGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class MongoDBDatabaseListResult(msrest.serialization.Model):
"""The List operation response, that contains the MongoDB databases and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of MongoDB databases and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.MongoDBDatabaseGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MongoDBDatabaseGetResults]'},
}
def __init__(
self,
**kwargs
):
super(MongoDBDatabaseListResult, self).__init__(**kwargs)
self.value = None
class MongoIndex(msrest.serialization.Model):
"""Cosmos DB MongoDB collection index key.
:param key: Cosmos DB MongoDB collection index keys.
:type key: ~azure.mgmt.cosmosdb.models.MongoIndexKeys
:param options: Cosmos DB MongoDB collection index key options.
:type options: ~azure.mgmt.cosmosdb.models.MongoIndexOptions
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'MongoIndexKeys'},
'options': {'key': 'options', 'type': 'MongoIndexOptions'},
}
def __init__(
self,
**kwargs
):
super(MongoIndex, self).__init__(**kwargs)
self.key = kwargs.get('key', None)
self.options = kwargs.get('options', None)
class MongoIndexKeys(msrest.serialization.Model):
"""Cosmos DB MongoDB collection resource object.
:param keys: List of keys for each MongoDB collection in the Azure Cosmos DB service.
:type keys: list[str]
"""
_attribute_map = {
'keys': {'key': 'keys', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MongoIndexKeys, self).__init__(**kwargs)
self.keys = kwargs.get('keys', None)
class MongoIndexOptions(msrest.serialization.Model):
"""Cosmos DB MongoDB collection index options.
:param expire_after_seconds: Number of seconds after which documents covered by this index expire.
:type expire_after_seconds: int
:param unique: Whether the index enforces uniqueness.
:type unique: bool
"""
_attribute_map = {
'expire_after_seconds': {'key': 'expireAfterSeconds', 'type': 'int'},
'unique': {'key': 'unique', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MongoIndexOptions, self).__init__(**kwargs)
self.expire_after_seconds = kwargs.get('expire_after_seconds', None)
self.unique = kwargs.get('unique', None)
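# Illustrative sketch (hypothetical values): a collection resource with a shard
# key and a TTL index, composed from MongoDBCollectionResource, MongoIndex,
# MongoIndexKeys and MongoIndexOptions defined above.
#
#     collection = MongoDBCollectionResource(
#         id="orders",
#         shard_key={"customerId": "Hash"},
#         indexes=[
#             MongoIndex(
#                 key=MongoIndexKeys(keys=["_ts"]),
#                 options=MongoIndexOptions(expire_after_seconds=604800, unique=False),
#             )
#         ],
#     )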
class NotebookWorkspace(ARMProxyResource):
"""A notebook workspace resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:ivar notebook_server_endpoint: Specifies the endpoint of Notebook server.
:vartype notebook_server_endpoint: str
:ivar status: Status of the notebook workspace. Possible values are: Creating, Online,
Deleting, Failed, Updating.
:vartype status: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'notebook_server_endpoint': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'notebook_server_endpoint': {'key': 'properties.notebookServerEndpoint', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NotebookWorkspace, self).__init__(**kwargs)
self.notebook_server_endpoint = None
self.status = None
class NotebookWorkspaceConnectionInfoResult(msrest.serialization.Model):
"""The connection info for the given notebook workspace.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar auth_token: Specifies the auth token used for connecting to the Notebook server (uses token-based
auth).
:vartype auth_token: str
:ivar notebook_server_endpoint: Specifies the endpoint of Notebook server.
:vartype notebook_server_endpoint: str
"""
_validation = {
'auth_token': {'readonly': True},
'notebook_server_endpoint': {'readonly': True},
}
_attribute_map = {
'auth_token': {'key': 'authToken', 'type': 'str'},
'notebook_server_endpoint': {'key': 'notebookServerEndpoint', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NotebookWorkspaceConnectionInfoResult, self).__init__(**kwargs)
self.auth_token = None
self.notebook_server_endpoint = None
class NotebookWorkspaceCreateUpdateParameters(ARMProxyResource):
"""Parameters to create a notebook workspace resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NotebookWorkspaceCreateUpdateParameters, self).__init__(**kwargs)
class NotebookWorkspaceListResult(msrest.serialization.Model):
"""A list of notebook workspace resources.
:param value: Array of notebook workspace resources.
:type value: list[~azure.mgmt.cosmosdb.models.NotebookWorkspace]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[NotebookWorkspace]'},
}
def __init__(
self,
**kwargs
):
super(NotebookWorkspaceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class Operation(msrest.serialization.Model):
"""REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.cosmosdb.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
:param provider: Service provider: Microsoft.ResourceProvider.
:type provider: str
:param resource: Resource on which the operation is performed: Profile, endpoint, etc.
:type resource: str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
:param description: Description of operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'Provider', 'type': 'str'},
'resource': {'key': 'Resource', 'type': 'str'},
'operation': {'key': 'Operation', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results.
:param value: List of operations supported by the Resource Provider.
:type value: list[~azure.mgmt.cosmosdb.models.Operation]
:param next_link: URL to get the next set of operation list results if there are any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PartitionMetric(Metric):
"""The metric values for a single partition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar start_time: The start time for the metric (ISO-8601 format).
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time for the metric (ISO-8601 format).
:vartype end_time: ~datetime.datetime
:ivar time_grain: The time grain to be used to summarize the metric values.
:vartype time_grain: str
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
:ivar metric_values: The metric values for the specified time window and timestep.
:vartype metric_values: list[~azure.mgmt.cosmosdb.models.MetricValue]
:ivar partition_id: The partition id (GUID identifier) of the metric values.
:vartype partition_id: str
:ivar partition_key_range_id: The partition key range id (integer identifier) of the metric
values.
:vartype partition_key_range_id: str
"""
_validation = {
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'time_grain': {'readonly': True},
'unit': {'readonly': True},
'name': {'readonly': True},
'metric_values': {'readonly': True},
'partition_id': {'readonly': True},
'partition_key_range_id': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'metric_values': {'key': 'metricValues', 'type': '[MetricValue]'},
'partition_id': {'key': 'partitionId', 'type': 'str'},
'partition_key_range_id': {'key': 'partitionKeyRangeId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PartitionMetric, self).__init__(**kwargs)
self.partition_id = None
self.partition_key_range_id = None
class PartitionMetricListResult(msrest.serialization.Model):
"""The response to a list partition metrics request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of partition-level metrics for the account.
:vartype value: list[~azure.mgmt.cosmosdb.models.PartitionMetric]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PartitionMetric]'},
}
def __init__(
self,
**kwargs
):
super(PartitionMetricListResult, self).__init__(**kwargs)
self.value = None
class Usage(msrest.serialization.Model):
"""The usage data for a usage request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
:ivar quota_period: The quota period used to summarize the usage values.
:vartype quota_period: str
:ivar limit: Maximum value for this metric.
:vartype limit: long
:ivar current_value: Current value for this metric.
:vartype current_value: long
"""
_validation = {
'unit': {'readonly': True},
'name': {'readonly': True},
'quota_period': {'readonly': True},
'limit': {'readonly': True},
'current_value': {'readonly': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'current_value': {'key': 'currentValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.unit = None
self.name = None
self.quota_period = None
self.limit = None
self.current_value = None
class PartitionUsage(Usage):
"""The partition level usage data for a usage request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
:ivar quota_period: The quota period used to summarize the usage values.
:vartype quota_period: str
:ivar limit: Maximum value for this metric.
:vartype limit: long
:ivar current_value: Current value for this metric.
:vartype current_value: long
:ivar partition_id: The partition id (GUID identifier) of the usages.
:vartype partition_id: str
:ivar partition_key_range_id: The partition key range id (integer identifier) of the usages.
:vartype partition_key_range_id: str
"""
_validation = {
'unit': {'readonly': True},
'name': {'readonly': True},
'quota_period': {'readonly': True},
'limit': {'readonly': True},
'current_value': {'readonly': True},
'partition_id': {'readonly': True},
'partition_key_range_id': {'readonly': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'quota_period': {'key': 'quotaPeriod', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'partition_id': {'key': 'partitionId', 'type': 'str'},
'partition_key_range_id': {'key': 'partitionKeyRangeId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PartitionUsage, self).__init__(**kwargs)
self.partition_id = None
self.partition_key_range_id = None
class PartitionUsagesResult(msrest.serialization.Model):
"""The response to a list partition level usage request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of partition-level usages for the database. A usage is a point in time
metric.
:vartype value: list[~azure.mgmt.cosmosdb.models.PartitionUsage]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PartitionUsage]'},
}
def __init__(
self,
**kwargs
):
super(PartitionUsagesResult, self).__init__(**kwargs)
self.value = None
class PercentileMetric(msrest.serialization.Model):
"""Percentile Metric data.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar start_time: The start time for the metric (ISO-8601 format).
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time for the metric (ISO-8601 format).
:vartype end_time: ~datetime.datetime
:ivar time_grain: The time grain to be used to summarize the metric values.
:vartype time_grain: str
:ivar unit: The unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"Percent", "CountPerSecond", "BytesPerSecond", "Milliseconds".
:vartype unit: str or ~azure.mgmt.cosmosdb.models.UnitType
:ivar name: The name information for the metric.
:vartype name: ~azure.mgmt.cosmosdb.models.MetricName
:ivar metric_values: The percentile metric values for the specified time window and timestep.
:vartype metric_values: list[~azure.mgmt.cosmosdb.models.PercentileMetricValue]
"""
_validation = {
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'time_grain': {'readonly': True},
'unit': {'readonly': True},
'name': {'readonly': True},
'metric_values': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'name': {'key': 'name', 'type': 'MetricName'},
'metric_values': {'key': 'metricValues', 'type': '[PercentileMetricValue]'},
}
def __init__(
self,
**kwargs
):
super(PercentileMetric, self).__init__(**kwargs)
self.start_time = None
self.end_time = None
self.time_grain = None
self.unit = None
self.name = None
self.metric_values = None
class PercentileMetricListResult(msrest.serialization.Model):
"""The response to a list percentile metrics request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of percentile metrics for the account.
:vartype value: list[~azure.mgmt.cosmosdb.models.PercentileMetric]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PercentileMetric]'},
}
def __init__(
self,
**kwargs
):
super(PercentileMetricListResult, self).__init__(**kwargs)
self.value = None
class PercentileMetricValue(MetricValue):
"""Represents percentile metrics values.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar count: The number of values for the metric.
:vartype count: int
:ivar average: The average value of the metric.
:vartype average: float
:ivar maximum: The max value of the metric.
:vartype maximum: float
:ivar minimum: The min value of the metric.
:vartype minimum: float
:ivar timestamp: The metric timestamp (ISO-8601 format).
:vartype timestamp: ~datetime.datetime
:ivar total: The total value of the metric.
:vartype total: float
:ivar p10: The 10th percentile value for the metric.
:vartype p10: float
:ivar p25: The 25th percentile value for the metric.
:vartype p25: float
:ivar p50: The 50th percentile value for the metric.
:vartype p50: float
:ivar p75: The 75th percentile value for the metric.
:vartype p75: float
:ivar p90: The 90th percentile value for the metric.
:vartype p90: float
:ivar p95: The 95th percentile value for the metric.
:vartype p95: float
:ivar p99: The 99th percentile value for the metric.
:vartype p99: float
"""
_validation = {
'count': {'readonly': True},
'average': {'readonly': True},
'maximum': {'readonly': True},
'minimum': {'readonly': True},
'timestamp': {'readonly': True},
'total': {'readonly': True},
'p10': {'readonly': True},
'p25': {'readonly': True},
'p50': {'readonly': True},
'p75': {'readonly': True},
'p90': {'readonly': True},
'p95': {'readonly': True},
'p99': {'readonly': True},
}
_attribute_map = {
'count': {'key': '_count', 'type': 'int'},
'average': {'key': 'average', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'total': {'key': 'total', 'type': 'float'},
'p10': {'key': 'P10', 'type': 'float'},
'p25': {'key': 'P25', 'type': 'float'},
'p50': {'key': 'P50', 'type': 'float'},
'p75': {'key': 'P75', 'type': 'float'},
'p90': {'key': 'P90', 'type': 'float'},
'p95': {'key': 'P95', 'type': 'float'},
'p99': {'key': 'P99', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(PercentileMetricValue, self).__init__(**kwargs)
self.p10 = None
self.p25 = None
self.p50 = None
self.p75 = None
self.p90 = None
self.p95 = None
self.p99 = None
class PeriodicModeBackupPolicy(BackupPolicy):
"""The object representing periodic mode backup policy.
All required parameters must be populated in order to send to Azure.
:param type: Required. Describes the mode of backups. Constant filled by server. Possible
values include: "Periodic", "Continuous".
:type type: str or ~azure.mgmt.cosmosdb.models.BackupPolicyType
:param migration_state: The object representing the state of the migration between the backup
policies.
:type migration_state: ~azure.mgmt.cosmosdb.models.BackupPolicyMigrationState
:param periodic_mode_properties: Configuration values for periodic mode backup.
:type periodic_mode_properties: ~azure.mgmt.cosmosdb.models.PeriodicModeProperties
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'migration_state': {'key': 'migrationState', 'type': 'BackupPolicyMigrationState'},
'periodic_mode_properties': {'key': 'periodicModeProperties', 'type': 'PeriodicModeProperties'},
}
def __init__(
self,
**kwargs
):
super(PeriodicModeBackupPolicy, self).__init__(**kwargs)
self.type = 'Periodic' # type: str
self.periodic_mode_properties = kwargs.get('periodic_mode_properties', None)
class PeriodicModeProperties(msrest.serialization.Model):
"""Configuration values for periodic mode backup.
:param backup_interval_in_minutes: An integer representing the interval in minutes between two
backups.
:type backup_interval_in_minutes: int
:param backup_retention_interval_in_hours: An integer representing the time (in hours) that
each backup is retained.
:type backup_retention_interval_in_hours: int
:param backup_storage_redundancy: Enum to indicate type of backup residency. Possible values
include: "Geo", "Local", "Zone".
:type backup_storage_redundancy: str or ~azure.mgmt.cosmosdb.models.BackupStorageRedundancy
"""
_validation = {
'backup_interval_in_minutes': {'minimum': 0},
'backup_retention_interval_in_hours': {'minimum': 0},
}
_attribute_map = {
'backup_interval_in_minutes': {'key': 'backupIntervalInMinutes', 'type': 'int'},
'backup_retention_interval_in_hours': {'key': 'backupRetentionIntervalInHours', 'type': 'int'},
'backup_storage_redundancy': {'key': 'backupStorageRedundancy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PeriodicModeProperties, self).__init__(**kwargs)
self.backup_interval_in_minutes = kwargs.get('backup_interval_in_minutes', None)
self.backup_retention_interval_in_hours = kwargs.get('backup_retention_interval_in_hours', None)
self.backup_storage_redundancy = kwargs.get('backup_storage_redundancy', None)
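# Illustrative sketch (hypothetical values): a periodic backup policy taking a
# backup every 4 hours and retaining each backup for 8 hours. The discriminator
# type is set to 'Periodic' automatically by PeriodicModeBackupPolicy.
#
#     backup_policy = PeriodicModeBackupPolicy(
#         periodic_mode_properties=PeriodicModeProperties(
#             backup_interval_in_minutes=240,
#             backup_retention_interval_in_hours=8,
#             backup_storage_redundancy="Geo",
#         )
#     )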
class Permission(msrest.serialization.Model):
"""The set of data plane operations permitted through this Role Definition.
:param data_actions: An array of data actions that are allowed.
:type data_actions: list[str]
:param not_data_actions: An array of data actions that are denied.
:type not_data_actions: list[str]
"""
_attribute_map = {
'data_actions': {'key': 'dataActions', 'type': '[str]'},
'not_data_actions': {'key': 'notDataActions', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(Permission, self).__init__(**kwargs)
self.data_actions = kwargs.get('data_actions', None)
self.not_data_actions = kwargs.get('not_data_actions', None)
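# Usage sketch: a Permission that allows reads while denying deletes. The data
# action strings are illustrative placeholders, not an authoritative list of
# Cosmos DB data actions.
def _example_permission():
    return Permission(
        data_actions=[
            "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/items/read",
        ],
        not_data_actions=[
            "Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers/items/delete",
        ],
    )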
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
    """The resource model definition for an Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class PrivateEndpointConnection(ProxyResource):
"""A private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param private_endpoint: Private endpoint which the connection belongs to.
:type private_endpoint: ~azure.mgmt.cosmosdb.models.PrivateEndpointProperty
:param private_link_service_connection_state: Connection State of the Private Endpoint
Connection.
:type private_link_service_connection_state:
~azure.mgmt.cosmosdb.models.PrivateLinkServiceConnectionStateProperty
:param group_id: Group id of the private endpoint.
:type group_id: str
:param provisioning_state: Provisioning state of the private endpoint.
:type provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpointProperty'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionStateProperty'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.group_id = kwargs.get('group_id', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""A list of private endpoint connections.
:param value: Array of private endpoint connections.
:type value: list[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateEndpointProperty(msrest.serialization.Model):
"""Private endpoint which the connection belongs to.
:param id: Resource id of the private endpoint.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointProperty, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class PrivateLinkResource(ARMProxyResource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:ivar required_zone_names: The private link resource required zone names.
:vartype required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'required_zone_names': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = None
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.cosmosdb.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionStateProperty(msrest.serialization.Model):
"""Connection State of the Private Endpoint Connection.
Variables are only populated by the server, and will be ignored when sending a request.
:param status: The private link service connection status.
:type status: str
:param description: The private link service connection description.
:type description: str
    :ivar actions_required: Any action that is required beyond basic workflow
     (approve/reject/disconnect).
:vartype actions_required: str
"""
_validation = {
'actions_required': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionStateProperty, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = None
class RegionForOnlineOffline(msrest.serialization.Model):
"""Cosmos DB region to online or offline.
All required parameters must be populated in order to send to Azure.
:param region: Required. Cosmos DB region, with spaces between words and each word capitalized.
:type region: str
"""
_validation = {
'region': {'required': True},
}
_attribute_map = {
'region': {'key': 'region', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RegionForOnlineOffline, self).__init__(**kwargs)
self.region = kwargs['region']
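# Usage sketch: the region name uses spaces between words with each word
# capitalized, as documented above; "West US" is only an example.
def _example_region_for_online_offline():
    return RegionForOnlineOffline(region="West US")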
class RestorableDatabaseAccountGetResult(msrest.serialization.Model):
    """An Azure Cosmos DB restorable database account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param account_name: The name of the global database account.
:type account_name: str
:param creation_time: The creation time of the restorable database account (ISO-8601 format).
:type creation_time: ~datetime.datetime
:param deletion_time: The time at which the restorable database account has been deleted
(ISO-8601 format).
:type deletion_time: ~datetime.datetime
:ivar api_type: The API type of the restorable database account. Possible values include:
"MongoDB", "Gremlin", "Cassandra", "Table", "Sql", "GremlinV2".
:vartype api_type: str or ~azure.mgmt.cosmosdb.models.ApiType
    :ivar restorable_locations: List of regions where the database account can be restored
from.
:vartype restorable_locations: list[~azure.mgmt.cosmosdb.models.RestorableLocationResource]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'api_type': {'readonly': True},
'restorable_locations': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'account_name': {'key': 'properties.accountName', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'deletion_time': {'key': 'properties.deletionTime', 'type': 'iso-8601'},
'api_type': {'key': 'properties.apiType', 'type': 'str'},
'restorable_locations': {'key': 'properties.restorableLocations', 'type': '[RestorableLocationResource]'},
}
def __init__(
self,
**kwargs
):
super(RestorableDatabaseAccountGetResult, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.account_name = kwargs.get('account_name', None)
self.creation_time = kwargs.get('creation_time', None)
self.deletion_time = kwargs.get('deletion_time', None)
self.api_type = None
self.restorable_locations = None
class RestorableDatabaseAccountsListResult(msrest.serialization.Model):
"""The List operation response, that contains the restorable database accounts and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of restorable database accounts and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.RestorableDatabaseAccountGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RestorableDatabaseAccountGetResult]'},
}
def __init__(
self,
**kwargs
):
super(RestorableDatabaseAccountsListResult, self).__init__(**kwargs)
self.value = None
class RestorableLocationResource(msrest.serialization.Model):
"""Properties of the regional restorable account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location_name: The location of the regional restorable account.
:vartype location_name: str
:ivar regional_database_account_instance_id: The instance id of the regional restorable
account.
:vartype regional_database_account_instance_id: str
:ivar creation_time: The creation time of the regional restorable database account (ISO-8601
format).
:vartype creation_time: ~datetime.datetime
:ivar deletion_time: The time at which the regional restorable database account has been
deleted (ISO-8601 format).
:vartype deletion_time: ~datetime.datetime
"""
_validation = {
'location_name': {'readonly': True},
'regional_database_account_instance_id': {'readonly': True},
'creation_time': {'readonly': True},
'deletion_time': {'readonly': True},
}
_attribute_map = {
'location_name': {'key': 'locationName', 'type': 'str'},
'regional_database_account_instance_id': {'key': 'regionalDatabaseAccountInstanceId', 'type': 'str'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'deletion_time': {'key': 'deletionTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(RestorableLocationResource, self).__init__(**kwargs)
self.location_name = None
self.regional_database_account_instance_id = None
self.creation_time = None
self.deletion_time = None
class RestorableMongodbCollectionGetResult(msrest.serialization.Model):
"""An Azure Cosmos DB MongoDB collection event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource Identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param resource: The resource of an Azure Cosmos DB MongoDB collection event.
:type resource: ~azure.mgmt.cosmosdb.models.RestorableMongodbCollectionPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource': {'key': 'properties.resource', 'type': 'RestorableMongodbCollectionPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbCollectionGetResult, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.resource = kwargs.get('resource', None)
class RestorableMongodbCollectionPropertiesResource(msrest.serialization.Model):
"""The resource of an Azure Cosmos DB MongoDB collection event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar operation_type: The operation type of this collection event. Possible values include:
"Create", "Replace", "Delete", "SystemOperation".
:vartype operation_type: str or ~azure.mgmt.cosmosdb.models.OperationType
:ivar event_timestamp: The time when this collection event happened.
:vartype event_timestamp: str
:ivar owner_id: The name of this MongoDB collection.
:vartype owner_id: str
:ivar owner_resource_id: The resource ID of this MongoDB collection.
:vartype owner_resource_id: str
"""
_validation = {
'rid': {'readonly': True},
'operation_type': {'readonly': True},
'event_timestamp': {'readonly': True},
'owner_id': {'readonly': True},
'owner_resource_id': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'event_timestamp': {'key': 'eventTimestamp', 'type': 'str'},
'owner_id': {'key': 'ownerId', 'type': 'str'},
'owner_resource_id': {'key': 'ownerResourceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbCollectionPropertiesResource, self).__init__(**kwargs)
self.rid = None
self.operation_type = None
self.event_timestamp = None
self.owner_id = None
self.owner_resource_id = None
class RestorableMongodbCollectionsListResult(msrest.serialization.Model):
"""The List operation response, that contains the MongoDB collection events and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of MongoDB collection events and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.RestorableMongodbCollectionGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RestorableMongodbCollectionGetResult]'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbCollectionsListResult, self).__init__(**kwargs)
self.value = None
class RestorableMongodbDatabaseGetResult(msrest.serialization.Model):
"""An Azure Cosmos DB MongoDB database event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource Identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param resource: The resource of an Azure Cosmos DB MongoDB database event.
:type resource: ~azure.mgmt.cosmosdb.models.RestorableMongodbDatabasePropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource': {'key': 'properties.resource', 'type': 'RestorableMongodbDatabasePropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbDatabaseGetResult, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.resource = kwargs.get('resource', None)
class RestorableMongodbDatabasePropertiesResource(msrest.serialization.Model):
"""The resource of an Azure Cosmos DB MongoDB database event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar operation_type: The operation type of this database event. Possible values include:
"Create", "Replace", "Delete", "SystemOperation".
:vartype operation_type: str or ~azure.mgmt.cosmosdb.models.OperationType
:ivar event_timestamp: The time when this database event happened.
:vartype event_timestamp: str
:ivar owner_id: The name of this MongoDB database.
:vartype owner_id: str
:ivar owner_resource_id: The resource ID of this MongoDB database.
:vartype owner_resource_id: str
"""
_validation = {
'rid': {'readonly': True},
'operation_type': {'readonly': True},
'event_timestamp': {'readonly': True},
'owner_id': {'readonly': True},
'owner_resource_id': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'event_timestamp': {'key': 'eventTimestamp', 'type': 'str'},
'owner_id': {'key': 'ownerId', 'type': 'str'},
'owner_resource_id': {'key': 'ownerResourceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbDatabasePropertiesResource, self).__init__(**kwargs)
self.rid = None
self.operation_type = None
self.event_timestamp = None
self.owner_id = None
self.owner_resource_id = None
class RestorableMongodbDatabasesListResult(msrest.serialization.Model):
"""The List operation response, that contains the MongoDB database events and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of MongoDB database events and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.RestorableMongodbDatabaseGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RestorableMongodbDatabaseGetResult]'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbDatabasesListResult, self).__init__(**kwargs)
self.value = None
class RestorableMongodbResourcesListResult(msrest.serialization.Model):
"""The List operation response, that contains the restorable MongoDB resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of restorable MongoDB resources, including the database and collection names.
:vartype value: list[~azure.mgmt.cosmosdb.models.DatabaseRestoreResource]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseRestoreResource]'},
}
def __init__(
self,
**kwargs
):
super(RestorableMongodbResourcesListResult, self).__init__(**kwargs)
self.value = None
class RestorableSqlContainerGetResult(msrest.serialization.Model):
"""An Azure Cosmos DB SQL container event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource Identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param resource: The resource of an Azure Cosmos DB SQL container event.
:type resource: ~azure.mgmt.cosmosdb.models.RestorableSqlContainerPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource': {'key': 'properties.resource', 'type': 'RestorableSqlContainerPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlContainerGetResult, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.resource = kwargs.get('resource', None)
class RestorableSqlContainerPropertiesResource(msrest.serialization.Model):
"""The resource of an Azure Cosmos DB SQL container event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar operation_type: The operation type of this container event. Possible values include:
"Create", "Replace", "Delete", "SystemOperation".
:vartype operation_type: str or ~azure.mgmt.cosmosdb.models.OperationType
    :ivar event_timestamp: The time when this container event happened.
:vartype event_timestamp: str
:ivar owner_id: The name of this SQL container.
:vartype owner_id: str
:ivar owner_resource_id: The resource ID of this SQL container.
:vartype owner_resource_id: str
:param container: Cosmos DB SQL container resource object.
:type container: ~azure.mgmt.cosmosdb.models.RestorableSqlContainerPropertiesResourceContainer
"""
_validation = {
'rid': {'readonly': True},
'operation_type': {'readonly': True},
'event_timestamp': {'readonly': True},
'owner_id': {'readonly': True},
'owner_resource_id': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'event_timestamp': {'key': 'eventTimestamp', 'type': 'str'},
'owner_id': {'key': 'ownerId', 'type': 'str'},
'owner_resource_id': {'key': 'ownerResourceId', 'type': 'str'},
'container': {'key': 'container', 'type': 'RestorableSqlContainerPropertiesResourceContainer'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlContainerPropertiesResource, self).__init__(**kwargs)
self.rid = None
self.operation_type = None
self.event_timestamp = None
self.owner_id = None
self.owner_resource_id = None
self.container = kwargs.get('container', None)
class SqlContainerResource(msrest.serialization.Model):
"""Cosmos DB SQL container resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL container.
:type id: str
:param indexing_policy: The configuration of the indexing policy. By default, the indexing is
automatic for all document paths within the container.
:type indexing_policy: ~azure.mgmt.cosmosdb.models.IndexingPolicy
:param partition_key: The configuration of the partition key to be used for partitioning data
into multiple partitions.
:type partition_key: ~azure.mgmt.cosmosdb.models.ContainerPartitionKey
:param default_ttl: Default time to live.
:type default_ttl: int
:param unique_key_policy: The unique key policy configuration for specifying uniqueness
constraints on documents in the collection in the Azure Cosmos DB service.
:type unique_key_policy: ~azure.mgmt.cosmosdb.models.UniqueKeyPolicy
:param conflict_resolution_policy: The conflict resolution policy for the container.
:type conflict_resolution_policy: ~azure.mgmt.cosmosdb.models.ConflictResolutionPolicy
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: long
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'indexing_policy': {'key': 'indexingPolicy', 'type': 'IndexingPolicy'},
'partition_key': {'key': 'partitionKey', 'type': 'ContainerPartitionKey'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'unique_key_policy': {'key': 'uniqueKeyPolicy', 'type': 'UniqueKeyPolicy'},
'conflict_resolution_policy': {'key': 'conflictResolutionPolicy', 'type': 'ConflictResolutionPolicy'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.indexing_policy = kwargs.get('indexing_policy', None)
self.partition_key = kwargs.get('partition_key', None)
self.default_ttl = kwargs.get('default_ttl', None)
self.unique_key_policy = kwargs.get('unique_key_policy', None)
self.conflict_resolution_policy = kwargs.get('conflict_resolution_policy', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
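# Usage sketch: a minimal SQL container resource. The container name, TTL value
# and partition-key path are illustrative; ContainerPartitionKey is defined
# elsewhere in this module and the keyword arguments used for it here are
# assumed rather than taken from this file.
def _example_sql_container_resource():
    return SqlContainerResource(
        id="orders",
        partition_key=ContainerPartitionKey(paths=["/customerId"], kind="Hash"),
        default_ttl=3600,  # items expire after one hour unless overridden per item
    )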
class RestorableSqlContainerPropertiesResourceContainer(ExtendedResourceProperties, SqlContainerResource):
"""Cosmos DB SQL container resource object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL container.
:type id: str
:param indexing_policy: The configuration of the indexing policy. By default, the indexing is
automatic for all document paths within the container.
:type indexing_policy: ~azure.mgmt.cosmosdb.models.IndexingPolicy
:param partition_key: The configuration of the partition key to be used for partitioning data
into multiple partitions.
:type partition_key: ~azure.mgmt.cosmosdb.models.ContainerPartitionKey
:param default_ttl: Default time to live.
:type default_ttl: int
:param unique_key_policy: The unique key policy configuration for specifying uniqueness
constraints on documents in the collection in the Azure Cosmos DB service.
:type unique_key_policy: ~azure.mgmt.cosmosdb.models.UniqueKeyPolicy
:param conflict_resolution_policy: The conflict resolution policy for the container.
:type conflict_resolution_policy: ~azure.mgmt.cosmosdb.models.ConflictResolutionPolicy
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: long
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
:ivar self_property: A system generated property that specifies the addressable path of the
container resource.
:vartype self_property: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
'self_property': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'indexing_policy': {'key': 'indexingPolicy', 'type': 'IndexingPolicy'},
'partition_key': {'key': 'partitionKey', 'type': 'ContainerPartitionKey'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'unique_key_policy': {'key': 'uniqueKeyPolicy', 'type': 'UniqueKeyPolicy'},
'conflict_resolution_policy': {'key': 'conflictResolutionPolicy', 'type': 'ConflictResolutionPolicy'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'long'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
'self_property': {'key': '_self', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlContainerPropertiesResourceContainer, self).__init__(**kwargs)
self.id = kwargs['id']
self.indexing_policy = kwargs.get('indexing_policy', None)
self.partition_key = kwargs.get('partition_key', None)
self.default_ttl = kwargs.get('default_ttl', None)
self.unique_key_policy = kwargs.get('unique_key_policy', None)
self.conflict_resolution_policy = kwargs.get('conflict_resolution_policy', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
        self.rid = None
        self.ts = None
        self.etag = None
        self.self_property = None
class RestorableSqlContainersListResult(msrest.serialization.Model):
"""The List operation response, that contains the SQL container events and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of SQL container events and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.RestorableSqlContainerGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RestorableSqlContainerGetResult]'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlContainersListResult, self).__init__(**kwargs)
self.value = None
class RestorableSqlDatabaseGetResult(msrest.serialization.Model):
"""An Azure Cosmos DB SQL database event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource Identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param resource: The resource of an Azure Cosmos DB SQL database event.
:type resource: ~azure.mgmt.cosmosdb.models.RestorableSqlDatabasePropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource': {'key': 'properties.resource', 'type': 'RestorableSqlDatabasePropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlDatabaseGetResult, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.resource = kwargs.get('resource', None)
class RestorableSqlDatabasePropertiesResource(msrest.serialization.Model):
"""The resource of an Azure Cosmos DB SQL database event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar operation_type: The operation type of this database event. Possible values include:
"Create", "Replace", "Delete", "SystemOperation".
:vartype operation_type: str or ~azure.mgmt.cosmosdb.models.OperationType
:ivar event_timestamp: The time when this database event happened.
:vartype event_timestamp: str
:ivar owner_id: The name of the SQL database.
:vartype owner_id: str
:ivar owner_resource_id: The resource ID of the SQL database.
:vartype owner_resource_id: str
:param database: Cosmos DB SQL database resource object.
:type database: ~azure.mgmt.cosmosdb.models.RestorableSqlDatabasePropertiesResourceDatabase
"""
_validation = {
'rid': {'readonly': True},
'operation_type': {'readonly': True},
'event_timestamp': {'readonly': True},
'owner_id': {'readonly': True},
'owner_resource_id': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'event_timestamp': {'key': 'eventTimestamp', 'type': 'str'},
'owner_id': {'key': 'ownerId', 'type': 'str'},
'owner_resource_id': {'key': 'ownerResourceId', 'type': 'str'},
'database': {'key': 'database', 'type': 'RestorableSqlDatabasePropertiesResourceDatabase'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlDatabasePropertiesResource, self).__init__(**kwargs)
self.rid = None
self.operation_type = None
self.event_timestamp = None
self.owner_id = None
self.owner_resource_id = None
self.database = kwargs.get('database', None)
class SqlDatabaseResource(msrest.serialization.Model):
"""Cosmos DB SQL database resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL database.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseResource, self).__init__(**kwargs)
self.id = kwargs['id']
class RestorableSqlDatabasePropertiesResourceDatabase(SqlDatabaseResource, ExtendedResourceProperties):
"""Cosmos DB SQL database resource object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
:param id: Required. Name of the Cosmos DB SQL database.
:type id: str
    :ivar colls: A system generated property that specifies the addressable path of the collections
resource.
:vartype colls: str
:ivar users: A system generated property that specifies the addressable path of the users
resource.
:vartype users: str
:ivar self_property: A system generated property that specifies the addressable path of the
database resource.
:vartype self_property: str
"""
_validation = {
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
'id': {'required': True},
'colls': {'readonly': True},
'users': {'readonly': True},
'self_property': {'readonly': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'colls': {'key': '_colls', 'type': 'str'},
'users': {'key': '_users', 'type': 'str'},
'self_property': {'key': '_self', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlDatabasePropertiesResourceDatabase, self).__init__(**kwargs)
        self.rid = None
        self.ts = None
        self.etag = None
        self.id = kwargs['id']
        self.colls = None
        self.users = None
        self.self_property = None
class RestorableSqlDatabasesListResult(msrest.serialization.Model):
"""The List operation response, that contains the SQL database events and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of SQL database events and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.RestorableSqlDatabaseGetResult]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RestorableSqlDatabaseGetResult]'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlDatabasesListResult, self).__init__(**kwargs)
self.value = None
class RestorableSqlResourcesListResult(msrest.serialization.Model):
"""The List operation response, that contains the restorable SQL resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of restorable SQL resources, including the database and collection names.
:vartype value: list[~azure.mgmt.cosmosdb.models.DatabaseRestoreResource]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DatabaseRestoreResource]'},
}
def __init__(
self,
**kwargs
):
super(RestorableSqlResourcesListResult, self).__init__(**kwargs)
self.value = None
class RestoreParameters(msrest.serialization.Model):
"""Parameters to indicate the information about the restore.
:param restore_mode: Describes the mode of the restore. Possible values include: "PointInTime".
:type restore_mode: str or ~azure.mgmt.cosmosdb.models.RestoreMode
:param restore_source: The id of the restorable database account from which the restore has to
be initiated. For example:
/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}.
:type restore_source: str
:param restore_timestamp_in_utc: Time to which the account has to be restored (ISO-8601
format).
:type restore_timestamp_in_utc: ~datetime.datetime
:param databases_to_restore: List of specific databases available for restore.
:type databases_to_restore: list[~azure.mgmt.cosmosdb.models.DatabaseRestoreResource]
"""
_attribute_map = {
'restore_mode': {'key': 'restoreMode', 'type': 'str'},
'restore_source': {'key': 'restoreSource', 'type': 'str'},
'restore_timestamp_in_utc': {'key': 'restoreTimestampInUtc', 'type': 'iso-8601'},
'databases_to_restore': {'key': 'databasesToRestore', 'type': '[DatabaseRestoreResource]'},
}
def __init__(
self,
**kwargs
):
super(RestoreParameters, self).__init__(**kwargs)
self.restore_mode = kwargs.get('restore_mode', None)
self.restore_source = kwargs.get('restore_source', None)
self.restore_timestamp_in_utc = kwargs.get('restore_timestamp_in_utc', None)
self.databases_to_restore = kwargs.get('databases_to_restore', None)
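# Usage sketch: point-in-time restore parameters. The restore_source string
# keeps the documented placeholder segments; the timestamp is arbitrary.
def _example_restore_parameters():
    import datetime
    return RestoreParameters(
        restore_mode="PointInTime",
        restore_source=(
            "/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB"
            "/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}"
        ),
        restore_timestamp_in_utc=datetime.datetime(2021, 1, 1, 0, 0, 0),
    )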
class SeedNode(msrest.serialization.Model):
"""SeedNode.
:param ip_address: IP address of this seed node.
:type ip_address: str
"""
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SeedNode, self).__init__(**kwargs)
self.ip_address = kwargs.get('ip_address', None)
class SpatialSpec(msrest.serialization.Model):
"""SpatialSpec.
    :param path: The path to which the indexing behavior applies. Index paths typically start
with root and end with wildcard (/path/*).
:type path: str
:param types: List of path's spatial type.
:type types: list[str or ~azure.mgmt.cosmosdb.models.SpatialType]
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'types': {'key': 'types', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(SpatialSpec, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self.types = kwargs.get('types', None)
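# Usage sketch: a spatial index specification; the path and spatial types below
# are illustrative, with valid types coming from the SpatialType enum
# referenced above.
def _example_spatial_spec():
    return SpatialSpec(path="/location/*", types=["Point", "Polygon"])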
class SqlContainerCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB container.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a container.
:type resource: ~azure.mgmt.cosmosdb.models.SqlContainerResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlContainerResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
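# Usage sketch: wrapping a SqlContainerResource in the create/update
# parameters. The container name and location are placeholders; throughput and
# autoscale settings can be passed through the optional ``options`` keyword
# documented above.
def _example_sql_container_create_update_parameters():
    return SqlContainerCreateUpdateParameters(
        location="West US",
        resource=SqlContainerResource(id="orders"),
    )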
class SqlContainerGetPropertiesOptions(OptionsResource):
"""SqlContainerGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerGetPropertiesOptions, self).__init__(**kwargs)
class SqlContainerGetPropertiesResource(ExtendedResourceProperties, SqlContainerResource):
"""SqlContainerGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL container.
:type id: str
:param indexing_policy: The configuration of the indexing policy. By default, the indexing is
automatic for all document paths within the container.
:type indexing_policy: ~azure.mgmt.cosmosdb.models.IndexingPolicy
:param partition_key: The configuration of the partition key to be used for partitioning data
into multiple partitions.
:type partition_key: ~azure.mgmt.cosmosdb.models.ContainerPartitionKey
:param default_ttl: Default time to live.
:type default_ttl: int
:param unique_key_policy: The unique key policy configuration for specifying uniqueness
constraints on documents in the collection in the Azure Cosmos DB service.
:type unique_key_policy: ~azure.mgmt.cosmosdb.models.UniqueKeyPolicy
:param conflict_resolution_policy: The conflict resolution policy for the container.
:type conflict_resolution_policy: ~azure.mgmt.cosmosdb.models.ConflictResolutionPolicy
:param analytical_storage_ttl: Analytical TTL.
:type analytical_storage_ttl: long
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'indexing_policy': {'key': 'indexingPolicy', 'type': 'IndexingPolicy'},
'partition_key': {'key': 'partitionKey', 'type': 'ContainerPartitionKey'},
'default_ttl': {'key': 'defaultTtl', 'type': 'int'},
'unique_key_policy': {'key': 'uniqueKeyPolicy', 'type': 'UniqueKeyPolicy'},
'conflict_resolution_policy': {'key': 'conflictResolutionPolicy', 'type': 'ConflictResolutionPolicy'},
'analytical_storage_ttl': {'key': 'analyticalStorageTtl', 'type': 'long'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.indexing_policy = kwargs.get('indexing_policy', None)
self.partition_key = kwargs.get('partition_key', None)
self.default_ttl = kwargs.get('default_ttl', None)
self.unique_key_policy = kwargs.get('unique_key_policy', None)
self.conflict_resolution_policy = kwargs.get('conflict_resolution_policy', None)
self.analytical_storage_ttl = kwargs.get('analytical_storage_ttl', None)
self.rid = None
self.ts = None
self.etag = None
class SqlContainerGetResults(ARMResourceProperties):
"""An Azure Cosmos DB container.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.SqlContainerGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.SqlContainerGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlContainerGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'SqlContainerGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class SqlContainerListResult(msrest.serialization.Model):
"""The List operation response, that contains the containers and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of containers and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlContainerGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlContainerListResult, self).__init__(**kwargs)
self.value = None
class SqlDatabaseCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB SQL database.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a SQL database.
:type resource: ~azure.mgmt.cosmosdb.models.SqlDatabaseResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlDatabaseResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class SqlDatabaseGetPropertiesOptions(OptionsResource):
"""SqlDatabaseGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseGetPropertiesOptions, self).__init__(**kwargs)
class SqlDatabaseGetPropertiesResource(SqlDatabaseResource, ExtendedResourceProperties):
"""SqlDatabaseGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
:param id: Required. Name of the Cosmos DB SQL database.
:type id: str
    :param colls: A system generated property that specifies the addressable path of the
collections resource.
:type colls: str
:param users: A system generated property that specifies the addressable path of the users
resource.
:type users: str
"""
_validation = {
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
'id': {'required': True},
}
_attribute_map = {
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'colls': {'key': '_colls', 'type': 'str'},
'users': {'key': '_users', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseGetPropertiesResource, self).__init__(**kwargs)
        self.rid = None
        self.ts = None
        self.etag = None
        self.id = kwargs['id']
        self.colls = kwargs.get('colls', None)
        self.users = kwargs.get('users', None)
class SqlDatabaseGetResults(ARMResourceProperties):
"""An Azure Cosmos DB SQL database.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.SqlDatabaseGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.SqlDatabaseGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlDatabaseGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'SqlDatabaseGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class SqlDatabaseListResult(msrest.serialization.Model):
"""The List operation response, that contains the SQL databases and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of SQL databases and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlDatabaseGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlDatabaseListResult, self).__init__(**kwargs)
self.value = None
class SqlRoleAssignmentCreateUpdateParameters(msrest.serialization.Model):
"""Parameters to create and update an Azure Cosmos DB SQL Role Assignment.
:param role_definition_id: The unique identifier for the associated Role Definition.
:type role_definition_id: str
:param scope: The data plane resource path for which access is being granted through this Role
Assignment.
:type scope: str
:param principal_id: The unique identifier for the associated AAD principal in the AAD graph to
which access is being granted through this Role Assignment. Tenant ID for the principal is
inferred using the tenant associated with the subscription.
:type principal_id: str
"""
_attribute_map = {
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleAssignmentCreateUpdateParameters, self).__init__(**kwargs)
self.role_definition_id = kwargs.get('role_definition_id', None)
self.scope = kwargs.get('scope', None)
self.principal_id = kwargs.get('principal_id', None)
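# Usage sketch: a role assignment request. The role definition id, scope and
# principal id below are placeholders that only illustrate the expected
# resource-path shapes.
def _example_sql_role_assignment_create_update_parameters():
    return SqlRoleAssignmentCreateUpdateParameters(
        role_definition_id=(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
            "/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}"
            "/sqlRoleDefinitions/{roleDefinitionId}"
        ),
        scope=(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
            "/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}"
        ),
        principal_id="{aadPrincipalId}",
    )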
class SqlRoleAssignmentGetResults(ARMProxyResource):
"""An Azure Cosmos DB Role Assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param role_definition_id: The unique identifier for the associated Role Definition.
:type role_definition_id: str
:param scope: The data plane resource path for which access is being granted through this Role
Assignment.
:type scope: str
:param principal_id: The unique identifier for the associated AAD principal in the AAD graph to
which access is being granted through this Role Assignment. Tenant ID for the principal is
inferred using the tenant associated with the subscription.
:type principal_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleAssignmentGetResults, self).__init__(**kwargs)
self.role_definition_id = kwargs.get('role_definition_id', None)
self.scope = kwargs.get('scope', None)
self.principal_id = kwargs.get('principal_id', None)
class SqlRoleAssignmentListResult(msrest.serialization.Model):
"""The relevant Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Role Assignments and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlRoleAssignmentGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleAssignmentListResult, self).__init__(**kwargs)
self.value = None
class SqlRoleDefinitionCreateUpdateParameters(msrest.serialization.Model):
"""Parameters to create and update an Azure Cosmos DB SQL Role Definition.
:param role_name: A user-friendly name for the Role Definition. Must be unique for the database
account.
:type role_name: str
:param type: Indicates whether the Role Definition was built-in or user created. Possible
values include: "BuiltInRole", "CustomRole".
:type type: str or ~azure.mgmt.cosmosdb.models.RoleDefinitionType
:param assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments
may be created using this Role Definition. This will allow application of this Role Definition
on the entire database account or any underlying Database / Collection. Must have at least one
element. Scopes higher than Database account are not enforceable as assignable Scopes. Note
that resources referenced in assignable Scopes need not exist.
:type assignable_scopes: list[str]
:param permissions: The set of operations allowed through this Role Definition.
:type permissions: list[~azure.mgmt.cosmosdb.models.Permission]
"""
_attribute_map = {
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'type': {'key': 'properties.type', 'type': 'str'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleDefinitionCreateUpdateParameters, self).__init__(**kwargs)
self.role_name = kwargs.get('role_name', None)
self.type = kwargs.get('type', None)
self.assignable_scopes = kwargs.get('assignable_scopes', None)
self.permissions = kwargs.get('permissions', None)
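# Illustrative sketch only (not part of the generated SDK): a custom role
# definition assignable at the account scope; the permissions list is omitted
# here because the Permission model is defined elsewhere in this module.
def _example_sql_role_definition_parameters(account_scope):
    return SqlRoleDefinitionCreateUpdateParameters(
        role_name='MyReadOnlyRole',
        type='CustomRole',
        assignable_scopes=[account_scope],
    )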
class SqlRoleDefinitionGetResults(ARMProxyResource):
"""An Azure Cosmos DB SQL Role Definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the database account.
:vartype id: str
:ivar name: The name of the database account.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param role_name: A user-friendly name for the Role Definition. Must be unique for the database
account.
:type role_name: str
:param type_properties_type: Indicates whether the Role Definition was built-in or user
created. Possible values include: "BuiltInRole", "CustomRole".
:type type_properties_type: str or ~azure.mgmt.cosmosdb.models.RoleDefinitionType
:param assignable_scopes: A set of fully qualified Scopes at or below which Role Assignments
may be created using this Role Definition. This will allow application of this Role Definition
on the entire database account or any underlying Database / Collection. Must have at least one
element. Scopes higher than Database account are not enforceable as assignable Scopes. Note
that resources referenced in assignable Scopes need not exist.
:type assignable_scopes: list[str]
:param permissions: The set of operations allowed through this Role Definition.
:type permissions: list[~azure.mgmt.cosmosdb.models.Permission]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'role_name': {'key': 'properties.roleName', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'assignable_scopes': {'key': 'properties.assignableScopes', 'type': '[str]'},
'permissions': {'key': 'properties.permissions', 'type': '[Permission]'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleDefinitionGetResults, self).__init__(**kwargs)
self.role_name = kwargs.get('role_name', None)
self.type_properties_type = kwargs.get('type_properties_type', None)
self.assignable_scopes = kwargs.get('assignable_scopes', None)
self.permissions = kwargs.get('permissions', None)
class SqlRoleDefinitionListResult(msrest.serialization.Model):
"""The relevant Role Definitions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Role Definitions and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlRoleDefinitionGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlRoleDefinitionListResult, self).__init__(**kwargs)
self.value = None
class SqlStoredProcedureCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB storedProcedure.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a storedProcedure.
:type resource: ~azure.mgmt.cosmosdb.models.SqlStoredProcedureResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlStoredProcedureResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlStoredProcedureCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class SqlStoredProcedureResource(msrest.serialization.Model):
"""Cosmos DB SQL storedProcedure resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL storedProcedure.
:type id: str
:param body: Body of the Stored Procedure.
:type body: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlStoredProcedureResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
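# Illustrative sketch only (not part of the generated SDK): 'id' is the stored
# procedure name and 'body' carries its JavaScript source as a plain string.
def _example_sql_stored_procedure_resource():
    return SqlStoredProcedureResource(
        id='helloSproc',
        body='function () { getContext().getResponse().setBody("Hello"); }',
    )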
class SqlStoredProcedureGetPropertiesResource(ExtendedResourceProperties, SqlStoredProcedureResource):
"""SqlStoredProcedureGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL storedProcedure.
:type id: str
:param body: Body of the Stored Procedure.
:type body: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlStoredProcedureGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
self.rid = None
self.ts = None
self.etag = None
class SqlStoredProcedureGetResults(ARMResourceProperties):
"""An Azure Cosmos DB storedProcedure.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlStoredProcedureGetPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(SqlStoredProcedureGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
class SqlStoredProcedureListResult(msrest.serialization.Model):
"""The List operation response, that contains the storedProcedures and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of storedProcedures and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlStoredProcedureGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlStoredProcedureListResult, self).__init__(**kwargs)
self.value = None
class SqlTriggerCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB trigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a trigger.
:type resource: ~azure.mgmt.cosmosdb.models.SqlTriggerResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlTriggerResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlTriggerCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class SqlTriggerResource(msrest.serialization.Model):
"""Cosmos DB SQL trigger resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL trigger.
:type id: str
:param body: Body of the Trigger.
:type body: str
:param trigger_type: Type of the Trigger. Possible values include: "Pre", "Post".
:type trigger_type: str or ~azure.mgmt.cosmosdb.models.TriggerType
:param trigger_operation: The operation the trigger is associated with. Possible values
include: "All", "Create", "Update", "Delete", "Replace".
:type trigger_operation: str or ~azure.mgmt.cosmosdb.models.TriggerOperation
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
'trigger_type': {'key': 'triggerType', 'type': 'str'},
'trigger_operation': {'key': 'triggerOperation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlTriggerResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
self.trigger_type = kwargs.get('trigger_type', None)
self.trigger_operation = kwargs.get('trigger_operation', None)
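# Illustrative sketch only (not part of the generated SDK): a pre-trigger that
# fires on document creation; the allowed trigger_type and trigger_operation
# values are the ones listed in the docstring above.
def _example_sql_trigger_resource():
    return SqlTriggerResource(
        id='validateDocumentTrigger',
        body='function () { /* validation logic goes here */ }',
        trigger_type='Pre',
        trigger_operation='Create',
    )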
class SqlTriggerGetPropertiesResource(ExtendedResourceProperties, SqlTriggerResource):
"""SqlTriggerGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL trigger.
:type id: str
:param body: Body of the Trigger.
:type body: str
:param trigger_type: Type of the Trigger. Possible values include: "Pre", "Post".
:type trigger_type: str or ~azure.mgmt.cosmosdb.models.TriggerType
:param trigger_operation: The operation the trigger is associated with. Possible values
include: "All", "Create", "Update", "Delete", "Replace".
:type trigger_operation: str or ~azure.mgmt.cosmosdb.models.TriggerOperation
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
'trigger_type': {'key': 'triggerType', 'type': 'str'},
'trigger_operation': {'key': 'triggerOperation', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlTriggerGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
self.trigger_type = kwargs.get('trigger_type', None)
self.trigger_operation = kwargs.get('trigger_operation', None)
self.rid = None
self.ts = None
self.etag = None
class SqlTriggerGetResults(ARMResourceProperties):
"""An Azure Cosmos DB trigger.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.SqlTriggerGetPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlTriggerGetPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(SqlTriggerGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
class SqlTriggerListResult(msrest.serialization.Model):
"""The List operation response, that contains the triggers and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of triggers and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlTriggerGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlTriggerListResult, self).__init__(**kwargs)
self.value = None
class SqlUserDefinedFunctionCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB userDefinedFunction.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a userDefinedFunction.
:type resource: ~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlUserDefinedFunctionResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(SqlUserDefinedFunctionCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class SqlUserDefinedFunctionResource(msrest.serialization.Model):
"""Cosmos DB SQL userDefinedFunction resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL userDefinedFunction.
:type id: str
:param body: Body of the User Defined Function.
:type body: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlUserDefinedFunctionResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
class SqlUserDefinedFunctionGetPropertiesResource(ExtendedResourceProperties, SqlUserDefinedFunctionResource):
"""SqlUserDefinedFunctionGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB SQL userDefinedFunction.
:type id: str
:param body: Body of the User Defined Function.
:type body: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SqlUserDefinedFunctionGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.body = kwargs.get('body', None)
self.rid = None
self.ts = None
self.etag = None
class SqlUserDefinedFunctionGetResults(ARMResourceProperties):
"""An Azure Cosmos DB userDefinedFunction.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'SqlUserDefinedFunctionGetPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(SqlUserDefinedFunctionGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
class SqlUserDefinedFunctionListResult(msrest.serialization.Model):
"""The List operation response, that contains the userDefinedFunctions and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of userDefinedFunctions and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SqlUserDefinedFunctionGetResults]'},
}
def __init__(
self,
**kwargs
):
super(SqlUserDefinedFunctionListResult, self).__init__(**kwargs)
self.value = None
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.cosmosdb.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~azure.mgmt.cosmosdb.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TableCreateUpdateParameters(ARMResourceProperties):
"""Parameters to create and update Cosmos DB Table.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a Table.
:type resource: ~azure.mgmt.cosmosdb.models.TableResource
:param options: A key-value pair of options to be applied for the request. This corresponds to
the headers sent with the request.
:type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'TableResource'},
'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
}
def __init__(
self,
**kwargs
):
super(TableCreateUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
self.options = kwargs.get('options', None)
class TableGetPropertiesOptions(OptionsResource):
"""TableGetPropertiesOptions.
:param throughput: Value of the Cosmos DB resource throughput or autoscaleSettings. Use the
ThroughputSetting resource when retrieving offer details.
:type throughput: int
:param autoscale_settings: Specifies the Autoscale settings.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettings
"""
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettings'},
}
def __init__(
self,
**kwargs
):
super(TableGetPropertiesOptions, self).__init__(**kwargs)
class TableResource(msrest.serialization.Model):
"""Cosmos DB table resource object.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB table.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TableResource, self).__init__(**kwargs)
self.id = kwargs['id']
class TableGetPropertiesResource(ExtendedResourceProperties, TableResource):
"""TableGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. Name of the Cosmos DB table.
:type id: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'id': {'required': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TableGetPropertiesResource, self).__init__(**kwargs)
self.id = kwargs['id']
self.rid = None
self.ts = None
self.etag = None
class TableGetResults(ARMResourceProperties):
"""An Azure Cosmos DB Table.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.TableGetPropertiesResource
:param options:
:type options: ~azure.mgmt.cosmosdb.models.TableGetPropertiesOptions
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'TableGetPropertiesResource'},
'options': {'key': 'properties.options', 'type': 'TableGetPropertiesOptions'},
}
def __init__(
self,
**kwargs
):
super(TableGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
self.options = kwargs.get('options', None)
class TableListResult(msrest.serialization.Model):
"""The List operation response, that contains the Table and their properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of Table and their properties.
:vartype value: list[~azure.mgmt.cosmosdb.models.TableGetResults]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TableGetResults]'},
}
def __init__(
self,
**kwargs
):
super(TableListResult, self).__init__(**kwargs)
self.value = None
class ThroughputPolicyResource(msrest.serialization.Model):
"""Cosmos DB resource throughput policy.
:param is_enabled: Determines whether the ThroughputPolicy is active or not.
:type is_enabled: bool
:param increment_percent: Represents the percentage by which throughput can increase every time
throughput policy kicks in.
:type increment_percent: int
"""
_attribute_map = {
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'increment_percent': {'key': 'incrementPercent', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ThroughputPolicyResource, self).__init__(**kwargs)
self.is_enabled = kwargs.get('is_enabled', None)
self.increment_percent = kwargs.get('increment_percent', None)
class ThroughputSettingsResource(msrest.serialization.Model):
"""Cosmos DB resource throughput object. Either throughput is required or autoscaleSettings is required, but not both.
Variables are only populated by the server, and will be ignored when sending a request.
:param throughput: Value of the Cosmos DB resource throughput. Either throughput is required or
autoscaleSettings is required, but not both.
:type throughput: int
:param autoscale_settings: Cosmos DB resource for autoscale settings. Either throughput is
required or autoscaleSettings is required, but not both.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettingsResource
:ivar minimum_throughput: The minimum throughput of the resource.
:vartype minimum_throughput: str
:ivar offer_replace_pending: The throughput replace is pending.
:vartype offer_replace_pending: str
"""
_validation = {
'minimum_throughput': {'readonly': True},
'offer_replace_pending': {'readonly': True},
}
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettingsResource'},
'minimum_throughput': {'key': 'minimumThroughput', 'type': 'str'},
'offer_replace_pending': {'key': 'offerReplacePending', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ThroughputSettingsResource, self).__init__(**kwargs)
self.throughput = kwargs.get('throughput', None)
self.autoscale_settings = kwargs.get('autoscale_settings', None)
self.minimum_throughput = None
self.offer_replace_pending = None
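# Illustrative sketch only (not part of the generated SDK): per the docstring,
# exactly one of 'throughput' and 'autoscale_settings' should be supplied; this
# example uses fixed (manual) throughput.
def _example_manual_throughput_resource():
    return ThroughputSettingsResource(throughput=400)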
class ThroughputSettingsGetPropertiesResource(ExtendedResourceProperties, ThroughputSettingsResource):
"""ThroughputSettingsGetPropertiesResource.
Variables are only populated by the server, and will be ignored when sending a request.
:param throughput: Value of the Cosmos DB resource throughput. Either throughput is required or
autoscaleSettings is required, but not both.
:type throughput: int
:param autoscale_settings: Cosmos DB resource for autoscale settings. Either throughput is
required or autoscaleSettings is required, but not both.
:type autoscale_settings: ~azure.mgmt.cosmosdb.models.AutoscaleSettingsResource
:ivar minimum_throughput: The minimum throughput of the resource.
:vartype minimum_throughput: str
:ivar offer_replace_pending: The throughput replace is pending.
:vartype offer_replace_pending: str
:ivar rid: A system generated property. A unique identifier.
:vartype rid: str
:ivar ts: A system generated property that denotes the last updated timestamp of the resource.
:vartype ts: float
:ivar etag: A system generated property representing the resource etag required for optimistic
concurrency control.
:vartype etag: str
"""
_validation = {
'minimum_throughput': {'readonly': True},
'offer_replace_pending': {'readonly': True},
'rid': {'readonly': True},
'ts': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'throughput': {'key': 'throughput', 'type': 'int'},
'autoscale_settings': {'key': 'autoscaleSettings', 'type': 'AutoscaleSettingsResource'},
'minimum_throughput': {'key': 'minimumThroughput', 'type': 'str'},
'offer_replace_pending': {'key': 'offerReplacePending', 'type': 'str'},
'rid': {'key': '_rid', 'type': 'str'},
'ts': {'key': '_ts', 'type': 'float'},
'etag': {'key': '_etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ThroughputSettingsGetPropertiesResource, self).__init__(**kwargs)
self.throughput = kwargs.get('throughput', None)
self.autoscale_settings = kwargs.get('autoscale_settings', None)
self.minimum_throughput = None
self.offer_replace_pending = None
self.rid = None
self.ts = None
self.etag = None
class ThroughputSettingsGetResults(ARMResourceProperties):
"""An Azure Cosmos DB resource throughput.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource:
:type resource: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetPropertiesResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'ThroughputSettingsGetPropertiesResource'},
}
def __init__(
self,
**kwargs
):
super(ThroughputSettingsGetResults, self).__init__(**kwargs)
self.resource = kwargs.get('resource', None)
class ThroughputSettingsUpdateParameters(ARMResourceProperties):
"""Parameters to update Cosmos DB resource throughput.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The unique resource identifier of the ARM resource.
:vartype id: str
:ivar name: The name of the ARM resource.
:vartype name: str
:ivar type: The type of Azure resource.
:vartype type: str
:param location: The location of the resource group to which the resource belongs.
:type location: str
:param tags: A set of tags. Tags are a list of key-value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128
characters and value no greater than 256 characters. For example, the default experience for a
template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values
also include "Table", "Graph", "DocumentDB", and "MongoDB".
:type tags: dict[str, str]
:param resource: Required. The standard JSON format of a resource throughput.
:type resource: ~azure.mgmt.cosmosdb.models.ThroughputSettingsResource
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'resource': {'key': 'properties.resource', 'type': 'ThroughputSettingsResource'},
}
def __init__(
self,
**kwargs
):
super(ThroughputSettingsUpdateParameters, self).__init__(**kwargs)
self.resource = kwargs['resource']
class UniqueKey(msrest.serialization.Model):
"""The unique key on that enforces uniqueness constraint on documents in the collection in the Azure Cosmos DB service.
    :param paths: List of paths that must be unique for each document in the Azure Cosmos DB service.
:type paths: list[str]
"""
_attribute_map = {
'paths': {'key': 'paths', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(UniqueKey, self).__init__(**kwargs)
self.paths = kwargs.get('paths', None)
class UniqueKeyPolicy(msrest.serialization.Model):
"""The unique key policy configuration for specifying uniqueness constraints on documents in the collection in the Azure Cosmos DB service.
    :param unique_keys: List of unique keys that enforce the uniqueness constraint on documents
     in the collection in the Azure Cosmos DB service.
:type unique_keys: list[~azure.mgmt.cosmosdb.models.UniqueKey]
"""
_attribute_map = {
'unique_keys': {'key': 'uniqueKeys', 'type': '[UniqueKey]'},
}
def __init__(
self,
**kwargs
):
super(UniqueKeyPolicy, self).__init__(**kwargs)
self.unique_keys = kwargs.get('unique_keys', None)
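# Illustrative sketch only (not part of the generated SDK): enforce uniqueness
# of a single document path; the '/employeeId' path is a made-up example.
def _example_unique_key_policy():
    return UniqueKeyPolicy(unique_keys=[UniqueKey(paths=['/employeeId'])])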
class UsagesResult(msrest.serialization.Model):
"""The response to a list usage request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of usages for the database. A usage is a point in time metric.
:vartype value: list[~azure.mgmt.cosmosdb.models.Usage]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
}
def __init__(
self,
**kwargs
):
super(UsagesResult, self).__init__(**kwargs)
self.value = None
class VirtualNetworkRule(msrest.serialization.Model):
"""Virtual Network ACL Rule object.
:param id: Resource ID of a subnet, for example:
/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
:type id: str
:param ignore_missing_v_net_service_endpoint: Create firewall rule before the virtual network
has vnet service endpoint enabled.
:type ignore_missing_v_net_service_endpoint: bool
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'ignore_missing_v_net_service_endpoint': {'key': 'ignoreMissingVNetServiceEndpoint', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VirtualNetworkRule, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.ignore_missing_v_net_service_endpoint = kwargs.get('ignore_missing_v_net_service_endpoint', None)
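# Illustrative sketch only (not part of the generated SDK): the subnet resource
# ID below is a placeholder following the format shown in the docstring.
def _example_virtual_network_rule():
    return VirtualNetworkRule(
        id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
           'Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>',
        ignore_missing_v_net_service_endpoint=False,
    )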
| Azure/azure-sdk-for-python | sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/_models.py | Python | mit | 321,990 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django import template
import weblate.trans.permissions
register = template.Library()
@register.assignment_tag
def can_upload_translation(user, translation):
return weblate.trans.permissions.can_upload_translation(
user, translation
)
@register.assignment_tag
def can_translate(user, translation):
return weblate.trans.permissions.can_translate(
user, translation
)
@register.assignment_tag
def can_suggest(user, translation):
return weblate.trans.permissions.can_suggest(
user, translation
)
@register.assignment_tag
def can_accept_suggestion(user, translation):
return weblate.trans.permissions.can_accept_suggestion(
user, translation
)
@register.assignment_tag
def can_delete_suggestion(user, translation):
return weblate.trans.permissions.can_delete_suggestion(
user, translation
)
@register.assignment_tag
def can_vote_suggestion(user, translation):
return weblate.trans.permissions.can_vote_suggestion(
user, translation
)
@register.assignment_tag
def can_use_mt(user, translation):
return weblate.trans.permissions.can_use_mt(user, translation)
@register.assignment_tag
def can_see_repository_status(user, project):
return weblate.trans.permissions.can_see_repository_status(user, project)
@register.assignment_tag
def can_commit_translation(user, project):
return weblate.trans.permissions.can_commit_translation(user, project)
@register.assignment_tag
def can_update_translation(user, project):
return weblate.trans.permissions.can_update_translation(user, project)
@register.assignment_tag
def can_push_translation(user, project):
return weblate.trans.permissions.can_push_translation(user, project)
@register.assignment_tag
def can_reset_translation(user, project):
return weblate.trans.permissions.can_reset_translation(user, project)
@register.assignment_tag
def can_lock_subproject(user, project):
return weblate.trans.permissions.can_lock_subproject(user, project)
@register.assignment_tag
def can_edit_flags(user, project):
return weblate.trans.permissions.can_edit_flags(user, project)
@register.assignment_tag
def can_edit_priority(user, project):
return weblate.trans.permissions.can_edit_priority(user, project)
@register.assignment_tag
def can_ignore_check(user, project):
return weblate.trans.permissions.can_ignore_check(user, project)
@register.assignment_tag
def can_delete_comment(user, project):
return weblate.trans.permissions.can_delete_comment(user, project)
@register.assignment_tag
def can_manage_acl(user, project):
return weblate.trans.permissions.can_manage_acl(user, project)
@register.assignment_tag
def can_download_changes(user, project):
return weblate.trans.permissions.can_download_changes(user, project)
@register.assignment_tag
def can_view_reports(user, project):
return weblate.trans.permissions.can_view_reports(user, project)
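# Illustrative usage sketch (not part of the original file): these assignment
# tags are meant to be consumed from Django templates, roughly like this:
#
#   {% load permissions %}
#   {% can_translate user translation as user_can_translate %}
#   {% if user_can_translate %} ... render the edit form ... {% endif %}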
| jitka/weblate | weblate/trans/templatetags/permissions.py | Python | gpl-3.0 | 3,755 |
# -*- coding: utf-8 -*-
from os import sys, path
import schedule
from time import sleep
from bottle.ext.mongo import MongoPlugin
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from mining.utils import conf, log_it
from mining.tasks import process
log_it("START", "bin-scheduler")
onrun = {}
register = []
def job(cube):
log_it("START JOB: {}".format(cube.get('slug')), "bin-scheduler")
process.delay(cube)
log_it("END JOB: {}".format(cube.get('slug')), "bin-scheduler")
def rules(cube, scheduler_type='minutes', scheduler_interval=59,
dashboard=None):
if scheduler_type:
scheduler_type = cube.get('scheduler_type', 'minutes')
if scheduler_interval:
scheduler_interval = cube.get('scheduler_interval', 59)
log_it("START REGISTER", "bin-scheduler")
log_it("cube: {}".format(cube.get('slug')), "bin-scheduler")
log_it("type: {}".format(scheduler_type), "bin-scheduler")
log_it("interval: {}".format(scheduler_interval), "bin-scheduler")
log_it("END REGISTER", "bin-scheduler")
t = {}
if scheduler_type == 'minutes':
env = schedule.every(int(scheduler_interval))
t = env.minutes
elif scheduler_type == 'hour':
env = schedule.every()
t = env.hour
elif scheduler_type == 'day':
env = schedule.every()
t = env.day
else:
return False
jobn = cube.get("slug")
try:
t.do(job, cube=cube)
if dashboard:
jobn = u"{}-{}".format(cube.get("slug"), dashboard)
onrun[jobn] = env
register.append(jobn)
if cube.get('run') != 'run':
process.delay(cube)
except Exception, e:
if jobn in register:
register.remove(jobn)
if onrun.get(jobn):
del onrun[jobn]
log_it("ERROR {}: {}".format(cube.get('slug'), e))
return True
def scheduler_app():
mongo = MongoPlugin(
uri=conf("mongodb")["uri"],
db=conf("mongodb")["db"],
json_mongo=True).get_mongo()
for cube in mongo['cube'].find({'scheduler_status': True}):
rules(cube)
for dashboard in mongo['dashboard'].find({'scheduler_status': True}):
elements = [e['id'] for e in dashboard['element']]
for e in elements:
element = mongo['element'].find_one({'slug': e})
cube = mongo['cube'].find_one({'slug': element['cube']})
rules(cube, dashboard['scheduler_type'],
dashboard['scheduler_interval'])
while True:
for cube in mongo['cube'].find({'scheduler_status': True}):
if cube['slug'] not in register:
rules(cube)
for dashboard in mongo['dashboard'].find({'scheduler_status': True}):
elements = [e['id'] for e in dashboard['element']]
for e in elements:
element = mongo['element'].find_one({'slug': e})
cube = mongo['cube'].find_one({'slug': element['cube']})
if cube['slug'] not in register:
rules(cube, dashboard['scheduler_type'],
dashboard['scheduler_interval'],
dashboard['slug'])
for cube in mongo['cube'].find({'scheduler_status': False}):
if cube['slug'] in register:
schedule.cancel_job(onrun[cube['slug']])
del onrun[cube['slug']]
register.remove(cube['slug'])
for dashboard in mongo['dashboard'].find({'scheduler_status': False}):
elements = [e['id'] for e in dashboard['element']]
for e in elements:
try:
element = mongo['element'].find_one({'slug': e})
cube = mongo['cube'].find_one({'slug': element['cube']})
jobn = u"{}-{}".format(cube['slug'], dashboard['slug'])
if jobn in register:
schedule.cancel_job(onrun[jobn])
del onrun[jobn]
register.remove(jobn)
except:
pass
schedule.run_pending()
sleep(1)
| chrisdamba/mining | mining/bin/scheduler.py | Python | mit | 4,168 |
#!/usr/bin/env python3
import re
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("mirbase_pre script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.mirbase_pre_input)
utils.now_time("Output_file: " + p.mirbase_pre_output)
input_file = open(p.mirbase_pre_input,'r')
output_file = open(p.mirbase_pre_output,'w')
flg = 0
seq = ""
for line in input_file:
line = line.rstrip()
if re.match(r"^>",line): #Header
data = line.split()
mir_id = data[0]
mir_id = mir_id.replace('>','')
symbol = data[1]
infor = mir_id + '|' + symbol
if flg == 1:
print (seq,file=output_file,end="\n")
print (infor,file=output_file,end="\t")
flg = 1
seq = ""
else: #Sequence
seq += line
print (seq,file=output_file,end="\n")
utils.now_time("mirbase_pre script was successfully finished!!")
input_file.close()
output_file.close()
if __name__ == '__main__':
main()
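# Illustrative input/output sketch (not part of the original script): a FASTA
# record whose header looks like ">some-mir-id SOME-ACCESSION ..." is rewritten
# by main() as one tab-separated line, "some-mir-id|SOME-ACCESSION<TAB><sequence>".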
| Naoto-Imamachi/MIRAGE | scripts/module/preparation/mirbase_pre.py | Python | mit | 1,190 |
from gi.repository import Gtk
from gaphas.view import GtkView
def scroll_tool(view: GtkView, speed: int = 5) -> Gtk.EventControllerScroll:
"""Scroll tool recognized 2 finger scroll gestures."""
ctrl = (
Gtk.EventControllerScroll.new(
view,
Gtk.EventControllerScrollFlags.BOTH_AXES,
)
if Gtk.get_major_version() == 3
else Gtk.EventControllerScroll.new(Gtk.EventControllerScrollFlags.BOTH_AXES)
)
ctrl.connect("scroll", on_scroll, speed)
return ctrl
def on_scroll(controller, dx, dy, speed):
view = controller.get_widget()
m = view.matrix
m.translate(-dx * speed, -dy * speed)
view.request_update((), view.model.get_all_items())
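# Illustrative usage sketch (not part of the original module): on GTK 4 the
# returned controller still has to be attached to the view, while on GTK 3 the
# constructor call above already binds it to the widget.
def _example_attach_scroll_tool(view: GtkView) -> Gtk.EventControllerScroll:
    controller = scroll_tool(view, speed=10)
    if Gtk.get_major_version() != 3:
        view.add_controller(controller)
    return controller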
| amolenaar/gaphas | gaphas/tool/scroll.py | Python | lgpl-2.1 | 724 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from bs4 import BeautifulSoup
def test():
html = """
<body>
<p>
天!仅此一天哦→<br />
<a href="https://item.taobao.com/item.htm?id=539146861037" target="_blank">
<img src="file://C:\\Users\gide\AppData\Local\Temp\[5UQ[BL(6~BS2JV6W}N6[%S.png" />https://item.taobao.com/item.htm?id=539146861037</a><br />
</p>
</body>
"""
soup = BeautifulSoup(html, 'html.parser')
tag_img = soup.find_all('img')
del_bad_img(tag_img)
print(soup.prettify())
def del_bad_img(tag_imgs):
for tag in tag_imgs:
if tag['src'].strip().startswith('file'):
print(tag)
tag.decompose()
if __name__ == '__main__':
test()
| dormouse/read | test/test_img.py | Python | lgpl-3.0 | 752 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
# Internal modules #
# First party modules #
from autopaths.file_path import FilePath
################################################################################
def check_blocked_request(tree):
"""
    Check if the request was denied by the server, and raise an exception if it was.
"""
# Modules #
from lxml import etree
# Did we get a filepath? #
if isinstance(tree, FilePath):
if tree.count_bytes > 1000000: return
tree = tree.contents
# Did we get a tree or raw text? #
if isinstance(tree, str): tree = etree.HTML(tree)
# By default we are good #
blocked = False
# Try Incapsula #
blocked = blocked or check_incapsula(tree)
# If we were indeed blocked, we can stop here #
if blocked: raise Exception("The request was flagged and blocked by the server.")
################################################################################
def check_incapsula(tree):
# By default we are good #
blocked = False
# Result type 1 from Incapsula #
meta = tree.xpath("//head/meta[@name='ROBOTS']")
if meta and 'NOINDEX' in meta[0].get('content'): blocked = True
# Result type 2 from Incapsula #
meta = tree.xpath("//head/meta[@name='robots']")
if meta and 'noindex' in meta[0].get('content'): blocked = True
# If we were indeed blocked, we can stop here #
return blocked
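################################################################################
# Illustrative usage sketch (not part of the original module): hand raw HTML,
# an lxml tree, or a FilePath to check_blocked_request() and let it raise when
# the response turns out to be an Incapsula block page.
def _example_guard_response(html_text):
    try:
        check_blocked_request(html_text)
    except Exception as error:
        print("The server blocked this request:", error)
        raise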
| xapple/plumbing | plumbing/scraping/blockers.py | Python | mit | 1,536 |
# Licensed under the Apache License:
# http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/cdunklau/fbemissary/blob/master/NOTICE.txt
"""
fbemissary - A bot framework for the Facebook Messenger platform
"""
from .core import FacebookPageMessengerBot
from .conversation import (
ConversationalistFactory,
SerialConversationalist
)
from .models import (
ReceivedMessage,
AttachmentType,
MediaAttachment,
LocationAttachment
)
| cdunklau/fbemissary | fbemissary/__init__.py | Python | apache-2.0 | 476 |
import os  # needed for the path joins, makedirs and rename calls below
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
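# Illustrative shape of the 'artifacts' mapping consumed above (the names are
# hypothetical): each value is either a single path string, or a list of
# {'relative_path': ..., 'absolute_path': ...} entries for multi-file artifacts.
#
#   artifacts = {
#       'configuration': 'resources/app.conf',
#       'scripts': [
#           {'relative_path': 'bin/start.sh', 'absolute_path': 'resources/bin/start.sh'},
#       ],
#   }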
| victorkeophila/alien4cloud-cloudify3-provider | src/main/resources/recipe/velocity/includes/download_artifacts.py | Python | apache-2.0 | 1,225 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import os,datetime,string
import chardet
import sys
import re
import time
import random
from bs4 import BeautifulSoup
import hashlib
#md5 = hashlib.md5('some string'.encode('utf-8')).hexdigest()
#print(md5)
import hashlib
#a = {'a':'aaa','b':'bbb'}
#a['c']='ccc'
#>>> a
#{'a': 'aaa', 'c': 'ccc', 'b': 'bbb'}
#>>> a.has_key('a')
#True
#>>> a.has_key('d')
#False
#>>> a = {}
#>>> a
#{}
#>>> a['a']='aaa'
#>>> a
#{'a': 'aaa'}
class Info(object):
def __init__(self,title=' ',href = ' ',addr = ' ',description=' ',shijian = 3600*60,price = 0,pic = '无'):
self.title = title
self.href = href
self.addr = addr
#self.time_str = ''
self.description=description
self.shijian = shijian
self.price = price
self.pic = pic
def __str__(self):
res = 'title'+self.title+'\n'
res = res + 'href'+self.href+'\n'
res = res + 'addr'+self.addr+'\n'
res = res + 'description'+self.description
return res
def get_addr(self):
return self.addr
#__repr__=__str__
##Ganji search results do not give a detailed time on the listing page, only things like 'within the last hour', and some entries have no time even on the detail page.
##No idea how they manage that, so the strategy here is: hash each title's link, put the digest in a container, and periodically rebuild the container so it does not grow too large.
##A further idea: as long as the user supplies a valid URL, the program should automatically pick the right crawler to parse it. (A minimal sketch of the hashing idea follows below.)
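##A minimal sketch of that hashing idea; the function and argument names here are illustrative only.
def example_dedup(seen, href):
    ##hash the listing link; treat it as new only if the digest is unseen
    key = hashlib.md5(href.encode('utf-8')).hexdigest()
    if key in seen:
        return False
    seen[key] = href
    return True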
class tc58(object):
def __init__(self,url):
self.__url = url
self.__addr_infos = []
self.__first = True
self.__last_time = 0.0
self.__loop_time = 5.0
self.__map = {}
self.__base_url = ''
def load_page(self):
opener = urllib2.build_opener()
f = opener.open(self.__url)
html = f.read()
self.__html = html
f.close()
## html_file = open('ganji.txt','w')
## html_file.write(html)
## html_file.close()
## time.sleep(20)
return html
def load_page1(self):
html_file = open('ganji.txt','r')
html =html_file.read()
html_file.close()
self.__html = html
#print html
#time.sleep(20)
return html
def get_res(self):
        ##the results to return
res = []
#self.__addr_infos = []
soup = BeautifulSoup(self.__html)
lis = soup.find_all('li',class_='list-img clearfix')
#print len(lis)
for li in lis:
info = Info()
titles_all = li.find_all('div',class_='info-title')
#print len(titles_all)
if(titles_all is not None and len(titles_all)>0):
title_all = titles_all[0]
title = title_all.a.get_text().strip()
#print title
href = title_all.a['href'].strip()
#print href
info.title = title
info.href = self.__base_url + href
info.description = li.get_text().strip()
#print info.description
                ##now work out which entries are new
href_md5 = hashlib.md5(info.href.encode('utf-8')).hexdigest()
if not self.__map.has_key(href_md5):
res.append(info)
self.__map[href_md5] = info
print 'res len:'
print len(res)
return res
def send_mail(self):
pass
def get_addr_infos(self):
return self.__addr_infos
def run(self):
self.load_page()
return self.get_res()
def resize_map(self):
seed = random.randint(1,30)
i = 0
for key in self.__map.keys():
if not i%seed:
del self.__map[key]
i = i+1
def fun_loop(self):
num = 0
        ##initial run to populate the map
self.run()
for i in range(100000):
res = self.run()
if len(self.__map)>1000:
                self.resize_map()
#self.run()
continue
if(len(res)>0):
                print 'hello, we received a new record!!'
print '**'*20
for i in res:
print i.href
time.sleep(5)
#rental listings
if __name__ == '__main__':
tc = tc58('http://sz.ganji.com/fang1/_%E8%85%BE%E8%AE%AF/')
tc.fun_loop()
#s = u'11月2日'
#s.encode('utf-8')
#print s[0]
#print s[1]
#print s[2]
#print s[3]
#print s[4] == u'日'
|
raiet/spider-mvc
|
subscribe/paser_test/ganji_2.py
|
Python
|
gpl-2.0
| 3,829
|
#!/usr/bin/env python3
import re
import subprocess
import pprint
import logging
import sys
import time
logging.basicConfig()
log = logging.getLogger('procbot')
class XMPPAdapter(object):
def __init__(self, bot, config):
log.debug('Using XMPP adapter with config: ' + pprint.pformat(config))
self.bot = bot
self.config = config
def run(self):
log.info('Connecting to XMPP server.')
try:
import sleekxmpp
except ImportError:
log.fatal('Unable to load sleekxmpp!')
sys.exit(1)
client = sleekxmpp.ClientXMPP(self.config['jid'], self.config['password'])
client.register_plugin('xep_0045')
client.register_plugin('xep_0030')
client.register_plugin(
'xep_0199',
pconfig={
'keepalive': True,
'frequency': 60,
'timeout': 30,
},
)
client.resource = 'bot'
client.auto_authorize = True
client.auto_subscribe = True
def start(event):
client.send_presence()
client.get_roster()
for room in self.config['rooms']:
client.plugin['xep_0045'].joinMUC(room, self.config['full_name'], wait=True)
client.add_event_handler('session_start', start)
def message(msg):
if msg['type'] in ('normal', 'chat'):
log.debug('got message ' + pprint.pformat(msg))
responses = self.bot.proc('friend', msg['body'])
for response in responses:
if response.strip() != '':
msg.reply(response).send()
client.add_event_handler('message', message)
def room_message(msg):
if msg['mucnick'] != self.config['full_name']:
log.debug('got MUC message ' + pprint.pformat(msg))
responses = self.bot.proc(msg['mucnick'], msg['body'])
for response in responses:
if response.strip() != '':
client.send_message(mto=msg['from'].bare, mbody=response, mtype='groupchat')
# deal with it
time.sleep(0.5)
client.add_event_handler('groupchat_message', room_message)
if client.connect((self.config['server'], self.config['port'])):
log.info('Procbot started. Connected to XMPP server.')
client.process(block=True)
log.info('Procbot exiting')
class SimpleAdapter(object):
def __init__(self, bot):
log.debug('Using simple adapter')
self.bot = bot
def run(self):
log.info('Procbot started')
while True:
try:
inp = input()
except InterruptedError:
break
except EOFError:
break
except KeyboardInterrupt:
break
if ':' in inp:
                user, message = inp.split(':', 1)
else:
user = 'admin'
message = inp
for res in self.bot.proc(user, message):
if res.strip() != '':
print(res)
log.info('Procbot exiting')
class ProcBot(object):
def __init__(self, config):
log.debug('Processing configuration')
nick = config.get('nick', 'procbot')
self.nick = nick
nick_reg = config.get('nick_reg', '(pb|procbot)')
self.nick_reg = nick_reg
log.debug('Using nick ' + nick)
scripts = []
for key in config['scripts']:
log.debug('Processing configuration for ' + key)
key_config = config['scripts'][key]
proc_key_config = {}
if 'trigger' not in key_config and 'triggers' not in key_config:
log.error('No trigger in configuration for ' + key)
continue
if 'command' not in key_config:
log.error('No command in configuration for ' + key)
continue
if 'help' not in key_config:
log.warn('No help in configuration for ' + key)
trigger_strs = []
if 'triggers' in key_config:
trigger_strs += key_config['triggers']
if 'trigger' in key_config:
trigger_strs.append(key_config['trigger'])
filtered_triggers = filter(lambda s: len(s) > 0, trigger_strs)
replaced_triggers = list(map(lambda t: t.replace('%NICK', nick_reg), filtered_triggers))
log.debug('{} trigger regexes are {}'.format(key, replaced_triggers))
proc_key_config = {
'key': key,
'triggers': list(map(lambda t: re.compile(t, re.I), replaced_triggers)),
'command': key_config['command'],
'help': key_config.get('help', ''),
}
if 'transform' in key_config:
try:
proc_key_config['transform'] = (
re.compile(key_config['transform']['in'], re.I|re.M),
key_config['transform']['out']
)
except KeyError:
log.error('Missing in or out transform on key ' + key)
continue
scripts.append(proc_key_config)
log.debug('Processed config for {}:\n{}'.format(
key, pprint.pformat(proc_key_config))
)
scripts.append(self.gen_help(scripts))
self.scripts = scripts
def gen_help(self, scripts):
help_texts = [
(
script['key'],
'|'.join(t.pattern for t in script['triggers']),
script['help']
)
for script in scripts
]
help_accumulator = ''
col_width = {}
help_texts.sort()
for help_text in help_texts:
for i, text in enumerate(help_text):
col_width[i] = max(len(text) + 1,col_width.get(i,0))
for help_text in help_texts:
help_accumulator += "".join(
word.ljust(col_width[i]) for i,word in enumerate(help_text)
) + '\n'
command = ['echo', help_accumulator]
trigger = [re.compile('^{},? help$'.format(self.nick_reg), re.I)]
return {
'key': 'help',
'triggers': trigger,
'command': command,
'help': 'RECURSION!',
}
def proc(self, user, message):
log.info('Received message "{}" from {}'.format(message, user))
for script in self.scripts:
log.debug('Testing message against trigger for ' + script['key'])
match = None
for t in script['triggers']:
match = t.match(message)
if match is not None:
break
if match is not None:
log.debug('Message matches trigger for ' + script['key'])
log.debug('found groups for this trigger: ' + pprint.pformat(match.groups()))
if len(match.groups()) != 0:
args = [arg.format(*match.groups(), user=user, message=message) for arg in script['command']]
else:
args = script['command']
log.debug('Calling subprocess with args: ' + pprint.pformat(args))
try:
results = subprocess.check_output(
args,
universal_newlines=True,
timeout=5,
)
except subprocess.CalledProcessError:
log.error('Error from subprocess.')
yield 'Error!'
continue
except subprocess.TimeoutExpired:
log.error('Subprocess timed out.')
yield 'Timeout!'
continue
log.debug('Results from subprocess: ' + results)
if 'transform' in script:
log.debug('Tranforming output')
in_re, out_fmt = script['transform']
t_match = in_re.search(results)
if t_match is None:
log.debug('Transform had no matches!')
continue
log.debug('transform matches: ' + pprint.pformat(t_match.groups()))
yield out_fmt.format(*t_match.groups(), user=user, message=message)
continue
if results.strip() != '':
yield results.strip()
log.debug('Finished testing scripts.')
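# A minimal, hypothetical configuration for ProcBot, shown as the Python
# equivalent of a procbot.json file. The 'echo' script and its values are
# made up for illustration; the field names follow what ProcBot.__init__
# reads above. A '%NICK' token inside a trigger would be expanded to
# nick_reg before compilation.
EXAMPLE_CONFIG = {
    'adapter': 'simple',
    'nick': 'procbot',
    'nick_reg': '(pb|procbot)',
    'scripts': {
        'echo': {
            'trigger': '^say (.*)$',
            'command': ['echo', '{0}'],
            'help': 'repeat whatever follows "say"',
        },
    },
}
# ProcBot(EXAMPLE_CONFIG) compiles the trigger, appends the auto-generated
# 'help' script, and SimpleAdapter(ProcBot(EXAMPLE_CONFIG)).run() would then
# read "user: message" lines from stdin.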
if __name__ == '__main__':
import argparse
import json
fmt = 'json'
parser = argparse.ArgumentParser(
description='Another stupid bot, this time with Subprocesses!'
)
parser.add_argument(
'-c',
'--config',
type=argparse.FileType('r'),
help='A configuration file to read. Default: ./procbot.' + fmt,
default=None
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='Enable debugging output.',
default=False
)
args = parser.parse_args()
log.level = logging.DEBUG if args.debug else logging.INFO
log.info('ProcBot starting')
    if args.config is None:
log.debug('Defaulting to ./procbot.{} for configuration'.format(fmt))
args.config = open('procbot.{}'.format(fmt), 'r')
config = json.load(args.config)
log.debug('Configuration loaded: \n' + pprint.pformat(config))
args.config.close()
bot = ProcBot(config)
if config['adapter'] == 'simple':
adp = SimpleAdapter(bot)
elif config['adapter'] == 'xmpp':
adp = XMPPAdapter(bot, config['xmpp'])
adp.run()
|
jakebasile/procbot
|
procbot.py
|
Python
|
bsd-2-clause
| 9,903
|
from mc_objects import (ENCHANT_WEAPONS, ENCHANT_ARMOR, ENCHANT_HELMETS,
ENCHANT_BOOTS, ENCHANT_TOOLS, ENCHANT_BOWS, ENCHANT_SHIELDS,
ENCHANT_ELYTRA, MCEnchant, register_enchant, register_item, MCItem,
WEAPONS, BOOTS, HELMETS, ARMOR, TOOLS, BOWS, SHIELDS, ELYTRA, AXES,
ENCHANT_AXES, register_attribute, MCAttribute, ITEM_ATTRIBUTES)
from os.path import join, dirname, abspath
materials = ['adamantine', 'aquarium', 'brass', 'bronze', 'coldiron',
'copper', 'electrum', 'invar', 'lead', 'mithril', 'nickel',
'silver', 'starsteel', 'steel', 'tin']
items_to_register = [('chestplate', ARMOR), ('leggings', ARMOR),
('boots', BOOTS), ('helmet', HELMETS), ('sword', WEAPONS),
('shovel', TOOLS), ('pickaxe', TOOLS), ('hoe', TOOLS),
('axe', AXES)]
RESOURCE_ADD = 'basemetals'
def get_texture_location(name):
return join(dirname(abspath(__file__)), 'textures', 'items', name)
def register_items():
for mat in materials:
for item, item_type in items_to_register:
item_name = mat + '_' + item
register_item(MCItem(RESOURCE_ADD, item_name, item_type,
get_texture_location(item_name + '.png')))
def register():
register_items()
|
Kovak/KivyNBT
|
mc_data/basemetals/__init__.py
|
Python
|
mit
| 1,297
|
import urllib2
import json
import os
import glob
import time
from ISStreamer.Streamer import Streamer
# --------- User Settings ---------
STATE = "TN"
CITY = "Nashville"
WUNDERGROUND_API_KEY = "Wunderground_API_Key_Here"
BUCKET_NAME = ":partly_sunny: " + CITY + " Weather"
BUCKET_KEY = "wunderground"
ACCESS_KEY = "Your_Access_Key_Here"
MINUTES_BETWEEN_READS = 0.5
# ---------------------------------
def isFloat(string):
try:
float(string)
return True
except ValueError:
return False
def get_conditions():
api_conditions_url = "http://api.wunderground.com/api/" + WUNDERGROUND_API_KEY + "/conditions/q/" + STATE + "/" + CITY + ".json"
try:
f = urllib2.urlopen(api_conditions_url)
except:
print "Failed to get conditions"
return False
json_conditions = f.read()
f.close()
return json.loads(json_conditions)
def get_astronomy():
api_astronomy_url = "http://api.wunderground.com/api/" + WUNDERGROUND_API_KEY + "/astronomy/q/" + STATE + "/" + CITY + ".json"
try:
f = urllib2.urlopen(api_astronomy_url)
except:
print "Failed to get astronomy"
return False
json_astronomy = f.read()
f.close()
return json.loads(json_astronomy)
def is_night(astronomy):
sunrise_hour = int(astronomy['moon_phase']['sunrise']['hour'])
sunrise_min = int(astronomy['moon_phase']['sunrise']['minute'])
sunset_hour = int(astronomy['moon_phase']['sunset']['hour'])
sunset_min = int(astronomy['moon_phase']['sunset']['minute'])
current_hour = int(astronomy['moon_phase']['current_time']['hour'])
current_min = int(astronomy['moon_phase']['current_time']['minute'])
if ( (current_hour < sunrise_hour) or
(current_hour > sunset_hour) or
((current_hour == sunrise_hour) and
(current_min < sunrise_min)) or
((current_hour == sunset_hour) and
(current_min > sunset_min)) ):
return True
return False
def moon_icon(moon_phase):
icon = {
"New Moon" : ":new_moon:",
"Waxing Crescent" : ":waxing_crescent_moon:",
"First Quarter" : ":first_quarter_moon:",
"Waxing Gibbous" : ":waxing_gibbous_moon:",
"Full Moon" : ":full_moon:",
"Full" : ":full_moon:",
"Waning Gibbous" : ":waning_gibbous_moon:",
"Last Quarter" : ":last_quarter_moon:",
"Waning Crescent" : ":waning_crescent_moon:",
}
return icon.get(moon_phase,":crescent_moon:")
def weather_icon(weather_conditions):
icon = {
"clear" : ":sun_with_face:",
"cloudy" : ":cloud:",
"flurries" : ":snowflake:",
"fog" : ":foggy:",
"hazy" : ":foggy:",
"mostlycloudy" : ":cloud:",
"mostlysunny" : ":sun_with_face:",
"partlycloudy" : ":partly_sunny:",
"partlysunny" : ":partly_sunny:",
"sleet" : ":sweat_drops: :snowflake:",
"rain" : ":umbrella:",
"snow" : ":snowflake:",
"sunny" : ":sun_with_face:",
"tstorms" : ":zap: :umbrella:",
"unknown" : ":sun_with_face:",
}
return icon.get(weather_conditions,":sun_with_face:")
def weather_status_icon (conditions, astronomy):
moon_phase = astronomy['moon_phase']['phaseofMoon']
weather_conditions = conditions['current_observation']['icon']
icon = weather_icon(weather_conditions)
if is_night(astronomy):
if ((icon == ":sunny:") or
(icon == ":partly_sunny:") or
(icon == ":sun_with_face:")):
return moon_icon(moon_phase)
return icon
def wind_dir_icon (conditions, astronomy):
icon = {
"East" : ":arrow_right:",
"ENE" : ":arrow_upper_right:",
"ESE" : ":arrow_lower_right:",
"NE" : ":arrow_upper_right:",
"NNE" : ":arrow_upper_right:",
"NNW" : ":arrow_upper_left:",
"North" : ":arrow_up:",
"NW" : ":arrow_upper_left:",
"SE" : ":arrow_lower_right:",
"South" : ":arrow_down:",
"SSE" : ":arrow_lower_right:",
"SSW" : ":arrow_lower_left:",
"SW" : ":arrow_lower_left:",
"Variable" : ":arrows_counterclockwise:",
"West" : ":arrow_left:",
"WNW" : ":arrow_upper_left:",
"WSW" : ":arrow_lower_left:",
}
return icon.get(conditions['current_observation']['wind_dir'],":crescent_moon:")
conditions = get_conditions()
astronomy = get_astronomy()
streamer = Streamer(bucket_name=BUCKET_NAME, bucket_key=BUCKET_KEY, access_key=ACCESS_KEY)
streamer.log(":house: Location",conditions['current_observation']['display_location']['full'])
f = open("status.txt","w")
while True:
conditions = get_conditions()
astronomy = get_astronomy()
if ((conditions != False) and (astronomy != False)):
humidity_pct = conditions['current_observation']['relative_humidity']
humidity = humidity_pct.replace("%","")
# Output conditions to a file
f.write("Time: " + astronomy['moon_phase']['current_time']['hour'] + ":" + astronomy['moon_phase']['current_time']['minute'] + "\n")
f.write("Location: " + conditions['current_observation']['display_location']['full'] + "\n")
f.write("Weather Condition: " + weather_status_icon(conditions, astronomy) + "\n")
f.write("Moon Phase: " + moon_icon(astronomy['moon_phase']['phaseofMoon']) + "\n")
f.write("Wind Direction: " + wind_dir_icon(conditions, astronomy) + "\n")
f.write("Temperature(F): " + str(conditions['current_observation']['temp_f']) + "\n")
f.write("Dewpoint(F): " + str(conditions['current_observation']['dewpoint_f']) + "\n")
f.write("Wind Speed(MPH): " + str(conditions['current_observation']['wind_mph']) + "\n")
f.write("Wind Gust(MPH): " + str(conditions['current_observation']['wind_gust_mph']) + "\n")
f.write("Humidity(%): " + str(humidity) + "\n")
f.write("Pressure(IN): " + str(conditions['current_observation']['pressure_in']) + "\n")
f.write("Precip 1 Hour(IN): " + str(conditions['current_observation']['precip_1hr_in']) + "\n")
f.write("Precip Today(IN): " + str(conditions['current_observation']['precip_today_in']) + "\n")
f.write("Solar Radiation (W/M^2): " + str(conditions['current_observation']['solarradiation']) + "\n")
f.write("UV Index: " + str(conditions['current_observation']['UV']) + "\n")
# Stream valid conditions to Initial State
streamer.log(":clock3: Updated Time",astronomy['moon_phase']['current_time']['hour'] + ":" + astronomy['moon_phase']['current_time']['minute'])
streamer.log(":cloud: Weather Conditions",weather_status_icon(conditions, astronomy))
streamer.log(":crescent_moon: Moon Phase",moon_icon(astronomy['moon_phase']['phaseofMoon']))
streamer.log(":dash: Wind Direction",wind_dir_icon(conditions, astronomy))
if isFloat(conditions['current_observation']['temp_f']):
streamer.log("Temperature(F)",conditions['current_observation']['temp_f'])
if isFloat(conditions['current_observation']['dewpoint_f']):
streamer.log("Dewpoint(F)",conditions['current_observation']['dewpoint_f'])
if isFloat(conditions['current_observation']['wind_mph']):
streamer.log(":dash: Wind Speed(MPH)",conditions['current_observation']['wind_mph'])
if isFloat(conditions['current_observation']['wind_gust_mph']):
streamer.log(":dash: Wind Gust(MPH)",conditions['current_observation']['wind_gust_mph'])
if isFloat(humidity):
streamer.log(":droplet: Humidity(%)",humidity)
if isFloat(conditions['current_observation']['pressure_in']):
streamer.log("Pressure(IN)",conditions['current_observation']['pressure_in'])
if isFloat(conditions['current_observation']['precip_1hr_in']):
streamer.log(":umbrella: Precip 1 Hour(IN)",conditions['current_observation']['precip_1hr_in'])
if isFloat(conditions['current_observation']['precip_today_in']):
streamer.log(":umbrella: Precip Today(IN)",conditions['current_observation']['precip_today_in'])
if isFloat(conditions['current_observation']['solarradiation']):
streamer.log(":sunny: Solar Radiation (watt/m^2)",conditions['current_observation']['solarradiation'])
if isFloat(conditions['current_observation']['UV']):
streamer.log(":sunny: UV Index:",conditions['current_observation']['UV'])
streamer.flush()
time.sleep(60*MINUTES_BETWEEN_READS)
|
InitialState/piot-athome
|
wunderground.py
|
Python
|
mit
| 8,055
|
from __future__ import division
from pySDC.Hooks import hooks
from pySDC.Stats import stats
import matplotlib.pyplot as plt
import numpy as np
class particles_output(hooks):
def __init__(self):
"""
Initialization of particles output
"""
super(particles_output,self).__init__()
# add figure object for further use
fig = plt.figure()
self.ax = fig.add_subplot(111)
self.ax.set_xlim([-1.5,1.5])
self.ax.set_ylim([-1.5,1.5])
plt.ion()
self.sframe = None
def dump_step(self,status):
"""
Overwrite standard dump per step
Args:
status: status object per step
"""
super(particles_output,self).dump_step(status)
# some abbreviations
L = self.level
u = L.uend
R = np.linalg.norm(u.pos.values)
H = 1/2*np.dot(u.vel.values,u.vel.values)+0.02/R
stats.add_to_stats(step=status.step, time=status.time, type='energy', value=H)
oldcol = self.sframe
# self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])
self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1])
# Remove old line collection before drawing
if oldcol is not None:
self.ax.collections.remove(oldcol)
plt.pause(0.00001)
return None
|
torbjoernk/pySDC
|
examples/spiraling_particle/HookClass.py
|
Python
|
bsd-2-clause
| 1,411
|
import numpy as np
from typing import Dict, List, Tuple
from collections import OrderedDict
from orderedset._orderedset import OrderedSet
from npf.variable import is_numeric, get_numeric
from npf import npf
import natsort
import csv
class Run:
def __init__(self, variables):
self.variables = variables
def format_variables(self, hide=None):
if hide is None:
hide = {}
s = []
for k, v in self.variables.items():
if k in hide: continue
if type(v) is tuple:
s.append('%s = %s' % (k, v[1]))
else:
s.append('%s = %s' % (k, v))
return ', '.join(s)
def print_variable(self, k, default=None):
v = self.variables.get(k,default)
if type(v) is tuple:
return v[1]
else:
return v
def copy(self):
newrun = Run(self.variables.copy())
return newrun
def inside(self, o):
for k, v in self.variables.items():
if not k in o.variables:
return False
ov = o.variables[k]
if type(v) is tuple:
v = v[1]
if type(ov) is tuple:
ov = ov[1]
if is_numeric(v) and is_numeric(ov):
if not get_numeric(v) == get_numeric(ov):
return False
else:
if not v == ov:
return False
return True
def intersect(self, common):
difs = set.difference(set(self.variables.keys()), common)
for dif in difs:
del self.variables[dif]
return self
def __eq__(self, o):
return self.inside(o) and o.inside(self)
def __hash__(self):
n = 0
for k, v in self.variables.items():
if type(v) is tuple:
v = v[1]
if is_numeric(v):
n += get_numeric(v).__hash__()
else:
n += str(v).__hash__()
n += k.__hash__()
return n
def __repr__(self):
return "Run(" + self.format_variables() + ")"
def __cmp__(self, o):
for k, v in self.variables.items():
if not k in o.variables: return 1
ov = o.variables[k]
if is_numeric(v) and is_numeric(ov):
return get_numeric(v) - get_numeric(ov)
if type(v) is str or type(ov) is str:
if str(v) < str(ov):
return -1
if str(v) > str(ov):
return 1
else:
if v < ov:
return -1
if v > ov:
return 1
return 0
def __lt__(self, o):
return self.__cmp__(o) < 0
def __len__(self):
return len(self.variables)
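# A minimal sketch of how Run containment compares two runs; the variable
# names and values are placeholders for illustration.
def example_run_inside():
    small = Run({'RATE': 10})
    big = Run({'RATE': 10, 'CORES': 2})
    # Every variable of `small` is matched in `big`, but not the reverse,
    # so the runs overlap without being equal.
    assert small.inside(big)
    assert not big.inside(small)
    assert small != big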
Dataset = Dict[Run, Dict[str, List]]
ResultType = str
XYEB = Tuple
AllXYEB = Dict[ResultType, List[XYEB]]
def var_divider(testie: 'Testie', key: str, result_type = None):
div = testie.config.get_dict_value("var_divider", key, result_type=result_type, default=1)
if is_numeric(div):
return float(div)
if div.lower() == 'g':
return 1024 * 1024 * 1024
elif div.lower() == 'm':
return 1024 * 1024
elif div.lower() == 'k':
return 1024
return 1
def group_val(result, t):
if t == 'mean':
return np.mean(result)
elif t == 'avg':
return np.average(result)
elif t == 'min':
return np.min(result)
elif t == 'max':
return np.max(result)
elif t[:4] == 'perc':
return np.percentile(result, int(t[4:]))
elif t == 'median' or t == 'med':
return np.median(result)
elif t == 'std':
return np.std(result)
elif t == 'nres' or t == 'n':
return len(result)
elif t == 'first':
return result[0]
elif t == 'last':
return result[-1]
elif t == 'all':
return result
else:
print("WARNING : Unknown format %s" % t)
return np.nan
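# A small illustration of group_val() with made-up measurements; the string
# spec selects how the list is collapsed to a single value.
def example_group_val():
    sample = [1, 2, 3, 4]
    print(group_val(sample, 'mean'))    # -> 2.5
    print(group_val(sample, 'perc75'))  # -> 3.25 (numpy's default interpolation)
    print(group_val(sample, 'n'))       # -> 4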
def write_output(datasets, statics, options, run_list, kind=None):
if options.output is None:
return
all_result_types = OrderedSet()
for testie,build,all_results in datasets:
for run, run_results in all_results.items():
for result_type,results in run_results.items():
all_result_types.add(result_type)
for testie, build, all_results in datasets:
csvs = OrderedDict()
for run in run_list:
results_types = all_results.get(run, OrderedDict())
for result_type in all_result_types:
if result_type in csvs:
type_filename,csvfile,wr = csvs[result_type]
else:
type_filename = npf.build_filename(testie, build, options.output if options.output != 'graph' else options.graph_filename, statics, 'csv', type_str=result_type, show_serie=(len(datasets) > 1 or options.show_serie), force_ext=True, data_folder=True, prefix = kind + '-' if kind else None)
csvfile = open(type_filename, 'w')
wr = csv.writer(csvfile, delimiter=' ',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
csvs[result_type] = (type_filename,csvfile,wr)
result = results_types.get(result_type,None)
if result is not None:
row = []
for t in options.output_columns:
if t == 'x':
for var,val in run.variables.items():
if var in statics:
continue
row.append(val)
elif t == 'all_x':
for var,val in run.variables.items():
row.append(val)
elif t == 'raw':
row.extend(result)
else:
yval = group_val(result,t)
if yval is not None:
try:
it = iter(yval)
row.extend(yval)
except TypeError as te:
row.append(yval)
if row:
wr.writerow(row)
for result_type in csvs.keys():
if options.output is not None:
print("Output written to %s" % csvs[result_type][0])
csvs[result_type][1].close()
def convert_to_xyeb(datasets: List[Tuple['Testie', 'Build' , Dataset]], run_list, key, do_x_sort, statics, options, max_series = None, series_sort=None, y_group={}, color=[], kind = None) -> AllXYEB:
write_output(datasets, statics, options, run_list, kind)
data_types = OrderedDict()
all_result_types = OrderedSet()
for testie,build,all_results in datasets:
for run, run_results in all_results.items():
for result_type,results in run_results.items():
all_result_types.add(result_type)
for testie, build, all_results in datasets:
x = OrderedDict()
y = OrderedDict()
e = OrderedDict()
for run in run_list:
if len(run) == 0:
xval = build.pretty_name()
else:
xval = run.print_variable(key, build.pretty_name())
results_types = all_results.get(run, OrderedDict())
for result_type in all_result_types:
                # ydiv = var_divider(testie, "result", result_type)  # results are already divided upstream
xdiv = var_divider(testie, key)
result = results_types.get(result_type,None)
if xdiv != 1 and is_numeric(xval):
x.setdefault(result_type, []).append(get_numeric(xval) / xdiv)
else:
x.setdefault(result_type, []).append(xval)
if result is not None:
yval = group_val(result, y_group[result_type] if result_type in y_group else ( y_group['result'] if 'result' in y_group else 'mean'))
y.setdefault(result_type, []).append(yval)
std = np.std(result)
mean = np.mean(result)
e.setdefault(result_type, []).append((mean, std, result))
else:
y.setdefault(result_type, []).append(np.nan)
e.setdefault(result_type, []).append((np.nan, np.nan, [np.nan]))
for result_type in x.keys():
try:
if not do_x_sort:
ox = x[result_type]
oy = y[result_type]
oe = e[result_type]
else:
order = np.argsort(x[result_type])
ox = np.array(x[result_type])[order]
oy = np.array(y[result_type])[order]
oe = [e[result_type][i] for i in order]
data_types.setdefault(result_type, []).append((ox,oy,oe,build))
except Exception as err:
print("ERROR while transforming data")
print(err)
print("x",x[result_type])
print("y",y[result_type])
print("e",e[result_type])
if series_sort is not None and series_sort != "":
if type(series_sort) is str and series_sort.startswith('-'):
inverted = True
series_sort = series_sort[1:]
else:
inverted = False
new_data_types = OrderedDict()
for result_type, data in data_types.items():
avg = []
max = []
min = []
for x,y,e,build in data:
if not np.isnan(np.sum(y)):
avg.append(np.sum(y))
else:
avg.append(0)
max.append(np.max(y))
min.append(np.min(y))
if type(series_sort) is list:
ok = True
for i,so in enumerate(series_sort):
if is_numeric(so):
o = so
if o >= len(data):
print("ERROR: sorting for %s is invalid, %d is out of range" % (result_type,o))
ok = False
break
elif so in [x for x,y,e,build in data]:
o = [x for x,y,e,build in data].index(so)
elif so in [build.pretty_name() for x,y,e,build in data]:
o = [build.pretty_name() for x,y,e,build in data].index(so)
else:
print("ERROR: sorting for %s is invalid, %s is not in list" % (result_type,so))
ok = False
break
series_sort[i] = o
if ok:
order = series_sort
else:
order = np.argsort(np.asarray(avg))
elif series_sort == 'avg':
order = np.argsort(np.asarray(avg))
elif series_sort == 'max':
order = np.argsort(- np.asarray(max))
elif series_sort == 'min':
order = np.argsort(np.asarray(min))
elif series_sort == 'natsort':
order = natsort.index_natsorted(data,key=lambda x: x[3].pretty_name())
elif series_sort == 'color':
order = np.argsort(color)
else:
raise Exception("Unknown sorting : %s" % series_sort)
if inverted:
order = np.flip(order,0)
data = [data[i] for i in order]
new_data_types[result_type] = data
data_types = new_data_types
if max_series:
new_data_types = OrderedDict()
for i,(result_type,data) in enumerate(data_types.items()):
new_data_types[result_type] = data[:max_series]
data_types = new_data_types
return data_types
|
tbarbette/clickwatcher
|
npf/types/dataset.py
|
Python
|
gpl-3.0
| 12,766
|
from django.db import models
# Create your models here.
class Event(models.Model):
url = models.URLField(null=True)
img_url = models.URLField(null=True)
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.title
|
kermit666/posterwall
|
posterwall/apps/events/models.py
|
Python
|
agpl-3.0
| 294
|
# -*- coding: utf-8 -*-
"""
Folium Colormap Module
----------------------
"""
import folium.colormap as cm
def test_simple_step():
step = cm.StepColormap(['green', 'yellow', 'red'],
vmin=3., vmax=10.,
index=[3, 4, 8, 10], caption='step')
step = cm.StepColormap(['r', 'y', 'g', 'c', 'b', 'm'])
step._repr_html_()
def test_simple_linear():
linear = cm.LinearColormap(['green', 'yellow', 'red'], vmin=3., vmax=10.)
linear = cm.LinearColormap(['red', 'orange', 'yellow', 'green'],
index=[0, 0.1, 0.9, 1.])
linear._repr_html_()
def test_linear_to_step():
some_list = [30.6, 50, 51, 52, 53, 54, 55, 60, 70, 100]
lc = cm.linear.YlOrRd
lc.to_step(n=12)
lc.to_step(index=[0, 2, 4, 6, 8, 10])
lc.to_step(data=some_list, n=12)
lc.to_step(data=some_list, n=12, method='linear')
lc.to_step(data=some_list, n=12, method='log')
lc.to_step(data=some_list, n=30, method='quantiles')
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1])
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1], round_method='int')
lc.to_step(data=some_list, quantiles=[0, 0.3, 0.7, 1],
round_method='log10')
def test_step_to_linear():
step = cm.StepColormap(['green', 'yellow', 'red'],
vmin=3., vmax=10.,
index=[3, 4, 8, 10], caption='step')
step.to_linear()
def test_linear_object():
cm.linear.OrRd._repr_html_()
cm.linear.PuBu.to_step(12)
cm.linear.YlGn.scale(3, 12)
cm.linear._repr_html_()
|
BibMartin/folium
|
tests/test_colormap.py
|
Python
|
mit
| 1,615
|
# -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import gettext
import os
import gtk
import gtk.glade
from twisted.python import util
from twisted.internet import defer
from zope.interface import implements
from flumotion.common import errors, log, messages
from flumotion.common.i18n import N_, gettexter
from flumotion.configure import configure
from flumotion.twisted import flavors
from flumotion.ui.fgtk import ProxyWidgetMapping
_ = gettext.gettext
__version__ = "$Rev$"
T_ = gettexter()
class BaseAdminGtkNode(log.Loggable):
"""
I am a base class for all GTK+-based Admin UI nodes.
I am a view on a set of properties for a component.
@ivar widget: the main widget representing this node
@type widget: L{gtk.Widget}
@ivar wtree: the widget tree representation for this node
"""
implements(flavors.IStateListener)
logCategory = "admingtk"
gladeFile = None ## Relative path of the glade file.
## e.g. "flumotion/ui.glade"
gettextDomain = configure.PACKAGE
def __init__(self, state, admin, title=None):
"""
@param state: state of component this is a UI node for
@type state: L{flumotion.common.planet.AdminComponentState}
@param admin: the admin model that interfaces with the manager for us
@type admin: L{flumotion.admin.admin.AdminModel}
@param title: the (translated) title to show this node with
@type title: str
"""
self._debugEnabled = False
self.state = state
self.admin = admin
self.statusbar = None
self.title = title
self.nodes = util.OrderedDict()
self.wtree = None # glade.XML instance (optionally set)
self.widget = None # the top level widget that will be visible
self.uiState = None # set if we are listening
self._pendingUIState = None # set if we are waiting for the ui
# to load
## Absolute path to the glade file.
## e.g. "/home/flu/.flumotion/cache/test/80...df7/flumotion/ui.glade
self._gladefilepath = None
def setDebugEnabled(self, enabled):
"""Set if debug should be enabled.
Not all pages are visible unless debugging is set to true
@param enabled: whether debug should be enabled
@type enabled: bool
"""
self._debugEnabled = enabled
def cleanup(self):
if self.uiState:
self.uiState.removeListener(self)
def status_push(self, str):
if self.statusbar:
return self.statusbar.push('notebook', str)
def status_pop(self, mid):
if self.statusbar:
return self.statusbar.remove('notebook', mid)
def callRemote(self, methodName, *args, **kwargs):
return self.admin.componentCallRemote(self.state, methodName,
*args, **kwargs)
# FIXME: do this automatically if there is a gladeFile class attr set
def loadGladeFile(self, gladeFile, domain=configure.PACKAGE):
"""
Returns: a deferred returning the widget tree from the glade file.
"""
def _getBundledFileCallback(result, gladeFile):
path = result
if not os.path.exists(path):
self.warning("Glade file %s not found in path %s" % (
gladeFile, path))
self.debug("loading widget tree from %s" % path)
old = gtk.glade.textdomain()
self.debug("Switching glade text domain from %s to %s" % (
old, domain))
self._gladefilepath = path
gtk.glade.textdomain(domain)
self.wtree = gtk.glade.XML(path,
typedict=ProxyWidgetMapping())
self.debug("Switching glade text domain back from %s to %s" % (
domain, old))
gtk.glade.textdomain(old)
return self.wtree
# The manager is always using / as a path separator, to avoid
# confusion, convert os.path.sep -> / here.
gladeFile = gladeFile.replace(os.path.sep, '/')
# FIXME: this does needless roundtrips; should instead be
# loading from the already-downloaded paths
self.debug("requesting bundle for glade file %s" % gladeFile)
d = self.admin.bundleLoader.getFile(gladeFile)
d.addCallback(_getBundledFileCallback, gladeFile)
return d
def getWidget(self, name):
if not self.wtree:
raise IndexError
widget = self.wtree.get_widget(name)
if not widget:
self.warning('Could not get widget %s' % name)
return widget
def createWidget(self, name):
"""
Create a new widget instance from the glade file.
Can be used to make multiple instances of the same widget.
"""
if not self._gladefilepath:
raise IndexError
wtree = gtk.glade.XML(self._gladefilepath, name,
typedict=ProxyWidgetMapping())
widget = wtree.get_widget(name)
if not widget:
self.warning('Could not create widget %s' % name)
return widget
def haveWidgetTree(self):
"""
I am called when the widget tree has been gotten from the glade
file. Responsible for setting self.widget.
Override me to act on it.
"""
pass
def gotUIState(self, state):
if self.widget:
self.setUIState(state)
else:
self._pendingUIState = state
def setUIState(self, state):
"""
Called by the BaseAdminGtk when it gets the UI state and the GUI
is ready. Chain up if you provide your own implementation.
"""
self.uiState = state
state.addListener(self, set_=self.stateSet, append=self.stateAppend,
remove=self.stateRemove, setitem=self.stateSetitem,
delitem=self.stateDelitem)
def stateSet(self, state, key, value):
"Override me"
pass
def stateAppend(self, state, key, value):
"Override me"
pass
def stateRemove(self, state, key, value):
"Override me"
pass
def stateSetitem(self, state, key, subkey, value):
"Override me"
pass
def stateDelitem(self, state, key, subkey, value):
"Override me"
pass
def render(self):
"""
Render the GTK+ admin view for this component.
Returns: a deferred returning the main widget for embedding
"""
self.debug('BaseAdminGtkNode.render() for %s' % self.title)
# clear up previous error messages
allmessages = self.state.get('messages', [])
for message in allmessages:
# since we can have multiple nodes, only remove the one from
# ours; this assumes each node's title is unique for a component
if message.id == 'render-%s' % self.title:
self.debug('Removing previous messages %r' % message)
self.state.observe_remove('messages', message)
def error(debug):
# add an error message to the component and return
# an error label, given a debug string
self.warning("error rendering component UI; debug %s", debug)
m = messages.Error(T_(N_(
"Internal error in component UI's '%s' tab. "
"Please file a bug against the component."), self.title),
debug=debug, mid="render-%s" % self.title)
self.addMessage(m)
label = gtk.Label(_("Internal error.\nSee component error "
"message\nfor more details."))
# if we don't set this error as our label, we will raise
# a TypeError below and obscure this more meaningful error
self.widget = label
return label
def loadGladeFile():
if not self.gladeFile:
return defer.succeed(None)
def haveWtree(wtree):
self.wtree = wtree
self.debug('render: calling haveWidgetTree')
try:
self.haveWidgetTree()
except Exception, e:
return error(log.getExceptionMessage(e))
self.debug('render: loading glade file %s in text domain %s',
self.gladeFile, self.gettextDomain)
d = self.loadGladeFile(self.gladeFile, self.gettextDomain)
d.addCallback(haveWtree)
return d
def loadGladeFileErrback(failure):
if failure.check(RuntimeError):
return error(
'Could not load glade file %s.' % self.gladeFile)
if failure.check(errors.NoBundleError):
return error(
'No bundle found containing %s.' % self.gladeFile)
return failure
def renderFinishedCallback(_):
if not self.widget:
self.debug('render: no self.widget, failing')
raise TypeError(
'%r.haveWidgetTree should have set self.widget' %
self.__class__)
if self._pendingUIState:
self.debug('render: calling setUIState on the node')
self.setUIState(self._pendingUIState)
self.debug('renderFinished: returning widget %s', self.widget)
return self.widget
def renderFinishedErrback(failure):
return error(log.getFailureMessage(failure))
d = loadGladeFile()
d.addErrback(loadGladeFileErrback)
d.addCallback(renderFinishedCallback)
d.addErrback(renderFinishedErrback)
return d
def addMessage(self, message):
"""
Add a message to the component.
Since this is called in a component view and only relevant to the
component view, the message only exists in the view, and is not
replicated to the manager state.
The message will be displayed in the usual message view.
@type message: L{flumotion.common.messages.Message}
"""
self.state.observe_append('messages', message)
|
timvideos/flumotion
|
flumotion/component/base/baseadminnode.py
|
Python
|
lgpl-2.1
| 10,955
|
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from numpy.testing import assert_raises
from mne import io, read_events, pick_types
from mne.utils import requires_scipy_version, run_tests_if_main
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def _get_raw():
raw = io.Raw(raw_fname, preload=True)
raw.pick_channels(raw.ch_names[:9])
return raw
def _get_events():
return read_events(event_name)
def test_plot_raw():
"""Test plotting of raw data
"""
import matplotlib.pyplot as plt
raw = _get_raw()
events = _get_events()
plt.close('all') # ensure all are closed
with warnings.catch_warnings(record=True):
fig = raw.plot(events=events, show_options=True)
# test mouse clicks
x = fig.get_axes()[0].lines[1].get_xdata().mean()
y = fig.get_axes()[0].lines[1].get_ydata().mean()
data_ax = fig.get_axes()[0]
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad channel
_fake_click(fig, data_ax, [x, y], xform='data') # unmark a bad channel
_fake_click(fig, data_ax, [0.5, 0.999]) # click elsewhere in 1st axes
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
_fake_click(fig, fig.get_axes()[1], [0.5, 0.5]) # change time
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change channels
_fake_click(fig, fig.get_axes()[3], [0.5, 0.5]) # open SSP window
fig.canvas.button_press_event(1, 1, 1) # outside any axes
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
# sadly these fail when no renderer is used (i.e., when using Agg):
# ssp_fig = set(plt.get_fignums()) - set([fig.number])
# assert_equal(len(ssp_fig), 1)
# ssp_fig = plt.figure(list(ssp_fig)[0])
# ax = ssp_fig.get_axes()[0] # only one axis is used
# t = [c for c in ax.get_children() if isinstance(c,
# matplotlib.text.Text)]
# pos = np.array(t[0].get_position()) + 0.01
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # off
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # on
# test keypresses
fig.canvas.key_press_event('escape')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('?')
fig.canvas.key_press_event('f11')
fig.canvas.key_press_event('escape')
# Color setting
assert_raises(KeyError, raw.plot, event_color={0: 'r'})
assert_raises(TypeError, raw.plot, event_color={'foo': 'r'})
fig = raw.plot(events=events, event_color={-1: 'r', 998: 'b'})
plt.close('all')
@requires_scipy_version('0.10')
def test_plot_raw_filtered():
"""Test filtering of raw plots
"""
raw = _get_raw()
assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
assert_raises(ValueError, raw.plot, highpass=0)
assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
assert_raises(ValueError, raw.plot, clipping='foo')
raw.plot(lowpass=1, clipping='transparent')
raw.plot(highpass=1, clipping='clamp')
raw.plot(highpass=1, lowpass=2)
@requires_scipy_version('0.12')
def test_plot_raw_psd():
"""Test plotting of raw psds
"""
import matplotlib.pyplot as plt
raw = _get_raw()
# normal mode
raw.plot_psd(tmax=2.0)
# specific mode
picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
raw.plot_psd(picks=picks, area_mode='range')
ax = plt.axes()
# if ax is supplied, picks must be, too:
assert_raises(ValueError, raw.plot_psd, ax=ax)
raw.plot_psd(picks=picks, ax=ax)
plt.close('all')
run_tests_if_main()
|
trachelr/mne-python
|
mne/viz/tests/test_raw.py
|
Python
|
bsd-3-clause
| 4,667
|
import click
import os
import sys
from .linter import lint_css
from .inliner import inline_css
@click.command()
@click.argument('css_file', required=True, type=click.File('r'))
def lint(css_file):
"""Lints email css and prints issues per client."""
css = css_file.read()
issues = lint_css(css) or []
for issue in issues:
click.echo(issue)
@click.command()
@click.option('--allow_invalid_css', '-i', is_flag=True, help="Allows css that doesn't lint.")
@click.option('--remove_classes', '-c', is_flag=True, help="Strip all class attributes after inlining")
@click.argument('html_file', required=True, type=click.File('r'))
@click.argument('css_file', required=True, type=click.File('r'))
def inline(html_file, css_file, allow_invalid_css, remove_classes):
"""Inlines css into html to make it safe for email."""
files = {}
for extension, f in [("html", html_file), ("css", css_file)]:
files[extension] = f.read()
html = inline_css(*list(files.values()),
strip_unsupported_css=(not allow_invalid_css),
remove_classes=remove_classes)
click.echo(html)
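# A minimal sketch of invoking the lint command programmatically with click's
# test runner; the CSS content and file name below are placeholders for
# illustration only.
def example_lint_invocation():
    from click.testing import CliRunner
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('styles.css', 'w') as fh:
            fh.write('p { color: red; }')
        result = runner.invoke(lint, ['styles.css'])
        print(result.output)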
|
Parsely/emailipy
|
emailipy/cli.py
|
Python
|
apache-2.0
| 1,149
|
from django.utils.timezone import utc as timezone_utc
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.timestamp import floor_to_hour, floor_to_day, ceiling_to_hour, \
ceiling_to_day, timestamp_to_datetime, datetime_to_timestamp, \
TimezoneNotUTCException, convert_to_UTC
from datetime import datetime, timedelta
from dateutil import parser
import pytz
class TestTimestamp(ZulipTestCase):
def test_datetime_and_timestamp_conversions(self) -> None:
timestamp = 1483228800
for dt in [
parser.parse('2017-01-01 00:00:00.123 UTC'),
parser.parse('2017-01-01 00:00:00.123').replace(tzinfo=timezone_utc),
parser.parse('2017-01-01 00:00:00.123').replace(tzinfo=pytz.utc)]:
self.assertEqual(timestamp_to_datetime(timestamp), dt-timedelta(microseconds=123000))
self.assertEqual(datetime_to_timestamp(dt), timestamp)
for dt in [
parser.parse('2017-01-01 00:00:00.123+01:00'),
parser.parse('2017-01-01 00:00:00.123')]:
with self.assertRaises(TimezoneNotUTCException):
datetime_to_timestamp(dt)
def test_convert_to_UTC(self) -> None:
utc_datetime = parser.parse('2017-01-01 00:00:00.123 UTC')
for dt in [
parser.parse('2017-01-01 00:00:00.123').replace(tzinfo=timezone_utc),
parser.parse('2017-01-01 00:00:00.123'),
parser.parse('2017-01-01 05:00:00.123+05')]:
self.assertEqual(convert_to_UTC(dt), utc_datetime)
def test_enforce_UTC(self) -> None:
non_utc_datetime = parser.parse('2017-01-01 00:00:00.123')
        for function in [floor_to_hour, floor_to_day, ceiling_to_hour, ceiling_to_day]:
with self.assertRaises(TimezoneNotUTCException):
function(non_utc_datetime)
|
jackrzhang/zulip
|
zerver/tests/test_timestamp.py
|
Python
|
apache-2.0
| 1,871
|
import pygame
import os
from graphics import *
# class to define the easy difficulty map
class Tile(pygame.sprite.Sprite):
def __init__(self, gridX, gridY, x, y):
pygame.sprite.Sprite.__init__(self)
#self.tiles = []
self.image = grassTile
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.tileNum = 0 #start as grass tile
self.gridX = gridX
self.gridY = gridY
self.isEmpty = True
def update(self, tileNum):
self.tileNum = tileNum
return tileNum
def placeTower(self):
self.isEmpty = False
class EasyMap:
def __init__(self):
self.square = 40 # 1 side of the square (square x square dimensions)
self.margin = 1
self.xLoc = 50 #x location to start drawing map at
self.yLoc = 135 # y location to start drawing map at
self.gridX = 10 # how many tiles to draw horizontally
self.gridY = 15 # how many tiles to draw vertically
self.grid = [] # map grid
self.grassTiles = []
self.startX = 50 # starting x for enemies
self.startY = 225 # starting y for enemies
self.grid = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,0,1,1,1,0,0,0],
[1,1,1,0,1,0,0,1,0,1,0,1,0,0,0],
[0,0,1,0,1,0,0,1,0,1,0,1,0,0,0],
[0,0,1,0,1,0,0,1,0,1,0,1,0,0,0],
[0,0,1,0,1,0,0,1,0,1,0,1,0,0,0],
[0,0,1,0,1,0,0,1,0,1,0,1,0,0,0],
[0,0,1,0,1,0,0,1,0,1,0,1,1,1,1],
[0,0,1,1,1,0,0,1,1,1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
#self.grassTile = pygame.image.load("C:\\Python34\\CrystalDefense1.0\\grassTile.png")
#self.roadTile = pygame.image.load("C:\\Python34\\CrystalDefense1.0\\roadTile.png")
def drawMap(self, screen):
i = 0
x = self.xLoc
y = self.yLoc
currRow = 0
currCol = 0
for i in range(0, self.gridX):
for j in range(0, self.gridY):
#pygame.draw.rect(screen, (255,255,255), (x, y, gridWidth, gridHeight))
if self.grid[i][j] == 0:
screen.blit(grassTile, (x,y,self.square,self.square))
tile = Tile(i, j, x, y)
self.grassTiles.append(tile)
#grassTile.rect = grassTile.get_rect()
#self.grassTiles.append(tile.rect)
if self.grid[i][j] == 1:
screen.blit(roadTile, (x,y,self.square,self.square))
# Level 1 Towers
if self.grid[i][j] == 2:
screen.blit(redTower1, (x,y,self.square,self.square))
if self.grid[i][j] == 3:
screen.blit(blueTower1, (x,y,self.square,self.square))
if self.grid[i][j] == 4:
screen.blit(greenTower1, (x,y,self.square,self.square))
# Level 2 Towers
if self.grid[i][j] == 5:
screen.blit(redTower2, (x,y,self.square,self.square))
if self.grid[i][j] == 6:
screen.blit(blueTower2, (x,y,self.square,self.square))
if self.grid[i][j] == 7:
screen.blit(greenTower2, (x,y,self.square,self.square))
# Level 3 Towers
if self.grid[i][j] == 8:
screen.blit(redTower3, (x,y,self.square,self.square))
if self.grid[i][j] == 9:
screen.blit(blueTower3, (x,y,self.square,self.square))
if self.grid[i][j] == 10:
screen.blit(greenTower3, (x,y,self.square,self.square))
x += self.square + self.margin
#pygame.draw.rect(screen, (255,255,255),(gridWidth, gridHeight), (xLoc, yLoc))
#pygame.draw.rect(screen, (255,255,255),(gridWidth, gridWidth, gridHeight, gridHeight))
x = self.xLoc
y += self.square + self.margin
# Set the turning points for enemies to follow
# if modifying map (self.grid), these will need to be modified as well
def directions(self, sprite):
enemy = sprite
x = enemy.rect.center[0]
y = enemy.rect.center[1]
#Turn 1
if enemy.turn == 1 and x == 152:# and y == 222:
enemy.turn += 1
enemy.xVel = 0
enemy.yVel = 1
#Turn 2
if enemy.turn == 2 and y == 475:#if x == 142 and y == 470:
enemy.turn += 1
enemy.xVel = 1
enemy.yVel = 0
#Turn 3
if enemy.turn == 3 and x == 234:#x == 226 and y == 470:
enemy.turn += 1
enemy.xVel = 0
enemy.yVel = -1
#Turn 4
if enemy.turn == 4 and y == 188:# x == 226 and y == 180:
enemy.turn += 1
enemy.xVel = 1
enemy.yVel = 0
#Turn 5
if enemy.turn == 5 and x == 358:#x == 346 and y == 180:
enemy.turn += 1
enemy.xVel = 0
enemy.yVel = 1
#Turn 6
if enemy.turn == 6 and y == 475:#x == 346 and y == 470:
enemy.turn += 1
enemy.xVel = 1
enemy.yVel = 0
#Turn 7
if enemy.turn == 7 and x == 439:#x == 429 and y == 470:
enemy.turn += 1
enemy.xVel = 0
enemy.yVel = -1
#Turn 8
if enemy.turn == 8 and y == 187:#x == 429 and y == 180:
enemy.turn += 1
enemy.xVel = 1
enemy.yVel = 0
#Turn 9
if enemy.turn == 9 and x == 521:#x == 510 and y == 180:
enemy.turn += 1
enemy.xVel = 0
enemy.yVel = 1
#Turn 10
if enemy.turn == 10 and y == 437:#x == 510 and y == 430:
enemy.turn += 1
enemy.xVel = 1
enemy.yVel = 0
if enemy.turn == 11 and x == 650:#x == 644 and y == 430:
#enemy.turn += 1
enemy.xVel = 0
enemy.dead = True
enemy.end = True
def swapTile(self, i, j, newTileNum):
swappedTile = False
if newTileNum > 0:
if newTileNum == self.grid[i][j]: #if this tile already exists
return swappedTile
if self.grid[i][j] >= 2: #if ANY tower is on this spot... don't swap it
return swappedTile
self.grid[i][j] = newTileNum
swappedTile = True
else: # we sold a tower, graphically remove it from tile
self.grid[i][j] = 0
#print("swapped tile!")
return swappedTile
|
taytam/crystaldefense
|
easymap.py
|
Python
|
mit
| 5,333
|
from guizero import App, Window, PushButton
from guizero.utilities import GUIZeroImage
from tkinter import PhotoImage
app = App(title="Main window")
app.icon = "guizero.gif"
window = Window(app, title="2nd window", visible=False)
open_window_button = PushButton(app, text="Open window", command=window.show)
close_window_button = PushButton(window, text="Close", command=window.hide)
app.display()
|
lawsie/guizero
|
examples/app_icon.py
|
Python
|
bsd-3-clause
| 405
|
# -*- coding: utf-8 -*-
# Copyright 2020-2022 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://www.furaffinity.net/"""
from .common import Extractor, Message
from .. import text, util
BASE_PATTERN = r"(?:https?://)?(?:www\.|sfw\.)?furaffinity\.net"
class FuraffinityExtractor(Extractor):
"""Base class for furaffinity extractors"""
category = "furaffinity"
directory_fmt = ("{category}", "{user!l}")
filename_fmt = "{id}{title:? //}.{extension}"
archive_fmt = "{id}"
cookiedomain = ".furaffinity.net"
root = "https://www.furaffinity.net"
_warning = True
def __init__(self, match):
Extractor.__init__(self, match)
self.user = match.group(1)
self.offset = 0
if self.config("descriptions") == "html":
self._process_description = str.strip
layout = self.config("layout")
if layout and layout != "auto":
self._new_layout = False if layout == "old" else True
else:
self._new_layout = None
def items(self):
if self._warning:
if not self._check_cookies(("a", "b")):
self.log.warning("no 'a' and 'b' session cookies set")
FuraffinityExtractor._warning = False
external = self.config("external", False)
metadata = self.metadata()
for post_id in util.advance(self.posts(), self.offset):
post = self._parse_post(post_id)
if post:
if metadata:
post.update(metadata)
yield Message.Directory, post
yield Message.Url, post["url"], post
if external:
for url in text.extract_iter(
post["_description"], 'href="http', '"'):
yield Message.Queue, "http" + url, post
def metadata(self):
return None
def skip(self, num):
self.offset += num
return num
def _parse_post(self, post_id):
url = "{}/view/{}/".format(self.root, post_id)
extr = text.extract_from(self.request(url).text)
if self._new_layout is None:
self._new_layout = ("http-equiv=" not in extr("<meta ", ">"))
path = extr('href="//d', '"')
if not path:
self.log.warning(
"Unable to download post %s (\"%s\")",
post_id, text.remove_html(
extr('System Message', '</section>') or
extr('System Message', '</table>')
)
)
return None
pi = text.parse_int
rh = text.remove_html
data = text.nameext_from_url(path, {
"id" : pi(post_id),
"url": "https://d" + path,
})
if self._new_layout:
data["tags"] = text.split_html(extr(
'class="tags-row">', '</section>'))
data["title"] = text.unescape(extr("<h2><p>", "</p></h2>"))
data["artist"] = extr("<strong>", "<")
data["_description"] = extr('class="section-body">', '</div>')
data["views"] = pi(rh(extr('class="views">', '</span>')))
data["favorites"] = pi(rh(extr('class="favorites">', '</span>')))
data["comments"] = pi(rh(extr('class="comments">', '</span>')))
data["rating"] = rh(extr('class="rating">', '</span>'))
data["fa_category"] = rh(extr('>Category</strong>', '</span>'))
data["theme"] = rh(extr('>', '<'))
data["species"] = rh(extr('>Species</strong>', '</div>'))
data["gender"] = rh(extr('>Gender</strong>', '</div>'))
data["width"] = pi(extr("<span>", "x"))
data["height"] = pi(extr("", "p"))
else:
# old site layout
data["title"] = text.unescape(extr("<h2>", "</h2>"))
data["artist"] = extr(">", "<")
data["fa_category"] = extr("<b>Category:</b>", "<").strip()
data["theme"] = extr("<b>Theme:</b>", "<").strip()
data["species"] = extr("<b>Species:</b>", "<").strip()
data["gender"] = extr("<b>Gender:</b>", "<").strip()
data["favorites"] = pi(extr("<b>Favorites:</b>", "<"))
data["comments"] = pi(extr("<b>Comments:</b>", "<"))
data["views"] = pi(extr("<b>Views:</b>", "<"))
data["width"] = pi(extr("<b>Resolution:</b>", "x"))
data["height"] = pi(extr("", "<"))
data["tags"] = text.split_html(extr(
'id="keywords">', '</div>'))[::2]
data["rating"] = extr('<img alt="', ' ')
data["_description"] = extr("</table>", "</table>")
data["artist_url"] = data["artist"].replace("_", "").lower()
data["user"] = self.user or data["artist_url"]
data["date"] = text.parse_timestamp(data["filename"].partition(".")[0])
data["description"] = self._process_description(data["_description"])
return data
@staticmethod
def _process_description(description):
return text.unescape(text.remove_html(description, "", ""))
def _pagination(self, path):
num = 1
while True:
url = "{}/{}/{}/{}/".format(
self.root, path, self.user, num)
page = self.request(url).text
post_id = None
for post_id in text.extract_iter(page, 'id="sid-', '"'):
yield post_id
if not post_id:
return
num += 1
def _pagination_favorites(self):
path = "/favorites/{}/".format(self.user)
while path:
page = self.request(self.root + path).text
yield from text.extract_iter(page, 'id="sid-', '"')
path = text.extract(page, 'right" href="', '"')[0]
def _pagination_search(self, query):
url = self.root + "/search/"
data = {
"page" : 0,
"next_page" : "Next",
"order-by" : "relevancy",
"order-direction": "desc",
"range" : "all",
"rating-general" : "on",
"rating-mature" : "on",
"rating-adult" : "on",
"type-art" : "on",
"type-music" : "on",
"type-flash" : "on",
"type-story" : "on",
"type-photo" : "on",
"type-poetry" : "on",
"mode" : "extended",
}
data.update(query)
if "page" in query:
data["page"] = text.parse_int(query["page"])
while True:
page = self.request(url, method="POST", data=data).text
post_id = None
for post_id in text.extract_iter(page, 'id="sid-', '"'):
yield post_id
if not post_id:
return
data["page"] += 1
class FuraffinityGalleryExtractor(FuraffinityExtractor):
"""Extractor for a furaffinity user's gallery"""
subcategory = "gallery"
pattern = BASE_PATTERN + r"/gallery/([^/?#]+)"
test = ("https://www.furaffinity.net/gallery/mirlinthloth/", {
"pattern": r"https://d\d?\.f(uraffinity|acdn)\.net"
r"/art/mirlinthloth/\d+/\d+.\w+\.\w+",
"range": "45-50",
"count": 6,
})
def posts(self):
return self._pagination("gallery")
class FuraffinityScrapsExtractor(FuraffinityExtractor):
"""Extractor for a furaffinity user's scraps"""
subcategory = "scraps"
directory_fmt = ("{category}", "{user!l}", "Scraps")
pattern = BASE_PATTERN + r"/scraps/([^/?#]+)"
test = ("https://www.furaffinity.net/scraps/mirlinthloth/", {
"pattern": r"https://d\d?\.f(uraffinity|acdn)\.net"
r"/art/[^/]+(/stories)?/\d+/\d+.\w+.",
"count": ">= 3",
})
def posts(self):
return self._pagination("scraps")
class FuraffinityFavoriteExtractor(FuraffinityExtractor):
"""Extractor for a furaffinity user's favorites"""
subcategory = "favorite"
directory_fmt = ("{category}", "{user!l}", "Favorites")
pattern = BASE_PATTERN + r"/favorites/([^/?#]+)"
test = ("https://www.furaffinity.net/favorites/mirlinthloth/", {
"pattern": r"https://d\d?\.f(uraffinity|acdn)\.net"
r"/art/[^/]+/\d+/\d+.\w+\.\w+",
"range": "45-50",
"count": 6,
})
def posts(self):
return self._pagination_favorites()
class FuraffinitySearchExtractor(FuraffinityExtractor):
"""Extractor for furaffinity search results"""
subcategory = "search"
directory_fmt = ("{category}", "Search", "{search}")
pattern = BASE_PATTERN + r"/search(?:/([^/?#]+))?/?[?&]([^#]+)"
test = (
("https://www.furaffinity.net/search/?q=cute", {
"pattern": r"https://d\d?\.f(uraffinity|acdn)\.net"
r"/art/[^/]+/\d+/\d+.\w+\.\w+",
"range": "45-50",
"count": 6,
}),
("https://www.furaffinity.net/search/cute&rating-general=0", {
"range": "1",
"count": 1,
}),
)
def __init__(self, match):
FuraffinityExtractor.__init__(self, match)
self.query = text.parse_query(match.group(2))
if self.user and "q" not in self.query:
self.query["q"] = text.unquote(self.user)
def metadata(self):
return {"search": self.query.get("q")}
def posts(self):
return self._pagination_search(self.query)
class FuraffinityPostExtractor(FuraffinityExtractor):
"""Extractor for individual posts on furaffinity"""
subcategory = "post"
pattern = BASE_PATTERN + r"/(?:view|full)/(\d+)"
test = (
("https://www.furaffinity.net/view/21835115/", {
"pattern": r"https://d\d*\.f(uraffinity|acdn)\.net/(download/)?art"
r"/mirlinthloth/music/1488278723/1480267446.mirlinthlot"
r"h_dj_fennmink_-_bude_s_4_ever\.mp3",
"keyword": {
"artist" : "mirlinthloth",
"artist_url" : "mirlinthloth",
"date" : "dt:2016-11-27 17:24:06",
"description": "A Song made playing the game Cosmic DJ.",
"extension" : "mp3",
"filename" : r"re:\d+\.\w+_dj_fennmink_-_bude_s_4_ever",
"id" : 21835115,
"tags" : list,
"title" : "Bude's 4 Ever",
"url" : r"re:https://d\d?\.f(uraffinity|acdn)\.net/art",
"user" : "mirlinthloth",
"views" : int,
"favorites" : int,
"comments" : int,
"rating" : "General",
"fa_category": "Music",
"theme" : "All",
"species" : "Unspecified / Any",
"gender" : "Any",
"width" : 120,
"height" : 120,
},
}),
# 'external' option (#1492)
("https://www.furaffinity.net/view/42166511/", {
"options": (("external", True),),
"pattern": r"https://d\d*\.f(uraffinity|acdn)\.net/"
r"|http://www\.postybirb\.com",
"count": 2,
}),
# no tags (#2277)
("https://www.furaffinity.net/view/45331225/", {
"keyword": {
"artist": "Kota_Remminders",
"artist_url": "kotaremminders",
"date": "dt:2022-01-03 17:49:33",
"fa_category": "Adoptables",
"filename": "1641232173.kotaremminders_chidopts1",
"gender": "Any",
"height": 905,
"id": 45331225,
"rating": "General",
"species": "Unspecified / Any",
"tags": [],
"theme": "All",
"title": "REMINDER",
"width": 1280,
},
}),
("https://furaffinity.net/view/21835115/"),
("https://sfw.furaffinity.net/view/21835115/"),
("https://www.furaffinity.net/full/21835115/"),
)
def posts(self):
post_id = self.user
self.user = None
return (post_id,)
class FuraffinityUserExtractor(FuraffinityExtractor):
"""Extractor for furaffinity user profiles"""
subcategory = "user"
cookiedomain = None
pattern = BASE_PATTERN + r"/user/([^/?#]+)"
test = (
("https://www.furaffinity.net/user/mirlinthloth/", {
"pattern": r"/gallery/mirlinthloth/$",
}),
("https://www.furaffinity.net/user/mirlinthloth/", {
"options": (("include", "all"),),
"pattern": r"/(gallery|scraps|favorites)/mirlinthloth/$",
"count": 3,
}),
)
def items(self):
base = "{}/{{}}/{}/".format(self.root, self.user)
return self._dispatch_extractors((
(FuraffinityGalleryExtractor , base.format("gallery")),
(FuraffinityScrapsExtractor , base.format("scraps")),
(FuraffinityFavoriteExtractor, base.format("favorites")),
), ("gallery",))
class FuraffinityFollowingExtractor(FuraffinityExtractor):
"""Extractor for a furaffinity user's watched users"""
subcategory = "following"
pattern = BASE_PATTERN + "/watchlist/by/([^/?#]+)"
test = ("https://www.furaffinity.net/watchlist/by/mirlinthloth/", {
"pattern": FuraffinityUserExtractor.pattern,
"range": "176-225",
"count": 50,
})
def items(self):
url = "{}/watchlist/by/{}/".format(self.root, self.user)
data = {"_extractor": FuraffinityUserExtractor}
while True:
page = self.request(url).text
for path in text.extract_iter(page, '<a href="', '"'):
yield Message.Queue, self.root + path, data
path = text.rextract(page, 'action="', '"')[0]
if url.endswith(path):
return
url = self.root + path
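# Hedged usage note: these extractor classes are selected by gallery-dl's URL
# matching, so running, e.g.,
#   gallery-dl "https://www.furaffinity.net/gallery/mirlinthloth/"
# should dispatch to FuraffinityGalleryExtractor via its `pattern` above (the
# example URL is taken from the test tuple in this file).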
|
mikf/gallery-dl
|
gallery_dl/extractor/furaffinity.py
|
Python
|
gpl-2.0
| 14,255
|
"""
This module contains the CharmmWriter class and associated methods,
which outputs a psf/pdb file with CHARMM names and parameters.
It does this by converting atom names to CHARMM names, writing
intermediate files as necessary to invoke the vmd psfgen plugin.
Author: Robin Betz
Copyright (C) 2015 Robin Betz
"""
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307, USA.
from __future__ import print_function
import os
import tempfile
from pkg_resources import resource_filename
from vmd import atomsel, evaltcl, molecule
from Dabble.param import CharmmMatcher
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CONSTANTS #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
_acids = ('ACE ALA ARG ASN ASP CYS CYX GLN GLU GLY HIE HIS HSP HSE '
'HSD ILE LEU LYS MET NMA PHE PRO SER THR TRP TYR VAL')
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class CharmmWriter(object):
"""
An object that handles all the conversions to a psf file
by interfacing with psfgen.
Writes a pdb/psf file pair from the current molecule using the
CHARMM36 topology and atom names/types. Interfaces with psfgen by
dynamically generating the .tcl file that psfgen takes as input.
Prompts the user for additional topology files and helps with
matching atom names that cannot be automatically translated to the
charmm naming conventions.
Attributes:
file (file handle): Temporary file to write TCL script that invokes
psfgen
tmp_dir (string): Directory where temporary files are stored
psf_name (str): Prefix for the pdb/psf output files, extension will
be appended
molid (str,optional): the VMD molecule id to write. Defaults to 0.
lipid_sel (str,optional): atomselect string describing what should count
as "lipid". Defaults to "lipid"
topologies (list of str): Topology files that were used in creating the
psf
prompt_topos (bool): Whether to ask for more topology files
matcher (CharmmMatcher): Molecular graph matcher object
"""
#==========================================================================
def __init__(self, tmp_dir, molid, lipid_sel="lipid", **kwargs):
# Create TCL temp file and directory
self.tmp_dir = tmp_dir
self.filename = tempfile.mkstemp(suffix='.tcl', prefix='dabble_psfgen',
dir=self.tmp_dir)[1]
self.lipid_sel = lipid_sel
self.file = open(self.filename, 'w')
self.molid = molid
self.psf_name = ""
# Default parameter sets
if kwargs.get("override_defaults", False):
self.topologies = []
else:
self.topologies = [
"top_all36_caps.rtf",
"top_water_ions.rtf",
"top_all36_cgenff.rtf",
"top_all36_prot.rtf",
"top_all36_lipid.rtf",
"top_all36_carb.rtf",
"top_all36_na.rtf",
"toppar_all36_prot_na_combined.str",
"toppar_all36_prot_fluoro_alkanes.str",
]
for i, top in enumerate(self.topologies):
self.topologies[i] = resource_filename(__name__,
os.path.join("charmm_parameters",
top))
if kwargs.get("extra_topos"):
self.topologies.extend(kwargs.get("extra_topos"))
# Initialize graph matcher with topologies we know about
self.matcher = CharmmMatcher(self.topologies)
#=========================================================================
def write(self, psf_name):
"""
Writes the pdb/psf file.
Args:
psf_name (str): Prefix for the pdb/psf output files, extension
will be appended
Returns:
topologies (list of str): Topology files that were used in creating
the psf
"""
# Clean up all temp files from previous runs if present
# An earlier check will exit if it's not okay to overwrite here
self.psf_name = psf_name
try:
os.remove('%s.pdb'% self.psf_name)
os.remove('%s.psf'% self.psf_name)
except OSError:
pass
# Finds the psfgen package and sets the output file name
string = '''
set dir [file join $env(VMDDIR) plugins [vmdinfo arch] tcl psfgen1.6]
package ifneeded psfgen 1.6 [list load [file join $dir libpsfgen.so]]
package require psfgen
set output "%s"
resetpsf
''' % self.psf_name
self.file.write(string)
# Put our molecule on top
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Print out topology files
self.file.write('\n')
print("Using the following topologies:")
for top in self.topologies:
print(" - %s" % top.split("/")[-1])
self.file.write(' topology %s\n' % top)
# Mark all atoms as unsaved with the user field
atomsel('all', molid=self.molid).set('user', 1.0)
check_atom_names(molid=self.molid)
# Now ions if present, changing the atom names
if len(atomsel('element Na Cl K', molid=self.molid)) > 0:
self._write_ion_blocks()
# Save water 10k molecules at a time
if len(atomsel('water', molid=self.molid)):
self._write_water_blocks()
# Now lipid
if len(atomsel(self.lipid_sel)):
self._write_lipid_blocks()
if not len(atomsel("resname %s" % _acids, molid=self.molid)):
print("\tDidn't find any protein.\n")
# Now handle the protein
# Save and reload the protein so residue looping is correct
prot_molid = self._renumber_protein_chains(molid=self.molid)
extpatches = set()
for frag in sorted(set(atomsel("resname %s" % _acids,
molid=prot_molid).get('fragment'))):
extpatches.update(self._write_protein_blocks(prot_molid, frag))
atomsel("same fragment as resname %s" % _acids,
molid=self.molid).set("user", 0.0)
# List all patches applied to the protein
print("Applying the following patches:\n")
print("\t%s" % "\t".join(extpatches))
self.file.write(''.join(extpatches))
self.file.write("\n")
# Regenerate angles and dihedrals after applying patches
# Angles must be regenerated FIRST!
# See http://www.ks.uiuc.edu/Research/namd/mailing_list/namd-l.2009-2010/4137.html
self.file.write("regenerate angles\nregenerate dihedrals\n")
# Check if there is anything else and let the user know about it
leftovers = atomsel('user 1.0', molid=self.molid)
for lig in set(leftovers.get('resname')):
residues = self._find_single_residue_names(resname=lig, molid=self.molid)
self._write_generic_block(residues)
# Write the output files and run
string = '''
writepsf x-plor cmap ${output}.psf
writepdb ${output}.pdb'''
self.file.write(string)
self.file.close()
evaltcl('play %s' % self.filename)
self._check_psf_output()
# Reset top molecule
molecule.set_top(old_top)
return self.topologies
#=========================================================================
# Private methods #
#=========================================================================
def _write_water_blocks(self):
"""
Writes a lot of temporary files with 10000 waters each, to bypass
psfgen being stupid with files containing more than 10000 of a residue.
"""
# Put current molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Set consistent residue and atom names, crystal waters
# can be named HOH, etc
atomsel('water').set('resname', 'TIP3')
atomsel('resname TIP3').set('chain', 'W')
atomsel('resname TIP3 and element O').set('name', 'OH2')
# Dowser can name water hydrogens strangely
atomsel('resname TIP3 and name HW1').set('name', 'H1')
atomsel('resname TIP3 and name HW2').set('name', 'H2')
# Select all the waters. We'll use the user field to track which
# ones have been written
allw = atomsel('water and user 1.0')
print("Found %d water residues" % len(set(atomsel('water and user 1.0').get('residue'))))
# Find the problem waters with unordered indices
problems = []
for r in set(allw.get('residue')):
widx = atomsel('residue %s' % r).get("index")
if max(widx) - min(widx) != 2:
problems.append(r)
atomsel('residue %s' % r).set("user", 0.0) # get it out of allw
allw.update()
num_written = int(len(allw)/(9999*3))+1
print("Going to write %d files for %d water atoms"
% (num_written, len(allw)))
# Pull out and write 10k waters at a time if we have normal waters
if allw:
for i in range(num_written):
temp = tempfile.mkstemp(suffix='_%d.pdb' % i, prefix='psf_wat_',
dir=self.tmp_dir)[1]
residues = list(set(allw.get('residue')))[:9999]
batch = atomsel('residue %s' % ' '.join([str(x) for x in residues]))
try:
batch.set('resid', [k for k in range(1, int(len(batch)/3)+1)
for _ in range(3)])
except ValueError:
print("\nERROR! You have some waters missing hydrogens!\n"
"Found %d water residues, but %d water atoms. Check "
" your crystallographic waters in the input structure."
% (len(residues), len(batch)))
quit(1)
batch.set('user', 0.0)
batch.write('pdb', temp)
allw.update()
# Now write the problem waters
self._write_unorderedindex_waters(problems, self.molid)
string = '''
set waterfiles [glob -directory %s psf_wat_*.pdb]
set i 0
foreach watnam $waterfiles {
segment W${i} {
auto none
first none
last none
pdb $watnam
}
coordpdb $watnam W${i}
incr i
}
''' % self.tmp_dir
self.file.write(string)
molecule.set_top(old_top)
return num_written
#==========================================================================
def _write_unorderedindex_waters(self, residues, molid):
"""
Renumbers and sorts the specified waters manually. This is much less
efficient but is necessary in cases where atoms within a water molecule
are not sequential in index, preventing quick renaming with VMD.
Identify problem waters, then call this on them. It'll write its own
psf_wat_* file with just those waters, minimizing inefficiency.
Args:
residues (list of int): Problem water molecules
molid (int): VMD molecule ID to write
Returns:
(int) Number of waters written
"""
temp = tempfile.mkstemp(suffix='_indexed.pdb', prefix='psf_wat_',
dir=self.tmp_dir)[1]
fileh = open(temp, 'w')
idx = 1
for ridx, residue in enumerate(residues):
res = atomsel('residue %d' % residue, molid=molid)
for i in res.get('index'):
a = atomsel('index %d' % i, molid) # pylint: disable=invalid-name
entry = ('%-6s%5d %-5s%-4s%c%4d %8.3f%8.3f%8.3f%6.2f%6.2f'
' %-4s%2s\n' % ('ATOM', idx, a.get('name')[0],
a.get('resname')[0],
a.get('chain')[0],
ridx+1,
a.get('x')[0],
a.get('y')[0],
a.get('z')[0],
0.0, 0.0, a.get('segname')[0],
a.get('element')[0]))
idx += 1
fileh.write(entry)
fileh.write('END\n')
fileh.close()
return idx
#==========================================================================
def _write_lipid_blocks(self):
"""
Writes a temporary PDB file containing the lipids for later use by
psfgen. Renumbers the lipid residues because some can have **** instead
of an integer for resid in large systems, which will crash psfgen. Also
sets atom names for some common lipids (currently POPC)
Raises:
NotImplementedError if more than 10,000 lipids are present since it
doesn't support feeding multiple lipid blocks to psfgen currently
NotImplementedError if lipid other than POPC,POPE,POPG is found
"""
# Put current molecule on top to simplify atom selection
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Collect lipid residues up
alll = atomsel('(%s) and user 1.0' % self.lipid_sel)
residues = list(set(alll.get('residue')))
residues.sort()
# Sanity check for < 10k lipids
if len(residues) >= 10000:
raise NotImplementedError("More than 10k lipids found")
# Rename lipid residues by resname
# This assumes all lipids with the same resname are the same
# If that's not the case, the system is really broken in some way
# for resname in set(alll.get('resname')):
# ressel = atomsel("(%s) and user 1.0 and resname '%s'"
# % (self.lipid_sel, resname))
#
# # Get naming dictionary for one representative residue
# repsel = atomsel('residue %s' % ressel.get('residue')[0])
# (newname, atomnames) = self.matcher.get_names(sel)
#
# # Apply naming dictionary to all of these residues
# for idx, name in atomnames.items():
# oldname = atomsel('index %s' % idx).get('name')
# if oldname != name:
# Loop through all residues and renumber and correctly name them
counter = 1
for res in residues:
# Renumber residue
sel = atomsel('residue %s' % res)
sel.set('resid', counter)
counter = counter + 1
# Rename residue
# (newname, atomnames) = self.matcher.get_names(sel,
# print_warning=False)
#
# for idx, name in atomnames.items():
# atom = atomsel('index %s' % idx)
# if atom.get('name')[0] != name:
# print("Renaming %s:%s: %s -> %s" % (sel.get('resname')[0],
# sel.get('resid')[0],
# atom.get('name')[0],
# name))
# atom.set('name', name)
# sel.set('resname', newname)
# Write temporary lipid pdb
temp = tempfile.mkstemp(suffix='.pdb', prefix='psf_lipid_',
dir=self.tmp_dir)[1]
alll.set('user', 0.0)
alll.write('pdb', temp)
# Write to file
string = '''
set lipidfile %s
set mid [mol new $lipidfile]
segment L {
first none
last none
pdb $lipidfile
}
coordpdb $lipidfile L
mol delete $mid
''' % temp
self.file.write(string)
# Put old top back
molecule.set_top(old_top)
#==========================================================================
def _write_ion_blocks(self):
"""
Writes a PDB file containing correctly named ions for use by
psfgen, and instructs psfgen to use it in TCL code.
"""
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(self.molid)
# Get ion resids that aren't associated w other molecules
# because some ligands have Na, Cl, K
total = atomsel('element Na Cl K')
not_ions = atomsel("(same fragment as element Na Cl K) and (not index %s)"
% " ".join([str(s) for s in set(total.get('index'))]))
ions = set(total.get('residue')) - set(not_ions.get('residue'))
if not len(ions):
return
ionstr = "residue " + " ".join([str(s) for s in ions])
# Fix the names
atomsel('%s and name NA' % ionstr).set('name', 'SOD')
atomsel('%s and name CL' % ionstr).set('name', 'CLA')
atomsel('%s and name K' % ionstr).set('name', 'POT')
atomsel('%s and name NA' % ionstr).set('resname', 'SOD')
atomsel('%s and name CL' % ionstr).set('resname', 'CLA')
atomsel('%s and name K' % ionstr).set('resname', 'POT')
# Renumber the residues since some may be above 10k
residues = atomsel('name SOD CLA POT').get('residue')
batch = atomsel('residue %s' % ' '.join([str(s) for s in set(residues)]))
batch.set('resid', [k for k in range(1, len(batch)+1)])
# Save the temporary ions file
temp = tempfile.mkstemp(suffix='.pdb', prefix='psf_ions_',
dir=self.tmp_dir)[1]
atomsel('name SOD CLA POT').set('user', 0.0) # mark as saved
atomsel('name SOD CLA POT').write('pdb', temp)
string = '''
set ionfile %s
segment I {
pdb $ionfile
first none
last none
}
coordpdb $ionfile I
''' % temp
self.file.write(string)
molecule.set_top(old_top)
#==========================================================================
def _find_single_residue_names(self, resname, molid):
"""
Uses graph matcher and available topologies to match up
ligand names automatically. Tries to use graphs, and if there's an
uneven number of atoms tries to match manually to suggest which atoms
are most likely missing.
Args:
resname (str): Residue name of the ligand that will be written.
All ligands will be checked separately against the graphs.
molid (int): VMD molecule ID to consider
Returns:
(list of ints): Residue numbers (not resid) of all input ligands
that were successfully matched. Need to do it this way since
residue names can be changed in here to different things.
Raises:
ValueError if number of resids does not match number of residues as
interpreted by VMD
NotImplementedError if a residue could not be matched to a graph.
"""
# Put our molecule on top
old_top = molecule.get_top()
molecule.set_top(molid)
# Sanity check that there is no discrepancy between defined resids and
# residues as interpreted by VMD.
for chain in set(atomsel("user 1.0 and resname '%s'" % resname).get('chain')):
residues = list(set(atomsel("user 1.0 and resname '%s' and chain %s"
% (resname, chain)).get('residue')))
resids = list(set(atomsel("user 1.0 and resname '%s' and chain %s"
% (resname, chain)).get('resid')))
if len(residues) != len(resids):
raise ValueError("VMD found %d residues for resname '%s', but there "
"are %d resids! Check input." % (len(residues), resname,
len(resids)))
for residue in residues:
sel = atomsel("residue %s and resname '%s' and user 1.0" % (residue, resname))
(newname, atomnames) = self.matcher.get_names(sel, print_warning=True)
if not newname:
                    (newname, patch, atomnames) = self.matcher.get_patches(sel)
if not newname:
print("ERROR: Could not find a residue definition for %s:%s"
% (resname, residue))
raise NotImplementedError("No residue definition for %s:%s"
% (resname, residue))
print("\tApplying patch %s to ligand %s" % (patch, newname))
# Do the renaming
for idx, name in atomnames.items():
atom = atomsel('index %s' % idx)
if atom.get('name')[0] != name and "+" not in name and \
"-" not in name:
print("Renaming %s:%s: %s -> %s" % (resname, residue,
atom.get('name')[0],
name))
atom.set('name', name)
sel.set('resname', newname)
#logger.info("Renamed %d atoms for all resname %s->%s" % (num_renamed, resname, name))
molecule.set_top(old_top)
return residues
#==========================================================================
def _write_generic_block(self, residues):
"""
Matches ligands to available topology file, renames atoms, and then
writes temporary files for the ligands
Args:
residues (list of int): Residue numbers to be written. Will all
be written to one segment.
Returns:
True if successful
"""
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(self.molid)
alig = atomsel('user 1.0 and residue %s' % " ".join([str(x) for x in residues]))
        # Write temporary file containing the residues and update tcl commands
temp = tempfile.mkstemp(suffix='.pdb', prefix='psf_block_',
dir=self.tmp_dir)[1]
string = '''
set blockfile %s
segment B%s {
pdb $blockfile
first none
last none
}
coordpdb $blockfile B%s
''' % (temp, residues[0], residues[0])
alig.write('pdb', temp)
alig.set('user', 0.0)
self.file.write(string)
if old_top != -1:
molecule.set_top(old_top)
return True
#==========================================================================
def _write_protein_blocks(self, molid, frag):
"""
Writes a protein fragment to a pdb file for input to psfgen
Automatically assigns amino acid names
Args:
molid (int): VMD molecule ID of renumbered protein
frag (str): Fragment to write
Returns:
(list of str): Patches to add to the psfgen input file
after all proteins have been loaded
"""
print("Setting protein atom names")
# Put our molecule on top to simplify atom selection language
old_top = molecule.get_top()
molecule.set_top(molid)
patches = set()
extpatches = set()
seg = "P%s" % frag
residues = list(set(atomsel("fragment '%s'" % frag).get('residue')))
for residue in residues:
sel = atomsel('residue %s' % residue)
resid = sel.get('resid')[0]
# Only try to match single amino acid if there are 1 or 2 bonds
if len(self.matcher.get_extraresidue_atoms(sel)) < 3:
(newname, atomnames) = self.matcher.get_names(sel,
print_warning=False)
# See if it's a disulfide bond participant
else:
(newname, patchline, atomnames) = \
self.matcher.get_disulfide("residue %d" % residue,
frag, molid)
if newname:
extpatches.add(patchline)
# Couldn't find a match. See if it's a patched residue
if not newname:
(newname, patch, atomnames) = self.matcher.get_patches(sel)
if newname:
patches.add("patch %s %s:%d\n" % (patch, seg, resid))
# Fall through to error condition
if not newname:
raise ValueError("Couldn't find a patch for %s:%s"
% (sel.get('resname')[0], resid))
# Do the renaming
for idx, name in atomnames.items():
atom = atomsel('index %s' % idx)
if atom.get('name')[0] != name and "+" not in name and \
"-" not in name:
atom.set('name', name)
sel.set('resname', newname)
# Save protein chain in the correct order
filename = self.tmp_dir + '/psf_protein_%s.pdb' % seg
_write_ordered_pdb(filename, "fragment '%s'" % frag, molid)
print("\tWrote %d atoms to the protein segment %s"
% (len(atomsel("fragment %s" % frag)), seg))
# Now write to psfgen input file
string = '''
set protnam %s
segment %s {
first none
last none
pdb $protnam
}
''' % (filename, seg)
self.file.write(string)
print("Applying the following single-residue patches to P%s:\n" % frag)
print("\t%s" % "\t".join(patches))
self.file.write(''.join(patches))
self.file.write("\n")
self.file.write("coordpdb $protnam %s\n" % seg)
if old_top != -1:
molecule.set_top(old_top)
return extpatches
#==========================================================================
def _renumber_protein_chains(self, molid):
"""
Pulls all protein fragments and renumbers the residues
so that ACE and NMA caps appear to be different residues to VMD.
This is necessary so that they don't appear as patches. Proteins with
non standard capping groups will have the patches applied.
Args:
molid (int): VMD molecule ID of entire system
Returns:
(int): Molid of loaded fragment
"""
# Put our molecule on top and grab selection
old_top = molecule.get_top()
molecule.set_top(molid)
for frag in set(atomsel("protein or resname ACE NMA").get("fragment")):
fragment = atomsel('fragment %s' % frag, molid=molid)
print("Checking capping groups resids on protein fragment %d" % frag)
for resid in sorted(set(fragment.get("resid"))):
                # Handle the bug where capping groups end up in the same residue
                # as the neighboring amino acid. Maestro writes it this way for
                # some reason, but it causes problems down the line when psfgen
                # doesn't understand the weird combined residue.
rid = atomsel("fragment '%s' and resid '%d'"
% (frag, resid)).get('residue')[0]
names = set(atomsel('residue %d'% rid).get('resname'))
assert len(names) < 3, ("More than 2 residues with same number... "
"currently unhandled. Report a bug")
if len(names) > 1:
if 'ACE' in names and 'NMA' in names:
print("ERROR: Both ACE and NMA were given the same resid"
"Check your input structure")
quit(1)
if 'ACE' in names:
# Set ACE residue number as one less
resid = atomsel('residue %d and not resname ACE' % rid).get('resid')[0]
if len(atomsel("fragment '%s' and resid %d" % (frag, resid-1))):
raise ValueError('ACE resid collision number %d' % (resid-1))
atomsel('residue %d and resname ACE'
% rid).set('resid', resid-1)
print("\tACE %d -> %d" % (resid, resid-1))
elif 'NMA' in names:
# Set NMA residue number as one more
resid = int(atomsel('residue %d and not resname NMA' % rid).get('resid')[0])
if len(atomsel("fragment '%s' and resid %d" % (frag, resid+1))):
raise ValueError("NMA resid collision number %d" % (resid+1))
atomsel('residue %d and resname NMA'
% rid).set('resid', resid+1)
print("\tNMA %d -> %d" % (resid, resid+1))
# Have to save and reload so residues are parsed correctly by VMD
temp = tempfile.mkstemp(suffix='_renum.mae',
prefix='psf_prot_', dir=self.tmp_dir)[1]
atomsel("same fragment as protein or resname ACE NMA").write('mae', temp)
prot_molid = molecule.load('mae', temp)
# Put things back the way they were
if old_top != -1:
molecule.set_top(old_top)
return prot_molid
#==========================================================================
def _check_psf_output(self):
"""
Scans the output psf from psfgen for atoms where the coordinate
        could not be set, indicating an unmatched atom. This check is necessary
because sometimes psfgen will run with no errors or warnings but will
have unmatched atoms that are all at (0,0,0).
"""
# Check file was written at all
if not os.path.isfile('%s.pdb'% self.psf_name):
print("\nERROR: psf file failed to write.\n"
" Please see log above.\n")
quit(1)
# Open the pdb file in VMD and check for atoms with no occupancy
fileh = molecule.load('pdb', '%s.pdb' % self.psf_name)
errors = atomsel("occupancy=-1", molid=fileh)
# Print out error messages
if len(errors):
print("\nERROR: Couldn't find the following atoms.")
for i in range(len(errors)):
print(" %s%s:%s" % (errors.get("resname")[i], errors.get("resid")[i],
errors.get("name")[i]))
print("Check if they are present in the original structure.\n"
"If they are, check dabble name translation or file a "
"bug report to Robin.\n")
quit(1)
else:
print("\nChecked output pdb/psf has all atoms present "
"and correct.\n")
#==========================================================================
def _find_residue_in_rtf(self, resname, molid):
"""
Scans the input topology files to find a name match for the given
residue name, then pulls out the atoms involved and checks that they
are all present in the input coordinates, prompting the user to correct
the names of atoms that could not be matched.
Residue ID is used because there can be multiple copies of a residue
with the same name, but only one has missing or extra atoms.
Args:
resname (str): Residue name to check
molid (int): VMD molecule ID
Returns:
True if all matching was successful
False if the residue name cannot be found
"""
print("Finding residue name '%s'" % resname)
for top in self.topologies:
topfile = open(top, 'r')
topo_atoms = _get_atoms_from_rtf(text=topfile.readlines(),
resname=resname)
# Use first definition found of this residue
if len(topo_atoms):
break
topfile.close()
if not len(topo_atoms):
return False
print("Successfully found residue %s in input topologies" % resname)
# Match up atoms with python sets
pdb_atoms = set(atomsel("resname '%s' and user 1.0"
% resname, molid=molid).get('name'))
pdb_only = pdb_atoms - topo_atoms
topo_only = topo_atoms - pdb_atoms
# If uneven number of atoms, there are missing or additional atoms
if len(pdb_atoms) > len(topo_atoms):
print("\nERROR: Cannot process modified residue %s.\n"
" There are %d extra atoms in the input structure "
"that are undefined in the topology file. The "
"following atoms could not be matched and may "
"either be misnamed, or additional atoms. Please "
"check your input."
% (resname, len(pdb_atoms)-len(topo_atoms)))
print(" [ %s ]\n" % ' '.join(pdb_only))
print(" Cannot continue.\n")
quit(1)
if len(topo_atoms) > len(pdb_atoms):
print("\nERROR: Cannot process modified residue %s.\n"
" There are %d missing atoms in the input structure "
" that are defined in the topology file. The "
" following atoms could not be matched and may "
" either be misnamed or deleted atoms. Please "
" check your input."
% (resname, len(topo_atoms)-len(pdb_atoms)))
print(" [ %s ]\n" % ' '.join(topo_only))
print(" Cannot continue.\n")
print("Found is %s\n" % pdb_atoms)
quit(1)
# Offer to rename atoms that couldn't be matched to the topology
if len(pdb_only):
print("\nWARNING: Having some trouble with modified residue %s.\n"
" The following atom names cannot be matched up "
" to the input topologies. They are probably "
" misnamed.\n" % resname)
print(" To help you, here are the atom names that "
" should be present according to the topology "
" but were not found:\n")
print(" [ %s ]\n" % ' '.join([str(t) for t in topo_only]))
print(" Please enter a valid name for each atom as "
"it appears or CTRL+D to quit..\n")
for unmatched in pdb_only:
print("Unmatched topology names: [ %s ]"
% ' '.join(topo_only))
newname = raw_input(" %s -> " % unmatched)
while newname not in topo_only:
print("'%s' is not an available name in the topology."
"Please try again.\n" % newname)
newname = raw_input(" %s -> " % unmatched)
atomsel("resname '%s' and user 1.0 and name '%s'"
% (resname, unmatched)).set('name', newname)
pdb_atoms = set(atomsel("resname '%s' and user 1.0"
% resname).get('name'))
topo_only = topo_atoms-pdb_atoms
resname = newname
# Recurse to check that everything is assigned correctly
self._find_residue_in_rtf(resname, molid)
print("Matched up all atom names for resname '%s'\n" % resname)
return True
#==========================================================================
def _get_patch(self, seg, resid):
"""
Prompts the user for a patch to apply for the given residue.
Gathers available patches from topology files
Args:
seg (str): Segment to apply the patch to
resid (int): Residue ID to apply the patch to
Returns:
(str) patch line to put in the psfgen input file
"""
avail_patches = self._get_avail_patches()
print("What is the patch name I should apply?")
print("Type NONE for no patch, if your residue is completely "
"defined in a str file")
print("Or type HELP for a list of all patches I know about")
patchname = raw_input("> ")
if patchname == "HELP":
print(" PATCH COMMENT")
print(" ----- -------")
for patch in avail_patches:
print("%7s %s" % (patch, avail_patches[patch]))
patchname = raw_input("> ")
while (patchname not in avail_patches) and (patchname != "NONE"):
print("I don't know about patch %s" % patchname)
patchname = raw_input("Try again > ")
if patchname == "NONE":
return ""
return "patch %s %s:%d\n" % (patchname, seg, resid)
#==========================================================================
def _get_avail_patches(self):
"""
Gathers the patches defined in all topology files.
Returns:
(dict str -> str): Patch names as keys, comment as value
"""
avail_patches = {}
for top in self.topologies:
topfile = open(top, 'r')
for line in topfile:
tokens = line.split()
if not len(tokens):
continue
if tokens[0] == "PRES":
comment = ' '.join(tokens[tokens.index("!")+1:])
avail_patches[tokens[1]] = comment
return avail_patches
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# FUNCTIONS #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _write_ordered_pdb(filename, sel, molid):
"""
Writes a pdb file in order of residues, renumbering the atoms
accordingly, since psfgen wants each residue sequentially while
VMD will write them in the same order as input, which from Maestro
created files has some guessed atoms at the end.
Args:
filename (str): Name of the pdb file to write
sel (str): VMD atomsel string for atoms that will be written
molid (int): VMD molecule ID to write from
"""
old_top = molecule.get_top()
molecule.set_top(molid)
fileh = open(filename, 'w')
# Use resids since order can be wrong when sorting by residue
# Then, use residue to pull out each one since it is much much
# faster then trying to pull out residues
resids = set(atomsel(sel).get('resid'))
# Add additional residue constraint to selection since pulling out
# by resid can match something in a different chain
resstr = ' '.join([str(x) for x in set(atomsel(sel).get('residue'))])
idx = 1
# For renumbering capping groups
for resid in sorted(resids):
rid = atomsel("resid '%s' and residue %s"
% (resid, resstr)).get('residue')[0]
for i in atomsel('residue %d' % rid).get('index'):
a = atomsel('index %d' % i) # pylint: disable=invalid-name
entry = ('%-6s%5d %-5s%-4s%c%4d %8.3f%8.3f%8.3f%6.2f%6.2f'
' %-4s%2s\n' % ('ATOM', idx, a.get('name')[0],
a.get('resname')[0],
a.get('chain')[0],
a.get('resid')[0],
a.get('x')[0],
a.get('y')[0],
a.get('z')[0],
0.0, 0.0, a.get('segname')[0],
a.get('element')[0]))
idx += 1
fileh.write(entry)
fileh.write('END\n')
atomsel(sel).set('user', 0.0) # Mark as written
fileh.close()
molecule.set_top(old_top)
#==========================================================================
def _get_atoms_from_rtf(text, resname):
"""
Scans the input text for the residue with a given name. Once found,
pulls out all the atom names that comprise that residue.
Args:
text (str): Contents of an rtf file to scan
resname (str): Residue to look for
Returns:
        atoms (set of str): Atom names in this residue, or the empty set if
the residue was not found.
"""
atoms = []
found = False
for line in text:
words = line.split()
if not len(words):
continue
if not found and words[0] == 'RESI' \
and words[1] == resname:
found = True
elif found and words[0] == 'ATOM':
atoms.append(words[1])
elif found and words[0] == 'RESI':
break
return set(atoms)
#==========================================================================
def get_bonded_atoms(molid, index):
"""
Returns the element of all atoms bonded to the current atom.
Args:
molid (int): VMD molecule ID to consider
index (int): Atom index to look at bonded atoms
Returns:
(list of str) elements of atoms bound to the current atom
"""
asel = atomsel('index %d' % index, molid=molid)
bound = []
for atom in asel.bonds[0]:
bound.append(atomsel('index %d' % atom).get('element')[0])
return bound
#==========================================================================
def check_atom_names(molid):
"""
Checks that there are no spaces in atom names. If spaces are
found, they are removed and a warning is printed
"""
names = set(atomsel(molid=molid).get('name'))
for name in names:
if ' ' in name:
print("\nWARNING: Found space character in name '%s'\n"
" Incompatible with charmm formats, removing it"
% name)
atomsel("name '%s'", molid=molid).set('name', name.replace(' ', ''))
|
drorlab/dabble
|
Dabble/param/charmm.py
|
Python
|
gpl-2.0
| 43,577
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.constants import LOGFEEDER_CONF_DIR
from resource_management.libraries.script import Script
from resource_management.libraries.script.script import get_config_lock_file
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from string import lower
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
dfs_type = default("/clusterLevelParams/dfs_type", "")
is_parallel_execution_enabled = int(default("/agentLevelParams/agentConfigParams/agent/parallel_execution", 0)) == 1
host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
sudo = AMBARI_SUDO_BINARY
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)
# service name
service_name = config['serviceName']
# logsearch configuration
logsearch_logfeeder_conf = LOGFEEDER_CONF_DIR
agent_cache_dir = config['agentLevelParams']['agentCacheDir']
service_package_folder = config['serviceLevelParams']['service_package_folder']
logsearch_service_name = service_name.lower().replace("_", "-")
logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
# default hadoop params
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
versioned_stack_root = '/usr/hdp/current'
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#java params
java_home = config['ambariLevelParams']['java_home']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#users and groups
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']
namenode_host = default("/clusterHostInfo/namenode_hosts", [])
has_namenode = not len(namenode_host) == 0
if has_namenode or dfs_type == 'HCFS':
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
mount_table_xml_inclusion_file_full_path = None
mount_table_content = None
if 'viewfs-mount-table' in config['configurations']:
xml_inclusion_file_name = 'viewfs-mount-table.xml'
mount_table = config['configurations']['viewfs-mount-table']
if 'content' in mount_table and mount_table['content'].strip():
mount_table_xml_inclusion_file_full_path = os.path.join(hadoop_conf_dir, xml_inclusion_file_name)
mount_table_content = mount_table['content']
link_configs_lock_file = get_config_lock_file()
stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
upgrade_suspended = default("/roleParams/upgrade_suspended", False)
|
sekikn/ambari
|
ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
|
Python
|
apache-2.0
| 5,216
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2018 Paul Culley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Filter rule to match persons with a particular event.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib.eventroletype import EventRoleType
from gramps.gui.editors.filtereditor import MySelect, MyBoolean
from gramps.gen.filters.rules import Rule
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
class Roletype(MySelect):
""" Provide a Role type selector """
def __init__(self, db):
MySelect.__init__(self, EventRoleType, db.get_event_roles())
class NoMatch(MyBoolean):
""" Provide a negation switch """
def __init__(self, db):
MyBoolean.__init__(self, _("Does NOT match with selected Role"))
self.set_tooltip_text(_("Finds the items that don't have event Roles "
"of the selected type."))
#-------------------------------------------------------------------------
#
# HasEvent
#
#-------------------------------------------------------------------------
class HasPersonEventRole(Rule):
"""Rule that checks for a person with a selected event role"""
labels = [(_('Role'), Roletype),
(_('Inverse'), NoMatch)]
name = _('People with events with the <role>')
description = _("Matches people with an event with a selected role")
category = _('Event filters')
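    # Note on the filter options used by apply() below: self.list[0] holds the
    # selected role (compared against event_ref.role.xml_str()) and self.list[1]
    # is '1' when the "Inverse" switch is set, which flips the match.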
def apply(self, dbase, person):
if not self.list[0]:
return False
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
if self.list[1] == '1':
if event_ref.role.xml_str() != self.list[0]:
return True
else:
if event_ref.role.xml_str() == self.list[0]:
return True
return False
class HasFamilyEventRole(Rule):
"""Rule that checks for a family with a selected event role"""
labels = [(_('Role'), Roletype),
(_('Inverse'), NoMatch)]
name = _('Families with events with the <role>')
description = _("Matches families with an event with a selected role")
category = _('Event filters')
def apply(self, dbase, family):
if not self.list[0]:
return False
for event_ref in family.get_event_ref_list():
if not event_ref:
continue
if self.list[1] == '1':
if event_ref.role.xml_str() != self.list[0]:
return True
else:
if event_ref.role.xml_str() == self.list[0]:
return True
return False
|
gramps-project/addons-source
|
FilterRules/hasrolerule.py
|
Python
|
gpl-2.0
| 3,783
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 25, 2014
@author: noe
'''
import numpy as np
import math
import itertools
from . import types
import warnings
def _confidence_interval_1d(data, alpha):
"""
Computes the mean and alpha-confidence interval of the given sample set
Parameters
----------
data : ndarray
a 1D-array of samples
alpha : float in [0,1]
the confidence level, i.e. percentage of data included in the interval
Returns
-------
(m, l, r) : m is the mean of the data, and (l, r) are the m-alpha/2
and m+alpha/2 confidence interval boundaries.
"""
if alpha < 0 or alpha > 1:
raise ValueError('Not a meaningful confidence level: '+str(alpha))
if np.any(np.isnan(data)):
return np.nan, np.nan, np.nan
dmin = np.min(data)
dmax = np.max(data)
# if dmin == dmax:
if np.isclose(dmin, dmax):
warnings.warn('confidence interval for constant data is not meaningful')
return dmin, dmin, dmin
# compute mean
m = np.mean(data)
# sort data
sdata = np.sort(data)
# index of the mean
im = np.searchsorted(sdata, m)
if im == 0 or im == len(sdata) or (np.isinf(m-sdata[im-1]) and np.isinf(sdata[im]-sdata[im-1])):
pm = im
else:
pm = (im-1) + (m-sdata[im-1])/(sdata[im]-sdata[im-1])
# left interval boundary
pl = pm - alpha*pm
il1 = max(0, int(math.floor(pl)))
il2 = min(len(sdata)-1, int(math.ceil(pl)))
if sdata[il1] == sdata[il2]: # catch infs
l = sdata[il1]
else:
l = sdata[il1] + (pl - il1)*(sdata[il2] - sdata[il1])
# right interval boundary
pr = pm + alpha*(len(data)-im)
ir1 = max(0, int(math.floor(pr)))
ir2 = min(len(sdata)-1, int(math.ceil(pr)))
if sdata[ir1] == sdata[ir2]: # catch infs
r = sdata[ir1]
else:
r = sdata[ir1] + (pr - ir1)*(sdata[ir2] - sdata[ir1])
# return
return m, l, r
def _indexes(arr):
""" Returns the list of all indexes of the given array.
Currently works for one and two-dimensional arrays
"""
myarr = np.array(arr)
if myarr.ndim == 1:
return list(range(len(myarr)))
elif myarr.ndim == 2:
return tuple(itertools.product(list(range(arr.shape[0])),
list(range(arr.shape[1]))))
else:
raise NotImplementedError('Only supporting arrays of dimension 1 and 2 as yet.')
def _column(arr, indexes):
""" Returns a column with given indexes from a deep array
For example, if the array is a matrix and indexes is a single int, will
return arr[:,indexes]. If the array is an order 3 tensor and indexes is a
pair of ints, will return arr[:,indexes[0],indexes[1]], etc.
"""
if arr.ndim == 2 and types.is_int(indexes):
return arr[:, indexes]
elif arr.ndim == 3 and len(indexes) == 2:
return arr[:, indexes[0], indexes[1]]
else:
raise NotImplementedError('Only supporting arrays of dimension 2 and 3 as yet.')
def confidence_interval(data, conf=0.95):
r""" Computes element-wise confidence intervals from a sample of ndarrays
Given a sample of arbitrarily shaped ndarrays, computes element-wise
confidence intervals
Parameters
----------
data : array-like of dimension 1 to 3
array of numbers or arrays. The first index is used as the sample
index, the remaining indexes are specific to the array of interest
conf : float, optional, default = 0.95
confidence interval
Return
------
lower : ndarray(shape)
element-wise lower bounds
upper : ndarray(shape)
element-wise upper bounds
"""
if conf < 0 or conf > 1:
raise ValueError('Not a meaningful confidence level: '+str(conf))
try:
data = types.ensure_ndarray(data, kind='numeric')
except:
# if 1D array of arrays try to fuse it
if isinstance(data, np.ndarray) and np.ndim(data) == 1:
newshape = tuple([len(data)] + list(data[0].shape))
newdata = np.zeros(newshape)
for i in range(len(data)):
newdata[i, :] = data[i]
data = newdata
types.assert_array(data, kind='numeric')
if np.ndim(data) == 1:
m, lower, upper = _confidence_interval_1d(data, conf)
return lower, upper
else:
I = _indexes(data[0])
lower = np.zeros(data[0].shape)
upper = np.zeros(data[0].shape)
for i in I:
col = _column(data, i)
m, lower[i], upper[i] = _confidence_interval_1d(col, conf)
# return
return lower, upper
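# Illustrative sketch of confidence_interval usage (hedged: the data below is
# made up and only meant to show the call signature and return shapes):
#
#   samples = np.random.normal(loc=1.0, scale=0.1, size=(500, 3))
#   lower, upper = confidence_interval(samples, conf=0.95)
#   # lower and upper are ndarrays of shape (3,) holding element-wise bounds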
def _maxlength(X):
""" Returns the maximum length of signal trajectories X """
N = 0
for x in X:
if len(x) > N:
N = len(x)
return N
def statistical_inefficiency(X, truncate_acf=True):
r""" Estimates the statistical inefficiency from univariate time series X
The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal.
Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are
only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should
be used in order to compute statistical uncertainties. See [2]_ for a review.
    The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time
    .. math:: \tau = \frac{1}{2}+\sum_{k=1}^{N} A(k) \left(1-\frac{k}{N}\right)
    where
    .. math:: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x \rangle_t^2}{\mathrm{var}(x)}
    is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple
trajectories.
Parameters
----------
X : float array or list of float arrays
Univariate time series (single or multiple trajectories)
truncate_acf : bool, optional, default=True
When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
random noise
References
----------
.. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971)
.. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation
Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich
NIC Series 10, pp. 423-445, 2002.
"""
# check input
assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional'
N = _maxlength(X) # length
# mean-free data
xflat = np.concatenate(X)
Xmean = np.mean(xflat)
X0 = [x-Xmean for x in X]
# moments
x2m = np.mean(xflat ** 2)
# integrate damped autocorrelation
corrsum = 0.0
for lag in range(N):
acf = 0.0
n = 0.0
for x in X0:
Nx = len(x) # length of this trajectory
if (Nx > lag): # only use trajectories that are long enough
acf += np.sum(x[0:Nx-lag] * x[lag:Nx])
n += float(Nx-lag)
acf /= n
if acf <= 0 and truncate_acf: # zero autocorrelation. Exit
break
        elif lag > 0: # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below)
corrsum += acf * (1.0 - (float(lag)/float(N)))
# compute damped correlation time
corrtime = 0.5 + corrsum / x2m
# return statistical inefficiency
return 1.0 / (2 * corrtime)
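# Illustrative sketch of statistical_inefficiency (hedged: numbers invented):
#
#   x = np.cumsum(np.random.randn(10000))   # a strongly correlated signal
#   I = statistical_inefficiency([x])        # I in (0, 1]; ~I*N effective samples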
|
markovmodel/PyEMMA
|
pyemma/util/statistics.py
|
Python
|
lgpl-3.0
| 8,401
|
"""
This is a class that makes it possible to bulk-save cache entries.
For restclients methods that use threading, this can be used to prevent
innodb gap locks from deadlocking sequential inserts.
"""
__manage_bulk_inserts = False
__bulk_insert_queue = []
from django.db import IntegrityError
def store_cache_entry(entry):
global __manage_bulk_inserts
global __bulk_insert_queue
if __manage_bulk_inserts:
__bulk_insert_queue.append(entry)
return
else:
entry.save()
def save_all_queued_entries():
global __bulk_insert_queue
seen_urls = {}
bulk_create = []
try:
for entry in __bulk_insert_queue:
if not entry.url in seen_urls:
entry.save()
seen_urls[entry.url] = True
except Exception as ex:
print "Error bulk saving cache entries: ", ex
__bulk_insert_queue = []
def enable_cache_entry_queueing():
global __manage_bulk_inserts
__manage_bulk_inserts = True
def disable_cache_entry_queueing():
global __manage_bulk_inserts
__manage_bulk_inserts = False
save_all_queued_entries()
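# Hedged usage sketch: wrap a threaded fetch in queueing so cache writes happen
# in one pass afterwards (fetch_all_resources() is a hypothetical caller; the
# enable/disable functions are the ones defined above):
#
#   enable_cache_entry_queueing()
#   try:
#       fetch_all_resources()
#   finally:
#       disable_cache_entry_queueing()   # flushes queued entries via save_all_queued_entries()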
|
jeffFranklin/uw-restclients
|
restclients/cache_manager.py
|
Python
|
apache-2.0
| 1,128
|
"""
Salt returner that reports execution results back to sentry. The returner will
inspect the payload to identify errors and flag them as such.
Pillar needs something like:
.. code-block:: yaml
raven:
servers:
- http://192.168.1.1
- https://sentry.example.com
public_key: deadbeefdeadbeefdeadbeefdeadbeef
secret_key: beefdeadbeefdeadbeefdeadbeefdead
project: 1
tags:
- os
- master
- saltversion
- cpuarch
or using a dsn:
.. code-block:: yaml
raven:
dsn: https://aaaa:bbbb@app.getsentry.com/12345
tags:
- os
- master
- saltversion
- cpuarch
https://pypi.python.org/pypi/raven must be installed.
The pillar can be hidden on sentry return by setting hide_pillar: true.
The tags list (optional) specifies grains items that will be used as sentry
tags, allowing tagging of events in the sentry ui.
To report only errors to sentry, set report_errors_only: true.
"""
import logging
import salt.utils.jid
try:
from raven import Client
from raven.transport.http import HTTPTransport
has_raven = True
except ImportError:
has_raven = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "sentry"
def __virtual__():
if not has_raven:
return (
False,
"Could not import sentry returner; raven python client is not installed.",
)
return __virtualname__
def returner(ret):
"""
Log outcome to sentry. The returner tries to identify errors and report
them as such. All other messages will be reported at info level.
Failed states will be appended as separate list for convenience.
"""
try:
_connect_sentry(_get_message(ret), ret)
except Exception as err: # pylint: disable=broad-except
log.error("Can't run connect_sentry: %s", err, exc_info=True)
def _ret_is_not_error(result):
if result.get("return") and isinstance(result["return"], dict):
result_dict = result["return"]
is_staterun = all("-" in key for key in result_dict.keys())
if is_staterun:
failed_states = {}
for state_id, state_result in result_dict.items():
if not state_result["result"]:
failed_states[state_id] = state_result
if failed_states:
result["failed_states"] = failed_states
return False
return True
if result.get("success"):
return True
return False
def _get_message(ret):
if not ret.get("fun_args"):
return "salt func: {}".format(ret["fun"])
arg_string = " ".join([arg for arg in ret["fun_args"] if isinstance(arg, str)])
kwarg_string = ""
if isinstance(ret["fun_args"], list) and len(ret["fun_args"]) > 0:
kwargs = ret["fun_args"][-1]
if isinstance(kwargs, dict):
kwarg_string = " ".join(
sorted(
"{}={}".format(k, v)
for k, v in kwargs.items()
if not k.startswith("_")
)
)
return "salt func: {fun} {argstr} {kwargstr}".format(
fun=ret["fun"], argstr=arg_string, kwargstr=kwarg_string
).strip()
def _connect_sentry(message, result):
"""
Connect to the Sentry server
"""
pillar_data = __salt__["pillar.raw"]()
grains = __salt__["grains.items"]()
raven_config = pillar_data["raven"]
hide_pillar = raven_config.get("hide_pillar")
sentry_data = {
"result": result,
"pillar": "HIDDEN" if hide_pillar else pillar_data,
"grains": grains,
}
data = {"platform": "python", "culprit": message, "level": "error"}
tags = {}
if "tags" in raven_config:
for tag in raven_config["tags"]:
tags[tag] = grains[tag]
if _ret_is_not_error(result):
data["level"] = "info"
if raven_config.get("report_errors_only") and data["level"] != "error":
return
if raven_config.get("dsn"):
client = Client(raven_config.get("dsn"), transport=HTTPTransport)
else:
try:
servers = []
for server in raven_config["servers"]:
servers.append(server + "/api/store/")
client = Client(
servers=servers,
public_key=raven_config["public_key"],
secret_key=raven_config["secret_key"],
project=raven_config["project"],
transport=HTTPTransport,
)
except KeyError as missing_key:
log.error("Sentry returner needs key '%s' in pillar", missing_key)
return
try:
msgid = client.capture(
"raven.events.Message",
message=message,
data=data,
extra=sentry_data,
tags=tags,
)
log.info("Message id %s written to sentry", msgid)
except Exception as exc: # pylint: disable=broad-except
log.error("Can't send message to sentry: %s", exc, exc_info=True)
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
"""
Do any work necessary to prepare a JID, including sending a custom id
"""
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
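# Illustrative sketch (not part of the original returner): what _get_message
# builds for a typical job return. The ret dict below is hand-made example
# data, not real salt output.
if __name__ == "__main__":
    example_ret = {
        "fun": "state.apply",
        "fun_args": ["webserver", {"test": True, "__kwarg__": True}],
    }
    # -> "salt func: state.apply webserver test=True"
    print(_get_message(example_ret))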
|
saltstack/salt
|
salt/returners/sentry_return.py
|
Python
|
apache-2.0
| 5,367
|
# coding: utf8
from django.contrib import admin
from import_export import resources
from import_export.admin import ExportMixin
from .models import Event, Speaker, Survey
from .tasks import event_notification
class EventAdmin(admin.ModelAdmin):
pass
admin.site.register(Event, EventAdmin)
class SpeakerAdmin(admin.ModelAdmin):
list_display = ('event', 'idx', 'name', 'keyword', 'topic')
admin.site.register(Speaker, SpeakerAdmin)
class SurveyResource(resources.ModelResource):
class Meta:
model = Survey
fields = ('event__name', 'user__email', 'is_approved', 'is_notified', 'is_attended', 'created_at', 'updated_at')
class SurveyAdmin(ExportMixin, admin.ModelAdmin):
list_display = ('id', 'event', 'user_detail', 'is_approved', 'is_notified', 'is_attended', 'created_at', 'updated_at')
list_display_links = ('id', 'user_detail',)
list_editable = ('is_approved', 'is_attended')
list_filter = ('is_approved', 'is_notified', 'is_attended')
ordering = ('created_at', 'updated_at')
search_fields = ('user__email',)
actions = ['send_approve_email']
exclude = ('props',)
def user_detail(self, survey):
return survey.user.email
def props_detail(self, survey):
return '\n\n'.join(u'Q : {question}\nA : {answer}'.format(**prop) for prop in survey.props)
def send_approve_email(self, request, queryset):
count = 0
for survey in queryset:
if survey.is_approved and survey.event.approve_email_content and survey.user.email:
lines = survey.event.approve_email_content.strip().splitlines()
title = lines[0]
content = '\n'.join(lines[1:])
event_notification.s(survey.id, title, content, [survey.user.email]).delay()
count += 1
        if count == 0:
            self.message_user(request, u'전송할 유저가 없습니다.')  # "There are no users to send to."
        else:
            self.message_user(request, u'{} 명의 유저에게 이메일을 전송 중입니다.'.format(count))  # "Sending email to {} users."
    send_approve_email.short_description = u'선택/승인된 survey 유저에게 승인메일 보내기'  # "Send approval mail to selected/approved survey users"
resource_class = SurveyResource
def get_readonly_fields(self, request, obj=None):
return ('event', 'user', 'props_detail')
def has_add_permission(self, request):
return False
admin.site.register(Survey, SurveyAdmin)
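# Illustrative note (not part of the original admin module): send_approve_email
# treats approve_email_content as one text blob whose first line is the mail
# subject and whose remaining lines are the body. A hypothetical example:
#
#   approve_email_content = "Registration approved\nSee you there!\nDoors open at 7pm."
#   lines = approve_email_content.strip().splitlines()
#   title = lines[0]                 # "Registration approved"
#   content = '\n'.join(lines[1:])   # "See you there!\nDoors open at 7pm."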
|
gdgand/Festi
|
festi/survey/admin.py
|
Python
|
mit
| 2,401
|
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
    'seek_wrapped_response',  # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
import logging
import sys
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \
effective_request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
try:
import sqlite3
except ImportError:
pass
else:
from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
logger = logging.getLogger("mechanize")
if logger.level is logging.NOTSET:
logger.setLevel(logging.CRITICAL)
del logger
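# Illustrative usage sketch (not part of the original package) of the
# high-level Browser interface re-exported above; the URL is a placeholder.
#
#   import mechanize
#   br = mechanize.Browser()
#   response = br.open("http://example.com/")
#   print response.read()          # Python 2, like the rest of this package
#   for link in br.links():
#       print link.url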
|
deanhiller/databus
|
webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/__init__.py
|
Python
|
mpl-2.0
| 3,800
|
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
from antlr4.atn.ATNState import StarLoopEntryState
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.dfa.DFAState import DFAState
from antlr4.error.Errors import IllegalStateException
class DFA(object):
def __init__(self, atnStartState, decision=0):
# From which ATN state did we create this DFA?
self.atnStartState = atnStartState
self.decision = decision
# A set of all DFA states. Use {@link Map} so we can get old state back
# ({@link Set} only allows you to see if it's there).
self._states = dict()
self.s0 = None
# {@code true} if this DFA is for a precedence decision; otherwise,
# {@code false}. This is the backing field for {@link #isPrecedenceDfa},
# {@link #setPrecedenceDfa}.
self.precedenceDfa = False
if isinstance(atnStartState, StarLoopEntryState):
if atnStartState.isPrecedenceDecision:
self.precedenceDfa = True
precedenceState = DFAState(configs=ATNConfigSet())
precedenceState.edges = []
precedenceState.isAcceptState = False
precedenceState.requiresFullContext = False
self.s0 = precedenceState
# Get the start state for a specific precedence value.
#
# @param precedence The current precedence.
# @return The start state corresponding to the specified precedence, or
# {@code null} if no start state exists for the specified precedence.
#
# @throws IllegalStateException if this is not a precedence DFA.
# @see #isPrecedenceDfa()
def getPrecedenceStartState(self, precedence):
if not self.precedenceDfa:
raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")
# s0.edges is never null for a precedence DFA
if precedence < 0 or precedence >= len(self.s0.edges):
return None
return self.s0.edges[precedence]
# Set the start state for a specific precedence value.
#
# @param precedence The current precedence.
# @param startState The start state corresponding to the specified
# precedence.
#
# @throws IllegalStateException if this is not a precedence DFA.
# @see #isPrecedenceDfa()
#
def setPrecedenceStartState(self, precedence, startState):
if not self.precedenceDfa:
raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")
if precedence < 0:
return
# synchronization on s0 here is ok. when the DFA is turned into a
# precedence DFA, s0 will be initialized once and not updated again
# s0.edges is never null for a precedence DFA
if precedence >= len(self.s0.edges):
ext = [None] * (precedence + 1 - len(self.s0.edges))
self.s0.edges.extend(ext)
self.s0.edges[precedence] = startState
#
# Sets whether this is a precedence DFA. If the specified value differs
# from the current DFA configuration, the following actions are taken;
# otherwise no changes are made to the current DFA.
#
# <ul>
# <li>The {@link #states} map is cleared</li>
# <li>If {@code precedenceDfa} is {@code false}, the initial state
# {@link #s0} is set to {@code null}; otherwise, it is initialized to a new
# {@link DFAState} with an empty outgoing {@link DFAState#edges} array to
# store the start states for individual precedence values.</li>
# <li>The {@link #precedenceDfa} field is updated</li>
# </ul>
#
# @param precedenceDfa {@code true} if this is a precedence DFA; otherwise,
# {@code false}
def setPrecedenceDfa(self, precedenceDfa):
if self.precedenceDfa != precedenceDfa:
self._states = dict()
if precedenceDfa:
precedenceState = DFAState(configs=ATNConfigSet())
precedenceState.edges = []
precedenceState.isAcceptState = False
precedenceState.requiresFullContext = False
self.s0 = precedenceState
else:
self.s0 = None
self.precedenceDfa = precedenceDfa
@property
def states(self):
return self._states
# Return a list of all states in this DFA, ordered by state number.
def sortedStates(self):
return sorted(self._states.keys(), key=lambda state: state.stateNumber)
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.toString(None)
def toString(self, literalNames=None, symbolicNames=None):
if self.s0 is None:
return ""
from antlr4.dfa.DFASerializer import DFASerializer
serializer = DFASerializer(self, literalNames, symbolicNames)
return unicode(serializer)
def toLexerString(self):
if self.s0 is None:
return ""
from antlr4.dfa.DFASerializer import LexerDFASerializer
serializer = LexerDFASerializer(self)
return unicode(serializer)
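# Illustrative sketch (not part of the original runtime): the edge-list growth
# performed by setPrecedenceStartState above, shown on a plain Python list.
# Storing a start state for precedence 3 pads the list with None up to index 3.
#
#   edges = []                                    # s0.edges of a fresh precedence DFA
#   precedence, startState = 3, "S3"
#   if precedence >= len(edges):
#       edges.extend([None] * (precedence + 1 - len(edges)))
#   edges[precedence] = startState
#   # edges == [None, None, None, 'S3']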
|
Pursuit92/antlr4
|
runtime/Python2/src/antlr4/dfa/DFA.py
|
Python
|
bsd-3-clause
| 5,280
|
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import test_vat
|
syci/partner-contact
|
base_vat_sanitized/tests/__init__.py
|
Python
|
agpl-3.0
| 89
|
from Tribler.Core.Socks5.connection import Socks5Connection, ConnectionState
from Tribler.Test.Core.base_test import MockObject
from Tribler.Test.test_as_server import AbstractServer
from twisted.internet.defer import inlineCallbacks
class MockTransport(MockObject):
"""
This object mocks the transport of the socks5 connection.
"""
def __init__(self):
self.connected = True
self.written_data = []
self.mock_host = MockObject()
self.mock_host.host = '123.123.123.123'
def loseConnection(self):
self.connected = False
def write(self, data):
self.written_data.append(data)
def getHost(self):
return self.mock_host
class TestSocks5Connection(AbstractServer):
"""
Test the basic functionality of the socks5 connection.
"""
@inlineCallbacks
def setUp(self):
yield super(TestSocks5Connection, self).setUp()
self.connection = Socks5Connection(None)
self.connection.transport = MockTransport()
@inlineCallbacks
def tearDown(self):
if self.connection._udp_socket: # Close opened UDP sockets
yield self.connection._udp_socket.close()
yield super(TestSocks5Connection, self).tearDown()
def test_invalid_version(self):
"""
Test passing an invalid version to the socks5 server
"""
self.connection.dataReceived('040100'.decode('hex'))
self.assertFalse(self.connection.transport.connected)
def test_method_request(self):
"""
Test sending a method request to the socks5 server
"""
self.connection.dataReceived('050100'.decode('hex'))
self.assertTrue(self.connection.transport.written_data)
self.assertEqual(self.connection.state, ConnectionState.CONNECTED)
def test_udp_associate(self):
"""
Test sending a udp associate request to the socks5 server
"""
self.connection.dataReceived('050100'.decode('hex'))
self.connection.dataReceived('05030001000000000000'.decode('hex'))
self.assertEqual(len(self.connection.transport.written_data), 2)
self.assertEqual(self.connection.state, ConnectionState.PROXY_REQUEST_RECEIVED)
def test_bind(self):
"""
Test sending a bind request to the socks5 server
"""
self.connection.dataReceived('050100'.decode('hex'))
self.connection.dataReceived('0502000100000000263f'.decode('hex'))
self.assertEqual(len(self.connection.transport.written_data), 2)
def test_connect(self):
"""
Test sending a connect command (which should be denied, we don't support TCP over our SOCKS5)
"""
self.connection.dataReceived('050100'.decode('hex'))
self.connection.dataReceived('05010003096c6f63616c686f73740050'.decode('hex'))
self.assertEqual(len(self.connection.transport.written_data), 2)
def test_unknown_command(self):
"""
Test sending an unknown command to the socks5 server after handshake
"""
self.connection.dataReceived('050100'.decode('hex'))
self.connection.dataReceived('05490003096c6f63616c686f73740050'.decode('hex'))
self.assertEqual(len(self.connection.transport.written_data), 2)
self.assertEqual(self.connection.state, ConnectionState.CONNECTED)
def test_invalid_methods(self):
"""
Test sending an invalid methods packet
"""
self.connection.dataReceived('0501'.decode('hex'))
self.assertEqual(len(self.connection.buffer), 2) # We are still waiting for data
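# Illustrative breakdown (not part of the original tests) of the raw SOCKS5
# packets fed to dataReceived above, following RFC 1928:
#
#   '050100'                           ver=0x05, nmethods=1, methods=[0x00 no-auth]
#   '05030001000000000000'             ver=0x05, cmd=0x03 UDP ASSOCIATE, rsv=0x00,
#                                      atyp=0x01 IPv4, addr=0.0.0.0, port=0
#   '0502000100000000263f'             ver=0x05, cmd=0x02 BIND, atyp=IPv4,
#                                      addr=0.0.0.0, port=0x263f (9791)
#   '05010003096c6f63616c686f73740050' ver=0x05, cmd=0x01 CONNECT, atyp=0x03 domain,
#                                      len=9, "localhost", port=0x0050 (80)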
|
Captain-Coder/tribler
|
Tribler/Test/Core/Socks5/test_connection.py
|
Python
|
lgpl-3.0
| 3,620
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gzip
import json
import os
import textwrap
import pandas as pd
from sqlalchemy import DateTime, String
from superset import db, security_manager
from superset.connectors.sqla.models import SqlMetric, TableColumn
from superset.utils.core import get_or_create_main_db
from .helpers import (
config,
Dash,
DATA_FOLDER,
get_slice_json,
merge_slice,
Slice,
TBL,
update_slice_ids,
)
def load_birth_names():
"""Loading birth name dataset from a zip file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'birth_names.json.gz')) as f:
pdf = pd.read_json(f)
pdf.ds = pd.to_datetime(pdf.ds, unit='ms')
pdf.to_sql(
'birth_names',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
'gender': String(16),
'state': String(10),
'name': String(255),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [birth_names] reference')
obj = db.session.query(TBL).filter_by(table_name='birth_names').first()
if not obj:
obj = TBL(table_name='birth_names')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
obj.filter_select_enabled = True
if not any(col.column_name == 'num_california' for col in obj.columns):
obj.columns.append(TableColumn(
column_name='num_california',
expression="CASE WHEN state = 'CA' THEN num ELSE 0 END",
))
if not any(col.metric_name == 'sum__num' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='sum__num',
expression='SUM(num)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
defaults = {
'compare_lag': '10',
'compare_suffix': 'o10Y',
'limit': '25',
'granularity_sqla': 'ds',
'groupby': [],
'metric': 'sum__num',
'metrics': ['sum__num'],
'row_limit': config.get('ROW_LIMIT'),
'since': '100 years ago',
'until': 'now',
'viz_type': 'table',
'where': '',
'markup_type': 'markdown',
}
admin = security_manager.find_user('admin')
print('Creating some slices')
slices = [
Slice(
slice_name='Girls',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
row_limit=50,
timeseries_limit_metric='sum__num')),
Slice(
slice_name='Boys',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['boy'],
}],
row_limit=50)),
Slice(
slice_name='Participants',
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number', granularity_sqla='ds',
compare_lag='5', compare_suffix='over 5Y')),
Slice(
slice_name='Genders',
viz_type='pie',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pie', groupby=['gender'])),
Slice(
slice_name='Genders by State',
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
adhoc_filters=[
{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '2745eae5',
'comparator': ['other'],
'operator': 'not in',
'subject': 'state',
},
],
viz_type='dist_bar',
metrics=[
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_boys',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Boys',
'optionName': 'metric_11',
},
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_girls',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Girls',
'optionName': 'metric_12',
},
],
groupby=['state'])),
Slice(
slice_name='Trends',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line', groupby=['name'],
granularity_sqla='ds', rich_tooltip=True, show_legend=True)),
Slice(
slice_name='Average and Sum Trends',
viz_type='dual_line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='dual_line',
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num',
'type': 'BIGINT(20)',
},
'aggregate': 'AVG',
'label': 'AVG(num)',
'optionName': 'metric_vgops097wej_g8uff99zhk7',
},
metric_2='sum__num',
granularity_sqla='ds')),
Slice(
slice_name='Title',
viz_type='markup',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='markup', markup_type='html',
code="""\
<div style='text-align:center'>
<h1>Birth Names Dashboard</h1>
<p>
The source dataset came from
<a href='https://github.com/hadley/babynames' target='_blank'>[here]</a>
</p>
<img src='/static/assets/images/babytux.jpg'>
</div>
""")),
Slice(
slice_name='Name Cloud',
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='word_cloud', size_from='10',
series='name', size_to='70', rotation='square',
limit='100')),
Slice(
slice_name='Pivot Table',
viz_type='pivot_table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pivot_table', metrics=['sum__num'],
groupby=['name'], columns=['state'])),
Slice(
slice_name='Number of Girls',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number_total', granularity_sqla='ds',
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
subheader='total female participants')),
Slice(
slice_name='Number of California Births',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
viz_type='big_number_total',
granularity_sqla='ds')),
Slice(
slice_name='Top 10 California Names Timeseries',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metrics=[{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
}],
viz_type='line',
granularity_sqla='ds',
groupby=['name'],
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
limit='10')),
Slice(
slice_name='Names Sorted by Num in California',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
row_limit=50,
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
})),
Slice(
slice_name='Num Births Trend',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line')),
Slice(
slice_name='Daily Totals',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
created_by=admin,
params=get_slice_json(
defaults,
groupby=['ds'],
since='40 years ago',
until='now',
viz_type='table')),
]
for slc in slices:
merge_slice(slc)
print('Creating a dashboard')
dash = db.session.query(Dash).filter_by(dashboard_title='Births').first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-0dd270f0": {
"meta": {
"chartId": 51,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-0dd270f0",
"children": []
},
"CHART-a3c21bcc": {
"meta": {
"chartId": 52,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-a3c21bcc",
"children": []
},
"CHART-976960a5": {
"meta": {
"chartId": 53,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-976960a5",
"children": []
},
"CHART-58575537": {
"meta": {
"chartId": 54,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-58575537",
"children": []
},
"CHART-e9cd8f0b": {
"meta": {
"chartId": 55,
"width": 8,
"height": 38
},
"type": "CHART",
"id": "CHART-e9cd8f0b",
"children": []
},
"CHART-e440d205": {
"meta": {
"chartId": 56,
"width": 8,
"height": 50
},
"type": "CHART",
"id": "CHART-e440d205",
"children": []
},
"CHART-59444e0b": {
"meta": {
"chartId": 57,
"width": 3,
"height": 38
},
"type": "CHART",
"id": "CHART-59444e0b",
"children": []
},
"CHART-e2cb4997": {
"meta": {
"chartId": 59,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-e2cb4997",
"children": []
},
"CHART-e8774b49": {
"meta": {
"chartId": 60,
"width": 12,
"height": 50
},
"type": "CHART",
"id": "CHART-e8774b49",
"children": []
},
"CHART-985bfd1e": {
"meta": {
"chartId": 61,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-985bfd1e",
"children": []
},
"CHART-17f13246": {
"meta": {
"chartId": 62,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-17f13246",
"children": []
},
"CHART-729324f6": {
"meta": {
"chartId": 63,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-729324f6",
"children": []
},
"COLUMN-25a865d6": {
"meta": {
"width": 4,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-25a865d6",
"children": [
"ROW-cc97c6ac",
"CHART-e2cb4997"
]
},
"COLUMN-4557b6ba": {
"meta": {
"width": 8,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-4557b6ba",
"children": [
"ROW-d2e78e59",
"CHART-e9cd8f0b"
]
},
"GRID_ID": {
"type": "GRID",
"id": "GRID_ID",
"children": [
"ROW-8515ace3",
"ROW-1890385f",
"ROW-f0b64094",
"ROW-be9526b8"
]
},
"HEADER_ID": {
"meta": {
"text": "Births"
},
"type": "HEADER",
"id": "HEADER_ID"
},
"MARKDOWN-00178c27": {
"meta": {
"width": 5,
"code": "<div style=\\"text-align:center\\">\\n <h1>Birth Names Dashboard</h1>\\n <p>\\n The source dataset came from\\n <a href=\\"https://github.com/hadley/babynames\\" target=\\"_blank\\">[here]</a>\\n </p>\\n <img src=\\"/static/assets/images/babytux.jpg\\">\\n</div>\\n",
"height": 38
},
"type": "MARKDOWN",
"id": "MARKDOWN-00178c27",
"children": []
},
"ROOT_ID": {
"type": "ROOT",
"id": "ROOT_ID",
"children": [
"GRID_ID"
]
},
"ROW-1890385f": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-1890385f",
"children": [
"CHART-e440d205",
"CHART-0dd270f0",
"CHART-a3c21bcc"
]
},
"ROW-8515ace3": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-8515ace3",
"children": [
"COLUMN-25a865d6",
"COLUMN-4557b6ba"
]
},
"ROW-be9526b8": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-be9526b8",
"children": [
"CHART-985bfd1e",
"CHART-17f13246",
"CHART-729324f6"
]
},
"ROW-cc97c6ac": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-cc97c6ac",
"children": [
"CHART-976960a5",
"CHART-58575537"
]
},
"ROW-d2e78e59": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-d2e78e59",
"children": [
"MARKDOWN-00178c27",
"CHART-59444e0b"
]
},
"ROW-f0b64094": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-f0b64094",
"children": [
"CHART-e8774b49"
]
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
# dashboard v2 doesn't allow add markup slice
dash.slices = [slc for slc in slices if slc.viz_type != 'markup']
update_slice_ids(pos, dash.slices)
dash.dashboard_title = 'Births'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = 'births'
db.session.merge(dash)
db.session.commit()
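# Illustrative sketch (not part of the original loader): the dashboard position
# JSON above is a flat dict of nodes keyed by id, each node listing the ids of
# its children. A tiny walker makes the ROOT -> GRID -> ROW -> CHART nesting
# visible:
#
#   def walk(pos, node_id='ROOT_ID', depth=0):
#       node = pos[node_id]
#       print('  ' * depth + node['type'] + ' ' + node_id)
#       for child in node.get('children', []):
#           walk(pos, child, depth + 1)
#
#   # walk(json.loads(js)) prints ROOT_ID, GRID_ID, then each ROW with its
#   # CHART / COLUMN / MARKDOWN descendants indented beneath it.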
|
airbnb/caravel
|
superset/data/birth_names.py
|
Python
|
apache-2.0
| 18,557
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from .routers import router
from .contrib_routers import router as contrib_router
##############################################
# Default
##############################################
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/', include(contrib_router.urls)),
url(r'^api/v1/api-auth/', include('taiga.base.api.urls', namespace='api')),
url(r'^admin/', include(admin.site.urls)),
]
handler500 = "taiga.base.api.views.api_server_error"
##############################################
# Front sitemap
##############################################
if settings.FRONT_SITEMAP_ENABLED:
from django.contrib.sitemaps.views import index
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
from taiga.front.sitemaps import sitemaps
urlpatterns += [
url(r"^front/sitemap\.xml$",
cache_page(settings.FRONT_SITEMAP_CACHE_TIMEOUT)(index),
{"sitemaps": sitemaps, 'sitemap_url_name': 'front-sitemap'},
name="front-sitemap-index"),
url(r"^front/sitemap-(?P<section>.+)\.xml$",
cache_page(settings.FRONT_SITEMAP_CACHE_TIMEOUT)(sitemap),
{"sitemaps": sitemaps},
name="front-sitemap")
]
##############################################
# Static and media files in debug mode
##############################################
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
def mediafiles_urlpatterns(prefix):
"""
        Method for serving media files with runserver.
"""
import re
from django.views.static import serve
return [
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), serve,
{'document_root': settings.MEDIA_ROOT})
]
# Hardcoded only for development server
urlpatterns += staticfiles_urlpatterns(prefix="/static/")
urlpatterns += mediafiles_urlpatterns(prefix="/media/")
|
CoolCloud/taiga-back
|
taiga/urls.py
|
Python
|
agpl-3.0
| 2,961
|
# Load the WCS information from a fits header, and use it
# to convert pixel coordinates to world coordinates.
from __future__ import division # confidence high
import numpy
import pywcs
import pyfits
import sys
# Load the FITS hdulist using pyfits
hdulist = pyfits.open(sys.argv[-1])
# Parse the WCS keywords in the primary HDU
wcs = pywcs.WCS(hdulist[0].header)
# Print out the "name" of the WCS, as defined in the FITS header
print wcs.wcs.name
# Print out all of the settings that were parsed from the header
wcs.wcs.print_contents()
# Some pixel coordinates of interest.
pixcrd = numpy.array([[0,0],[24,38],[45,98]], numpy.float_)
# Convert pixel coordinates to world coordinates
# The second argument is "origin" -- in this case we're declaring we
# have 1-based (Fortran-like) coordinates.
sky = wcs.wcs_pix2sky(pixcrd, 1)
print sky
# Convert the same coordinates back to pixel coordinates.
pixcrd2 = wcs.wcs_sky2pix(sky, 1)
print pixcrd2
# These should be the same as the original pixel coordinates, modulo
# some floating-point error.
assert numpy.max(numpy.abs(pixcrd - pixcrd2)) < 1e-6
|
zqhuang/COOP
|
mapio/pyscripts/readflat.py
|
Python
|
gpl-3.0
| 1,107
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import socket
import sickbeard
from sickbeard import logger, common
from sickrage.helper.exceptions import ex
from libgrowl import gntp
class GrowlNotifier(object):
sr_logo_url = 'https://raw.githubusercontent.com/SickRage/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'
def test_notify(self, host, password):
self._sendRegistration(host, password, 'Test')
return self._sendGrowl("Test Growl", "Testing Growl settings from SickRage", "Test", host, password,
force=True)
def notify_snatch(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONSNATCH:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ": " + lang)
def notify_git_update(self, new_version="??"):
if sickbeard.USE_GROWL:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._sendGrowl(title, update_text + new_version)
def notify_login(self, ipaddress=""):
if sickbeard.USE_GROWL:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
self._sendGrowl(title, update_text.format(ipaddress))
def _send_growl(self, options, message=None):
# Send Notification
notice = gntp.GNTPNotice()
# Required
notice.add_header('Application-Name', options['app'])
notice.add_header('Notification-Name', options['name'])
notice.add_header('Notification-Title', options['title'])
if options['password']:
notice.set_password(options['password'])
# Optional
if options['sticky']:
notice.add_header('Notification-Sticky', options['sticky'])
if options['priority']:
notice.add_header('Notification-Priority', options['priority'])
if options['icon']:
notice.add_header('Notification-Icon', self.sr_logo_url)
if message:
notice.add_header('Notification-Text', message)
response = self._send(options['host'], options['port'], notice.encode(), options['debug'])
return True if isinstance(response, gntp.GNTPOK) else False
def _send(self, host, port, data, debug=False):
if debug:
print '<Sending>\n', data, '\n</Sending>'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(data)
response = gntp.parse_gntp(s.recv(1024))
s.close()
if debug:
print '<Received>\n', response, '\n</Received>'
return response
def _sendGrowl(self, title="SickRage Notification", message=None, name=None, host=None, password=None,
force=False):
if not sickbeard.USE_GROWL and not force:
return False
if name is None:
name = title
if host is None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
growlHosts = [(hostParts[0], port)]
opts = {
'name': name,
'title': title,
'app': 'SickRage',
'sticky': None,
'priority': None,
'debug': False
}
if password is None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['icon'] = True
for pc in growlHosts:
opts['host'] = pc[0]
opts['port'] = pc[1]
logger.log(u"GROWL: Sending message '" + message + "' to " + opts['host'] + ":" + str(opts['port']), logger.DEBUG)
try:
if self._send_growl(opts, message):
return True
else:
if self._sendRegistration(host, password, 'Sickbeard'):
return self._send_growl(opts, message)
else:
return False
except Exception as e:
logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING)
return False
def _sendRegistration(self, host=None, password=None, name='SickRage Notification'):
opts = {}
if host is None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
opts['host'] = hostParts[0]
opts['port'] = port
if password is None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['app'] = 'SickRage'
opts['debug'] = False
# Send Registration
register = gntp.GNTPRegister()
register.add_header('Application-Name', opts['app'])
register.add_header('Application-Icon', self.sr_logo_url)
register.add_notification('Test', True)
register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)
register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)
register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)
if opts['password']:
register.set_password(opts['password'])
try:
return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])
except Exception as e:
logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING)
return False
notifier = GrowlNotifier
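# Illustrative note (not part of the original notifier): how the "host[:port]"
# strings above are split, falling back to the default GNTP port 23053:
#
#   'growlbox'.split(':')        -> ['growlbox']           -> port 23053
#   'growlbox:23053'.split(':')  -> ['growlbox', '23053']  -> port 23053
#   'growlbox:'.split(':')       -> ['growlbox', '']       -> port 23053 (empty port)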
|
pkoutsias/SickRage
|
sickbeard/notifiers/growl.py
|
Python
|
gpl-3.0
| 7,058
|
__version_info__ = {
'major': 0,
'minor': 4,
'micro': 1,
'releaselevel': 'final',
}
def get_version():
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s' % __version_info__)
return ''.join(vers)
__version__ = get_version()
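# Illustrative check (not part of the original module): what get_version()
# yields for a couple of hand-made __version_info__ dicts.
#
#   {'major': 0, 'minor': 4, 'micro': 1, 'releaselevel': 'final'} -> '0.4.1'
#   {'major': 1, 'minor': 2, 'micro': 0, 'releaselevel': 'beta'}  -> '1.2beta'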
|
ifearcompilererrors/fle_redesign
|
fle_redesign/apps/radpress/__init__.py
|
Python
|
mit
| 491
|
import glob
import re
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages("test-results-plots.pdf")
# each name in the header is of format like: 5b#beer temp
# 5: subplot number
# b: optional plot type specifier
# after #: legend name
class header_decoder:
def __init__(self):
self.matcher = re.compile("\s*([0-9]+)([a-z]*)#(.*)\s*")
self.names = []
self.indices = []
self.types = []
def decode(self, file_name):
        f = open(file_name, "rb")  # read just the CSV header row
names = csv.reader(f).next()
f.close()
self.names = []
self.indices = []
self.types = []
for name in names:
match = self.matcher.match(name)
if match:
self.indices.append(match.group(1))
self.types.append(match.group(2))
self.names.append(match.group(3))
else:
print "Cannot decode name '{0}' CSV header of {1}".format(name, file_name)
exit(1)
hd = header_decoder()
for file in glob.glob("*.csv"):
data = np.genfromtxt(file, delimiter = ',', names = True)
plt.figure(figsize=(10,20))
plt.suptitle(file)
num_plots = len(data.dtype.names)
count = 0
hd.decode(file)
for col_name in data.dtype.names:
plt.hold(True)
plot_nr = int(hd.indices[count])
plt.subplot(max(hd.indices), 1, plot_nr)
if hd.types[count] == 'a': # area plot
line = plt.plot(data[col_name], label=hd.names[count])
x = range(0, len(data[col_name]))
last_color = line[-1].get_color()
plt.fill_between(x, 0, data[col_name], facecolor=last_color, alpha=0.5)
else:
plt.plot(data[col_name], label=hd.names[count])
plt.legend()
count += 1
ymin, ymax = plt.ylim()
if ymin < 0 < ymax:
plt.axhline(0, hold=True, color = 'grey') # plot line through zero
pdf.savefig()
mng = plt.get_current_fig_manager()
if plt.get_backend() == 'TkAgg':
mng.window.state('zoomed')
elif plt.get_backend() == 'wxAgg':
mng.frame.Maximize(True)
elif plt.get_backend() == 'QT4Agg':
mng.window.showMaximized()
plt.show()
plt.close()
pdf.close()
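# Illustrative check (not part of the original script): how the header pattern
# above decodes a column name such as "5b#beer temp".
#
#   import re
#   m = re.compile("\s*([0-9]+)([a-z]*)#(.*)\s*").match("5b#beer temp")
#   m.group(1), m.group(2), m.group(3)   # -> ('5', 'b', 'beer temp')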
|
glibersat/firmware
|
test_results/plot_all.py
|
Python
|
agpl-3.0
| 2,367
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pkg_resources
import os
from openfisca_france_data.calibration import Calibration
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
openfisca_france_data_location = pkg_resources.get_distribution('openfisca-france-data').location
def test_calibration():
year = 2009
input_data_frame = get_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
survey_scenario.initialize_weights()
calibration = Calibration(survey_scenario)
calibration.parameters['method'] = 'linear'
print calibration.initial_total_population
calibration.total_population = calibration.initial_total_population * 1.123
print calibration.total_population
filename = os.path.join(
openfisca_france_data_location,
"openfisca_france_data",
"calibrations",
"calib_2006.csv"
)
calibration.set_inputs_margins_from_file(filename, 2006)
calibration.set_parameters('invlo', 3)
calibration.set_parameters('up', 3)
calibration.set_parameters('method', 'logit')
calibration.calibrate()
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_calibration()
|
adrienpacifico/openfisca-france-data
|
openfisca_france_data/tests/test_calibration.py
|
Python
|
agpl-3.0
| 2,331
|
import asyncio
import os
import pathlib
import pytest
import aiohttp
from aiohttp import web
try:
import ssl
except ImportError:
ssl = False
@pytest.fixture(params=['sendfile', 'fallback'], ids=['sendfile', 'fallback'])
def sender(request):
def maker(*args, **kwargs):
ret = web.FileResponse(*args, **kwargs)
if request.param == 'fallback':
ret._sendfile = ret._sendfile_fallback
return ret
return maker
@asyncio.coroutine
def test_static_file_ok(loop, test_client, sender):
filepath = pathlib.Path(__file__).parent / 'data.unknown_mime_type'
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/')
assert resp.status == 200
txt = yield from resp.text()
assert 'file content' == txt.rstrip()
assert 'application/octet-stream' == resp.headers['Content-Type']
assert resp.headers.get('Content-Encoding') is None
yield from resp.release()
@asyncio.coroutine
def test_static_file_ok_string_path(loop, test_client, sender):
filepath = pathlib.Path(__file__).parent / 'data.unknown_mime_type'
@asyncio.coroutine
def handler(request):
return sender(str(filepath))
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/')
assert resp.status == 200
txt = yield from resp.text()
assert 'file content' == txt.rstrip()
assert 'application/octet-stream' == resp.headers['Content-Type']
assert resp.headers.get('Content-Encoding') is None
yield from resp.release()
@asyncio.coroutine
def test_static_file_not_exists(loop, test_client):
app = web.Application()
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/fake')
assert resp.status == 404
yield from resp.release()
@asyncio.coroutine
def test_static_file_name_too_long(loop, test_client):
app = web.Application()
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/x*500')
assert resp.status == 404
yield from resp.release()
@asyncio.coroutine
def test_static_file_upper_directory(loop, test_client):
app = web.Application()
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/../../')
assert resp.status == 404
yield from resp.release()
@asyncio.coroutine
def test_static_file_with_content_type(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent / 'aiohttp.jpg')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/')
assert resp.status == 200
body = yield from resp.read()
with filepath.open('rb') as f:
content = f.read()
assert content == body
assert resp.headers['Content-Type'] == 'image/jpeg'
assert resp.headers.get('Content-Encoding') is None
resp.close()
@asyncio.coroutine
def test_static_file_with_content_encoding(loop, test_client, sender):
filepath = pathlib.Path(__file__).parent / 'hello.txt.gz'
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/')
assert 200 == resp.status
body = yield from resp.read()
assert b'hello aiohttp\n' == body
ct = resp.headers['CONTENT-TYPE']
assert 'text/plain' == ct
encoding = resp.headers['CONTENT-ENCODING']
assert 'gzip' == encoding
resp.close()
@asyncio.coroutine
def test_static_file_if_modified_since(loop, test_client, sender):
filename = 'data.unknown_mime_type'
filepath = pathlib.Path(__file__).parent / filename
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
resp = yield from client.get('/')
assert 200 == resp.status
lastmod = resp.headers.get('Last-Modified')
assert lastmod is not None
resp.close()
resp = yield from client.get('/', headers={'If-Modified-Since': lastmod})
assert 304 == resp.status
resp.close()
@asyncio.coroutine
def test_static_file_if_modified_since_past_date(loop, test_client, sender):
filename = 'data.unknown_mime_type'
filepath = pathlib.Path(__file__).parent / filename
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
lastmod = 'Mon, 1 Jan 1990 01:01:01 GMT'
resp = yield from client.get('/', headers={'If-Modified-Since': lastmod})
assert 200 == resp.status
resp.close()
@asyncio.coroutine
def test_static_file_if_modified_since_invalid_date(loop, test_client, sender):
filename = 'data.unknown_mime_type'
filepath = pathlib.Path(__file__).parent / filename
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
lastmod = 'not a valid HTTP-date'
resp = yield from client.get('/', headers={'If-Modified-Since': lastmod})
assert 200 == resp.status
resp.close()
@asyncio.coroutine
def test_static_file_if_modified_since_future_date(loop, test_client, sender):
filename = 'data.unknown_mime_type'
filepath = pathlib.Path(__file__).parent / filename
@asyncio.coroutine
def handler(request):
return sender(filepath)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
lastmod = 'Fri, 31 Dec 9999 23:59:59 GMT'
resp = yield from client.get('/', headers={'If-Modified-Since': lastmod})
assert 304 == resp.status
resp.close()
@pytest.mark.skipif(not ssl, reason="ssl not supported")
@asyncio.coroutine
def test_static_file_ssl(loop, test_server, test_client):
dirname = os.path.dirname(__file__)
filename = 'data.unknown_mime_type'
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.load_cert_chain(
os.path.join(dirname, 'sample.crt'),
os.path.join(dirname, 'sample.key')
)
app = web.Application()
app.router.add_static('/static', dirname)
server = yield from test_server(app, ssl=ssl_ctx)
conn = aiohttp.TCPConnector(verify_ssl=False, loop=loop)
client = yield from test_client(server, connector=conn)
resp = yield from client.get('/static/'+filename)
assert 200 == resp.status
txt = yield from resp.text()
assert 'file content' == txt.rstrip()
ct = resp.headers['CONTENT-TYPE']
assert 'application/octet-stream' == ct
assert resp.headers.get('CONTENT-ENCODING') is None
@asyncio.coroutine
def test_static_file_directory_traversal_attack(loop, test_client):
dirname = os.path.dirname(__file__)
relpath = '../README.rst'
assert os.path.isfile(os.path.join(dirname, relpath))
app = web.Application()
app.router.add_static('/static', dirname)
client = yield from test_client(app)
resp = yield from client.get('/static/'+relpath)
assert 404 == resp.status
url_relpath2 = '/static/dir/../' + relpath
resp = yield from client.get(url_relpath2)
assert 404 == resp.status
url_abspath = \
'/static/' + os.path.abspath(os.path.join(dirname, relpath))
resp = yield from client.get(url_abspath)
assert 404 == resp.status
def test_static_route_path_existence_check():
directory = os.path.dirname(__file__)
web.StaticResource("/", directory)
nodirectory = os.path.join(directory, "nonexistent-uPNiOEAg5d")
with pytest.raises(ValueError):
web.StaticResource("/", nodirectory)
@asyncio.coroutine
def test_static_file_huge(loop, test_client, tmpdir):
filename = 'huge_data.unknown_mime_type'
    # fill a ~20 MB file (1024 * 20 rows of 1 KiB each)
with tmpdir.join(filename).open('w') as f:
for i in range(1024*20):
f.write(chr(i % 64 + 0x20) * 1024)
file_st = os.stat(str(tmpdir.join(filename)))
app = web.Application()
app.router.add_static('/static', str(tmpdir))
client = yield from test_client(app)
resp = yield from client.get('/static/'+filename)
assert 200 == resp.status
ct = resp.headers['CONTENT-TYPE']
assert 'application/octet-stream' == ct
assert resp.headers.get('CONTENT-ENCODING') is None
assert int(resp.headers.get('CONTENT-LENGTH')) == file_st.st_size
f = tmpdir.join(filename).open('rb')
off = 0
cnt = 0
while off < file_st.st_size:
chunk = yield from resp.content.readany()
expected = f.read(len(chunk))
assert chunk == expected
off += len(chunk)
cnt += 1
f.close()
@asyncio.coroutine
def test_static_file_range(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent.parent / 'LICENSE.txt')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
with filepath.open('rb') as f:
content = f.read()
# Ensure the whole file requested in parts is correct
responses = yield from asyncio.gather(
client.get('/', headers={'Range': 'bytes=0-999'}),
client.get('/', headers={'Range': 'bytes=1000-1999'}),
client.get('/', headers={'Range': 'bytes=2000-'}),
loop=loop
)
assert len(responses) == 3
assert responses[0].status == 206, \
"failed 'bytes=0-999': %s" % responses[0].reason
assert responses[1].status == 206, \
"failed 'bytes=1000-1999': %s" % responses[1].reason
assert responses[2].status == 206, \
"failed 'bytes=2000-': %s" % responses[2].reason
body = yield from asyncio.gather(
*(resp.read() for resp in responses),
loop=loop
)
assert len(body[0]) == 1000, \
"failed 'bytes=0-999', received %d bytes" % len(body[0])
assert len(body[1]) == 1000, \
"failed 'bytes=1000-1999', received %d bytes" % len(body[1])
responses[0].close()
responses[1].close()
responses[2].close()
assert content == b"".join(body)
@asyncio.coroutine
def test_static_file_range_end_bigger_than_size(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent / 'aiohttp.png')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
with filepath.open('rb') as f:
content = f.read()
# Ensure the whole file requested in parts is correct
response = yield from client.get(
'/', headers={'Range': 'bytes=61000-62000'})
assert response.status == 206, \
"failed 'bytes=61000-62000': %s" % response.reason
body = yield from response.read()
assert len(body) == 108, \
"failed 'bytes=0-999', received %d bytes" % len(body[0])
assert content[61000:] == body
@asyncio.coroutine
def test_static_file_range_beyond_eof(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent / 'aiohttp.png')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
# Ensure the whole file requested in parts is correct
response = yield from client.get(
'/', headers={'Range': 'bytes=1000000-1200000'})
assert response.status == 206, \
"failed 'bytes=1000000-1200000': %s" % response.reason
assert response.headers['content-length'] == '0'
@asyncio.coroutine
def test_static_file_range_tail(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent / 'aiohttp.png')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
with filepath.open('rb') as f:
content = f.read()
# Ensure the tail of the file is correct
resp = yield from client.get('/', headers={'Range': 'bytes=-500'})
assert resp.status == 206, resp.reason
body4 = yield from resp.read()
resp.close()
assert content[-500:] == body4
@asyncio.coroutine
def test_static_file_invalid_range(loop, test_client, sender):
filepath = (pathlib.Path(__file__).parent / 'aiohttp.png')
@asyncio.coroutine
def handler(request):
return sender(filepath, chunk_size=16)
app = web.Application()
app.router.add_get('/', handler)
client = yield from test_client(lambda loop: app)
# range must be in bytes
resp = yield from client.get('/', headers={'Range': 'blocks=0-10'})
assert resp.status == 416, 'Range must be in bytes'
resp.close()
# start > end
resp = yield from client.get('/', headers={'Range': 'bytes=100-0'})
assert resp.status == 416, "Range start can't be greater than end"
resp.close()
# start > end
resp = yield from client.get('/', headers={'Range': 'bytes=10-9'})
assert resp.status == 416, "Range start can't be greater than end"
resp.close()
# non-number range
resp = yield from client.get('/', headers={'Range': 'bytes=a-f'})
assert resp.status == 416, 'Range must be integers'
resp.close()
# double dash range
resp = yield from client.get('/', headers={'Range': 'bytes=0--10'})
assert resp.status == 416, 'double dash in range'
resp.close()
# no range
resp = yield from client.get('/', headers={'Range': 'bytes=-'})
assert resp.status == 416, 'no range given'
resp.close()
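# Illustrative note (not part of the original tests): the Range header forms
# the suite above exercises, for a resource of length N:
#
#   'bytes=0-999'   -> the first 1000 bytes            (206 Partial Content)
#   'bytes=2000-'   -> everything from offset 2000 on  (206)
#   'bytes=-500'    -> the last 500 bytes              (206)
#   'blocks=0-10', 'bytes=100-0', 'bytes=a-f', 'bytes=0--10', 'bytes=-'
#                   -> rejected with 416 Range Not Satisfiable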
|
juliatem/aiohttp
|
tests/test_web_sendfile_functional.py
|
Python
|
apache-2.0
| 14,325
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('menus', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('owner', models.ForeignKey(related_name='owner', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='week',
name='days',
),
migrations.AddField(
model_name='week',
name='friday',
field=models.ForeignKey(related_name='friday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='menu',
field=models.ForeignKey(related_name='weeks', default=None, to='menus.Menu'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='monday',
field=models.ForeignKey(related_name='monday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='saturday',
field=models.ForeignKey(related_name='saturday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='sunday',
field=models.ForeignKey(related_name='sunday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='thursday',
field=models.ForeignKey(related_name='thursday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='tuesday',
field=models.ForeignKey(related_name='tuesday', default=None, to='menus.Day'),
preserve_default=False,
),
migrations.AddField(
model_name='week',
name='wednesday',
field=models.ForeignKey(related_name='wednesday', default=None, to='menus.Day'),
preserve_default=False,
),
]
|
kimond/miamm
|
miamm/menus/migrations/0002_auto_20150208_1525.py
|
Python
|
bsd-3-clause
| 2,689
|
#Get celebrity data from posh24.com
from bs4 import BeautifulSoup
import json
import re
import requests
#Website to scrape top 100 celebrities from
website = "http://www.posh24.com/celebrities"
#Get website data
data = requests.get(website).text
#Parse data using bs4
soup = BeautifulSoup(data, "html.parser")
#List of top 100 celebs
data = soup.find_all(attrs={"class": "channelListEntry"})
celebs = {}
i = 1
for div in data:
links = div.findAll('a')
for a in links:
#Celeb object
celeb = {}
raw_name = a.get('href')
celeb_name = raw_name.replace("_", " ")[1:].title()
celeb['name'] = celeb_name
#Celeb page
nameWebsite = website[:website.rfind('/')] + raw_name
nameData = requests.get(nameWebsite).text
#Parse celeb page
nameSoup = BeautifulSoup(nameData, 'html.parser')
#Celeb info
attrs = nameSoup.find_all(attrs={"class": "attributeContent"})
#Celeb bio
bio = nameSoup.find(attrs={"class": "info"}).contents[0]
if (" he " in bio or " him " in bio):
celeb['gender'] = "m"
elif (" she " in bio or " her " in bio):
celeb['gender'] = "f"
else:
celeb['gender'] = "unknown"
if (len(attrs) != 0):
            birthInfo = re.sub(r'\s+', ' ', attrs[0].contents[0])
if ("in" in birthInfo):
birthInfo = birthInfo.split("in")
celebBD = birthInfo[0]
celebPlace = birthInfo[1]
                celebAge = re.sub(r'\s+', ' ', attrs[1].contents[0])
else:
celebBD = "unknown"
celebPlace = birthInfo
celebAge = "unknown"
#print(celebBD, celebPlace, celebAge)
celeb['birth'] = celebBD
celeb['birth-place'] = celebPlace
celeb['age'] = celebAge
else:
celeb['birth'] = "unknown"
celeb['birth-place'] = "unknown"
celeb['age'] = "unknown"
celeb['rank'] = i
i = i+1
#Add celeb to celeb list
celebs[celeb_name] = (celeb)
with open("celebs.json", 'w') as f:
json_data = json.dumps(celebs, sort_keys=True, indent=4, separators=(',', ': '))
f.write(json_data)
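# Optional sanity check, sketched as commented-out code (purely illustrative;
# it only re-reads the celebs.json file written above and compares counts):
#
#     with open("celebs.json") as check:
#         loaded = json.load(check)
#     assert len(loaded) == len(celebs)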
|
KingsleyBell/dotacelebbot
|
celebList.py
|
Python
|
mit
| 2,351
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class DomainPaged(Paged):
"""
A paging container for iterating over a list of :class:`Domain <azure.mgmt.web.models.Domain>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Domain]'}
}
def __init__(self, *args, **kwargs):
super(DomainPaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/domain_paged.py
|
Python
|
mit
| 906
|
#!/usr/bin/env python
#
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import psutil
import sys
import numpy
from matplotlib import pyplot
class CpuSnapshot(object):
def __init__(self, label):
self.label = label
self.samples = []
def Capture(self, sample_count):
print ('Capturing %d CPU samples for %s...' %
((sample_count - len(self.samples)), self.label))
while len(self.samples) < sample_count:
self.samples.append(psutil.cpu_percent(1.0, False))
def Text(self):
return ('%s: avg=%s, median=%s, min=%s, max=%s' %
(self.label, numpy.average(self.samples),
numpy.median(self.samples),
numpy.min(self.samples), numpy.max(self.samples)))
def Max(self):
return numpy.max(self.samples)
def GrabCpuSamples(sample_count):
print 'Label for snapshot (enter to quit): '
label = raw_input().strip()
if len(label) == 0:
return None
snapshot = CpuSnapshot(label)
snapshot.Capture(sample_count)
return snapshot
def main():
print 'How many seconds to capture per snapshot (enter for 60)?'
sample_count = raw_input().strip()
if len(sample_count) > 0 and int(sample_count) > 0:
sample_count = int(sample_count)
else:
print 'Defaulting to 60 samples.'
sample_count = 60
snapshots = []
while True:
snapshot = GrabCpuSamples(sample_count)
if snapshot is None:
break
snapshots.append(snapshot)
if len(snapshots) == 0:
print 'no samples captured'
return -1
pyplot.title('CPU usage')
for s in snapshots:
pyplot.plot(s.samples, label=s.Text(), linewidth=2)
pyplot.legend()
pyplot.show()
return 0
if __name__ == '__main__':
sys.exit(main())
|
endlessm/chromium-browser
|
third_party/webrtc/tools_webrtc/cpu/cpu_mon.py
|
Python
|
bsd-3-clause
| 2,057
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
from quark.db import api as db_api
from quark import exceptions as q_exc
from quark import plugin_views as v
LOG = logging.getLogger(__name__)
def _to_mac_range(val):
cidr_parts = val.split("/")
prefix = cidr_parts[0]
# FIXME(anyone): replace is slow, but this doesn't really
# get called ever. Fix maybe?
prefix = prefix.replace(':', '')
prefix = prefix.replace('-', '')
prefix_length = len(prefix)
if prefix_length < 6 or prefix_length > 12:
raise q_exc.InvalidMacAddressRange(cidr=val)
diff = 12 - len(prefix)
if len(cidr_parts) > 1:
mask = int(cidr_parts[1])
else:
mask = 48 - diff * 4
mask_size = 1 << (48 - mask)
prefix = "%s%s" % (prefix, "0" * diff)
try:
cidr = "%s/%s" % (str(netaddr.EUI(prefix)).replace("-", ":"), mask)
except netaddr.AddrFormatError:
raise q_exc.InvalidMacAddressRange(cidr=val)
prefix_int = int(prefix, base=16)
return cidr, prefix_int, prefix_int + mask_size
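# A small illustration of the expected return shape, hedged because the exact
# string formatting is delegated to netaddr.EUI (values below are approximate):
#
#     cidr, first, last = _to_mac_range("AA:BB:CC/24")
#     # cidr  -> roughly "AA:BB:CC:00:00:00/24"
#     # first -> 0xAABBCC000000
#     # last  -> first + (1 << 24)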
def get_mac_address_range(context, id, fields=None):
"""Retrieve a mac_address_range.
: param context: neutron api request context
    : param id: UUID representing the mac_address_range to fetch.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_mac_address_range %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
if not context.is_admin:
raise n_exc.NotAuthorized()
mac_address_range = db_api.mac_address_range_find(
context, id=id, scope=db_api.ONE)
if not mac_address_range:
raise q_exc.MacAddressRangeNotFound(
mac_address_range_id=id)
return v._make_mac_range_dict(mac_address_range)
def get_mac_address_ranges(context):
LOG.info("get_mac_address_ranges for tenant %s" % context.tenant_id)
if not context.is_admin:
raise n_exc.NotAuthorized()
ranges = db_api.mac_address_range_find(context)
return [v._make_mac_range_dict(m) for m in ranges]
def create_mac_address_range(context, mac_range):
LOG.info("create_mac_address_range for tenant %s" % context.tenant_id)
if not context.is_admin:
raise n_exc.NotAuthorized()
cidr = mac_range["mac_address_range"]["cidr"]
do_not_use = mac_range["mac_address_range"].get("do_not_use", "0")
cidr, first_address, last_address = _to_mac_range(cidr)
with context.session.begin():
new_range = db_api.mac_address_range_create(
context, cidr=cidr, first_address=first_address,
last_address=last_address, next_auto_assign_mac=first_address,
do_not_use=do_not_use)
return v._make_mac_range_dict(new_range)
def _delete_mac_address_range(context, mac_address_range):
if mac_address_range.allocated_macs:
raise q_exc.MacAddressRangeInUse(
mac_address_range_id=mac_address_range["id"])
db_api.mac_address_range_delete(context, mac_address_range)
def delete_mac_address_range(context, id):
"""Delete a mac_address_range.
: param context: neutron api request context
: param id: UUID representing the mac_address_range to delete.
"""
LOG.info("delete_mac_address_range %s for tenant %s" %
(id, context.tenant_id))
if not context.is_admin:
raise n_exc.NotAuthorized()
with context.session.begin():
mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)
if not mar:
raise q_exc.MacAddressRangeNotFound(
mac_address_range_id=id)
_delete_mac_address_range(context, mar)
|
lmaycotte/quark
|
quark/plugin_modules/mac_address_ranges.py
|
Python
|
apache-2.0
| 4,485
|
#!/usr/bin/python
#
# Double Pulsar Checks
# https://github.com/countercept/doublepulsar-detection-script/blob/master/detect_doublepulsar_rdp.py
# Author: Luke Jennings (luke.jennings@countercept.com - @jukelennings)
# XOR Key calculation provided by https://github.com/FireFart
#
# Modified version that allows to be used as library
#
# Copyright (c) 2017, Countercept (https://countercept.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import binascii
import socket
import ssl
import struct
class DoublePulsar(object):
def __init__(self, ip="127.0.0.1", timeout=None, verbose=False):
self.ip = ip
self.timeout = timeout
self.verbose = verbose
# RDP
# Packets
self.ssl_negotiation_request = binascii.unhexlify("030000130ee000000000000100080001000000")
self.non_ssl_negotiation_request = binascii.unhexlify("030000130ee000000000000100080000000000")
self.non_ssl_client_data = binascii.unhexlify(
"030001ac02f0807f658201a00401010401010101ff30190201220201020201000201010201000201010202ffff020102301902010102010102010102010102010002010102020420020102301c0202ffff0202fc170202ffff0201010201000201010202ffff0201020482013f000500147c00018136000800100001c00044756361812801c0d800040008000005000401ca03aa09080000b01d0000000000000000000000000000000000000000000000000000000000000000000007000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ca01000000000018000f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000004c00c00110000000000000002c00c001b0000000000000003c0380004000000726470647200000000008080726470736e640000000000c0647264796e766300000080c0636c6970726472000000a0c0")
self.ssl_client_data = binascii.unhexlify(
"030001ac02f0807f658201a00401010401010101ff30190201220201020201000201010201000201010202ffff020102301902010102010102010102010102010002010102020420020102301c0202ffff0202fc170202ffff0201010201000201010202ffff0201020482013f000500147c00018136000800100001c00044756361812801c0d800040008000005000401ca03aa09080000b01d0000000000000000000000000000000000000000000000000000000000000000000007000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ca01000000000018000f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000100000004c00c00110000000000000002c00c001b0000000000000003c0380004000000726470647200000000008080726470736e640000000000c0647264796e766300000080c0636c6970726472000000a0c0")
self.ping_packet = binascii.unhexlify("0300000e02f0803c443728190200")
# SMB
# Packets
self.negotiate_protocol_request = binascii.unhexlify(
"00000085ff534d4272000000001853c00000000000000000000000000000fffe00004000006200025043204e4554574f524b2050524f4752414d20312e3000024c414e4d414e312e30000257696e646f777320666f7220576f726b67726f75707320332e316100024c4d312e325830303200024c414e4d414e322e3100024e54204c4d20302e313200")
self.session_setup_request = binascii.unhexlify(
"00000088ff534d4273000000001807c00000000000000000000000000000fffe000040000dff00880004110a000000000000000100000000000000d40000004b000000000000570069006e0064006f007700730020003200300030003000200032003100390035000000570069006e0064006f007700730020003200300030003000200035002e0030000000")
self.tree_connect_request = binascii.unhexlify(
"00000060ff534d4275000000001807c00000000000000000000000000000fffe0008400004ff006000080001003500005c005c003100390032002e003100360038002e003100370035002e003100320038005c00490050004300240000003f3f3f3f3f00")
self.trans2_session_setup = binascii.unhexlify(
"0000004eff534d4232000000001807c00000000000000000000000000008fffe000841000f0c0000000100000000000000a6d9a40000000c00420000004e0001000e000d0000000000000000000000000000")
def check_ip_smb(self):
# Connect to socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(float(self.timeout) if self.timeout else None)
host = self.ip
port = 445
s.connect((host, port))
# Send/receive negotiate protocol request
if self.verbose:
print("Sending negotiation protocol request")
s.send(self.negotiate_protocol_request)
s.recv(1024)
# Send/receive session setup request
if self.verbose:
print("Sending session setup request")
s.send(self.session_setup_request)
session_setup_response = s.recv(1024)
# Extract user ID from session setup response
user_id = session_setup_response[32:34]
if self.verbose:
print("User ID = %s" % struct.unpack("<H", user_id)[0])
# Replace user ID in tree connect request packet
modified_tree_connect_request = list(self.tree_connect_request)
modified_tree_connect_request[32] = user_id[0]
modified_tree_connect_request[33] = user_id[1]
modified_tree_connect_request = "".join(modified_tree_connect_request)
# Send tree connect request
if self.verbose:
print("Sending tree connect")
s.send(modified_tree_connect_request)
tree_connect_response = s.recv(1024)
# Extract tree ID from response
tree_id = tree_connect_response[28:30]
if self.verbose:
print("Tree ID = %s" % struct.unpack("<H", tree_id)[0])
# Replace tree ID and user ID in trans2 session setup packet
modified_trans2_session_setup = list(self.trans2_session_setup)
modified_trans2_session_setup[28] = tree_id[0]
modified_trans2_session_setup[29] = tree_id[1]
modified_trans2_session_setup[32] = user_id[0]
modified_trans2_session_setup[33] = user_id[1]
modified_trans2_session_setup = "".join(modified_trans2_session_setup)
# Send trans2 sessions setup request
if self.verbose:
print("Sending trans2 session setup")
s.send(modified_trans2_session_setup)
final_response = s.recv(1024)
s.close()
# Check for 0x51 response to indicate DOUBLEPULSAR infection
if final_response[34] == "\x51":
signature = final_response[18:26]
signature_long = struct.unpack('<Q', signature)[0]
key = calculate_doublepulsar_xor_key(signature_long)
return True, "DoublePulsar SMB implant detected XOR KEY: %s " % hex(key)
else:
return False, "No presence of DOUBLEPULSAR SMB implant"
def check_ip_rdp(self):
# Connect to socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(float(self.timeout) if self.timeout else None)
host = self.ip
port = 3389
s.connect((host, port))
# Send/receive negotiation request
if self.verbose:
print("Sending negotiation request")
s.send(self.ssl_negotiation_request)
negotiation_response = s.recv(1024)
# Determine if server has chosen SSL
if len(negotiation_response) >= 19 and negotiation_response[11] == "\x02" and negotiation_response[15] == "\x01":
if self.verbose:
print("Server chose to use SSL - negotiating SSL connection")
sock = ssl.wrap_socket(s)
s = sock
# Send/receive ssl client data
if self.verbose:
print("Sending SSL client data")
s.send(self.ssl_client_data)
s.recv(1024)
# Server explicitly refused SSL
elif len(negotiation_response) >= 19 and negotiation_response[11] == "\x03" and negotiation_response[15] == "\x02":
if self.verbose:
print("Server explicitly refused SSL, reconnecting")
# Re-connect
s.close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(float(self.timeout) if self.timeout else None)
s.connect((host, port))
# Send/receive non-ssl negotiation request
if self.verbose:
print("Sending non-ssl negotiation request")
s.send(self.non_ssl_negotiation_request)
s.recv(1024)
# Server requires NLA which implant does not support
elif len(negotiation_response) >= 19 and negotiation_response[11] == "\x03" and negotiation_response[15] == "\x05":
s.close()
return False, "Server requires NLA, which DOUBLEPULSAR does not support"
# Carry on non-ssl
else:
# Send/receive non-ssl client data
if self.verbose:
print("Sending client data")
s.send(self.non_ssl_client_data)
s.recv(1024)
# Send/receive ping
if self.verbose:
print("Sending ping packet")
s.send(self.ping_packet)
# Non-infected machines terminate connection, infected send a response
try:
            ping_response = s.recv(1024)
            s.close()
            if len(ping_response) == 288:
                return True, "DoublePulsar RDP implant detected"
            else:
                return False, "Status Unknown - Response received but length was %d not 288" % (len(ping_response))
except socket.error as e:
return False, "No presence of DOUBLEPULSAR RDP implant"
def calculate_doublepulsar_xor_key(s):
x = (2 * s ^ (((s & 0xff00 | (s << 16)) << 8) | (((s >> 16) | s & 0xff0000) >> 8)))
x = x & 0xffffffff # this line was added just to truncate to 32 bits
return x
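# Minimal usage sketch (mirrors how check_ip_smb consumes this helper; the byte
# string below is a made-up placeholder signature, not real traffic):
#
#     signature_long = struct.unpack('<Q', b'\x12\x34\x56\x78\x9a\xbc\xde\xf0')[0]
#     xor_key = calculate_doublepulsar_xor_key(signature_long)
#     print("XOR key: %s" % hex(xor_key))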
|
Neo23x0/Loki
|
lib/doublepulsar.py
|
Python
|
gpl-3.0
| 11,278
|
from ert_gui.models.mixins import ModelMixin, AbstractMethodError
class BasicModelMixin(ModelMixin):
VALUE_CHANGED_EVENT = "value_changed_event"
def registerDefaultEvents(self):
super(BasicModelMixin, self).registerDefaultEvents()
self.observable().addEvent(BasicModelMixin.VALUE_CHANGED_EVENT)
def getValue(self):
raise AbstractMethodError(self, "getValue")
def setValue(self, value):
raise AbstractMethodError(self, "setValue")
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert_gui/models/mixins/basic_model.py
|
Python
|
gpl-3.0
| 484
|
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.Utils import *
from ecell.ui.model_editor.Command import *
__all__ = (
'LayoutCommand',
'CreateLayout',
'DeleteLayout',
'RenameLayout',
'CloneLayout',
'PasteLayout',
'CreateObject',
'DeleteObject',
'ChangeLayoutProperty',
'SetObjectProperty',
'PasteObject',
'UndeleteObject',
'MoveObject',
'ResizeObject',
'CreateConnection',
'RedirectConnection',
)
class LayoutCommand( Command ):
def __init__(self, aReceiver, *args):
"""
convert Layout receiver to LayoutName
"""
if aReceiver.__class__.__name__ == "Layout":
self.theLayoutName = aReceiver.getName()
self.theLayoutManager = aReceiver.getLayoutManager()
else:
self.theLayoutName = None
Command.__init__( self, aReceiver, *args )
def execute(self):
"""
must convert LayoutName into receiver
"""
if self.theLayoutName != None:
self.theReceiver = self.theLayoutManager.getLayout( self.theLayoutName )
return Command.execute( self )
def checkArgs( self ):
if type (self.theReceiver) == type(self):
if self.theReceiver.__class__.__name__ == self.RECEIVER:
return True
return False
class CreateLayout(LayoutCommand):
"""
    arg1: NAME
    arg2: SHOW
"""
RECEIVER = 'LayoutManager'
ARGS_NO = 2
NAME = 0
SHOW = 1
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.theName = self.theArgs[ self.NAME ]
self.isShow = self.theArgs[ self.SHOW ]
#check if layout name exists
if self.theReceiver.doesLayoutExist(self.theName):
return False
return True
def do( self ):
self.theReceiver.createLayout( self.theName)
if self.isShow:
self.theReceiver.showLayout(self.theName)
return True
def createReverseCommand( self ):
self.theReverseCommandList = [ DeleteLayout( self.theReceiver, self.theName ) ]
def getAffected( self ):
return (self.RECEIVER, None )
class DeleteLayout(LayoutCommand):
"""
arg1: NAME
"""
RECEIVER = 'LayoutManager'
ARGS_NO = 1
NAME = 0
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.theName = self.theArgs[ self.NAME ]
#check if layout name exists
if not self.theReceiver.doesLayoutExist(self.theName):
return False
return True
def do( self ):
# prepare copy of layout
layoutBuffer = self.theReceiver.theLayoutBufferFactory.createLayoutBuffer( self.theName )
layoutBuffer.setUndoFlag( True )
        # check if layout was shown and set show flag in pastelayout command accordingly
aLayout = self.theReceiver.getLayout( self.theName )
self.theReverseCommandList = [ PasteLayout( self.theReceiver, layoutBuffer, None, aLayout.isShown() ) ]
self.theReceiver.deleteLayout( self.theName)
return True
def createReverseCommand( self ):
self.theReverseCommandList = None
def getAffected( self ):
return (self.RECEIVER, None )
class RenameLayout(LayoutCommand):
"""
    arg1: OLDNAME
    arg2: NEWNAME
"""
RECEIVER = 'LayoutManager'
ARGS_NO = 2
OLDNAME = 0
NEWNAME = 1
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.newName = self.theArgs[ self.NEWNAME ]
if not isIDEligible( self.newName ):
return False
self.oldName = self.theArgs[ self.OLDNAME ]
#check if layout name exists
if self.theReceiver.doesLayoutExist(self.newName):
return False
if not self.theReceiver.doesLayoutExist(self.oldName):
return False
return True
def do( self ):
self.theReceiver.renameLayout( self.oldName,self.newName)
return True
def createReverseCommand( self ):
#self.theReverseCommandList = [ RenameLayout( self.theReceiver, self.oldName, self.newName ) ]
self.theReverseCommandList = [ RenameLayout( self.theReceiver, self.newName, self.oldName ) ]
def getAffected( self ):
return (self.RECEIVER, None )
class CloneLayout(LayoutCommand):
"""
arg1: TEMPLATE
"""
RECEIVER = 'LayoutManager'
ARGS_NO = 1
TEMPLATE = 0
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.theTemplate = self.theArgs[ self.TEMPLATE ]
#check if layout name exists
if not self.theReceiver.doesLayoutExist(self.theTemplate):
return False
return True
def do(self):
layoutBuffer = self.theReceiver.theLayoutBufferFactory.createLayoutBuffer( self.theTemplate )
layoutBuffer.setUndoFlag( True )
newName = "copyOf" + self.theTemplate
newName = self.theReceiver.getUniqueLayoutName( newName )
self.theReceiver.theLayoutBufferPaster.pasteLayoutBuffer( layoutBuffer, newName )
self.theReverseCommandList = [ DeleteLayout( self.theReceiver, newName ) ]
return True
def createReverseCommand( self ):
self.theReverseCommandList = None
class PasteLayout(LayoutCommand):
"""
    arg1: layoutbuffer
    arg2: new name (submit None to keep the buffer's name)
    arg3: show flag
"""
RECEIVER = 'LayoutManager'
ARGS_NO = 3
BUFFER = 0
NEWNAME = 1
SHOW = 2
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.theBuffer = self.theArgs[ self.BUFFER ]
self.newName = self.theArgs[ self.NEWNAME ]
if self.newName != None:
if not isIDEligible( self.newName ):
return False
self.isShow = self.theArgs[ self.SHOW ]
return True
def do(self):
overWrite = False
if self.newName == None:
self.newName = self.theBuffer.getName()
if self.theReceiver.doesLayoutExist(self.newName):
#if self.theReceiver.theModelEditor.printMessage( "Do you want to overwrite layout %s"%self.newName ) = ME_RESULT_OK:
# get copy of layout
layoutBuffer = self.theReceiver.theLayoutBufferFactory.createLayoutBuffer( self.newName )
layoutBuffer.setUndoFlag( True )
            #check if layout was shown, and set flag in pastelayout command
self.theReverseCommandList = [ PasteLayout( self.theReceiver, layoutBuffer, None, self.isShow ) ]
self.theReceiver.deleteLayout( self.newName)
else:
self.theReverseCommandList = [ DeleteLayout( self.theReceiver, self.newName ) ]
self.theReceiver.theLayoutBufferPaster.pasteLayoutBuffer( self.theBuffer, self.newName )
if self.isShow:
self.theReceiver.showLayout(self.newName)
return True
def createReverseCommand( self ):
self.theReverseCommandList = None
def getAffected( self ):
return (self.RECEIVER, None )
class CreateObject(LayoutCommand):
"""
    args: objectid, type, fullid, x, y, parent
"""
RECEIVER = 'Layout'
ARGS_NO = 6
OBJECTID = 0
TYPE = 1
FULLID = 2
X = 3
Y = 4
PARENT = 5
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.theType = self.theArgs[ self.TYPE ]
self.theFullID = self.theArgs[ self.FULLID ]
self.x = self.theArgs[ self.X ]
self.y = self.theArgs[ self.Y ]
self.theParentID = self.theArgs[ self.PARENT ].getID()
#print self.theParentID
#print self.theArgs
return True
def getID(self):
return self.theArgs[ self.OBJECTID ]
def do(self):
theParent = self.theReceiver.getObject( self.theParentID )
#print theParent
self.theReceiver.createObject(self.objectID, self.theType, self.theFullID, self.x, self.y, theParent )
return True
def createReverseCommand( self ):
self.theReverseCommandList = [ DeleteObject( self.theReceiver, self.objectID ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class DeleteObject(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 1
OBJECTID = 0
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
return True
def do(self):
objectBuffer = self.theReceiver.theLayoutBufferFactory.createObjectBuffer( self.theReceiver.getName(), self.objectID )
anObject = self.theReceiver.getObject(self.objectID)
aParent = anObject.getParent()
if aParent.__class__.__name__ != 'Layout':
aParentID = anObject.getParent().getID()
else:
aParentID ='System0'
self.theReverseCommandList = [ UndeleteObject( self.theReceiver, objectBuffer, None, None, aParentID ) ]
self.theReceiver.deleteObject(self.objectID)
return True
def createReverseCommand( self ):
self.theReverseCommandList = None
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class ChangeLayoutProperty(LayoutCommand):
"""
args:
"""
RECEIVER = 'Layout'
ARGS_NO=2
PROPERTYNAME=0
PROPERTYVALUE=1
def checkArgs( self ):
if not LayoutCommand.checkArgs(self):
return False
self.propertyName= self.theArgs[ self.PROPERTYNAME ]
self.propertyValue= self.theArgs[ self.PROPERTYVALUE ]
self.oldPropertyValue=self.theReceiver.getProperty(self.propertyName)
return True
def do( self ):
self.theReceiver.setProperty(self.propertyName,self.propertyValue)
return True
def createReverseCommand( self ):
self.theReverseCommandList=None
if self.oldPropertyValue != None:
revcom = ChangeLayoutProperty( self.theReceiver, self.propertyName, self.oldPropertyValue )
self.theReverseCommandList = [ revcom ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class SetObjectProperty(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 3
OBJECTID = 0
PROPERTYNAME = 1 # if None, get it from buffer
NEWVALUE = 2 # if None get it from buffer
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.propertyName = self.theArgs[ self.PROPERTYNAME ]
self.newValue = self.theArgs[ self.NEWVALUE ]
return True
def do(self):
# get object
theObject = self.theReceiver.getObject( self.objectID )
theObject.setProperty( self.propertyName, self.newValue )
return True
def createReverseCommand( self ):
# store old value
oldValue = copyValue( self.theReceiver.getObject(self.objectID).getProperty( self.propertyName ) )
self.theReverseCommandList = [ SetObjectProperty( self.theReceiver, self.objectID, self.propertyName, oldValue ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class PasteObject(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 4
BUFFER = 0
X = 1 # if None, get it from buffer
Y = 2 # if None get it from buffer
PARENTID = 3 # cannot be None
def checkArgs( self ):
# no argument check - suppose call is right
self.theBuffer = self.theArgs[ self.BUFFER ]
self.x = self.theArgs[ self.X ]
self.y = self.theArgs[ self.Y ]
self.theParentID = self.theArgs[ self.PARENTID ]
return True
def do(self):
if self.theBuffer.__class__.__name__ == "MultiObjectBuffer":
self.theReceiver.theLayoutBufferPaster.pasteMultiObjectBuffer( self.theReceiver, self.theBuffer, self.x, self.y, self.theParentID )
else:
self.theReceiver.theLayoutBufferPaster.pasteObjectBuffer( self.theReceiver, self.theBuffer, self.x, self.y, self.theParentID )
return True
def createReverseCommand( self ):
self.theReverseCommandList = []
if self.theBuffer.__class__.__name__ == "MultiObjectBuffer":
for aSystemBufferName in self.theBuffer.getSystemObjectListBuffer().getObjectBufferList():
aSystemBuffer = self.theBuffer.getSystemObjectListBuffer().getObjectBuffer( aSystemBufferName )
self.__createReverseCommandForBuffer( aSystemBuffer )
for aBufferName in self.theBuffer.getObjectListBuffer().getObjectBufferList():
anObjectBuffer = self.theBuffer.getObjectListBuffer().getObjectBuffer( aBufferName )
self.__createReverseCommandForBuffer( anObjectBuffer )
else:
self.__createReverseCommandForBuffer( self.theBuffer )
def __createReverseCommandForBuffer( self, anObjectBuffer ):
aType = anObjectBuffer.getProperty( OB_TYPE )
if anObjectBuffer.getUndoFlag():
newID = anObjectBuffer.getID()
else:
# get it from really pasted ones
newID = self.theReceiver.getUniqueObjectID( aType )
anObjectBuffer.setID( newID )
anObjectBuffer.noNewID = True
self.theReverseCommandList += [ DeleteObject( self.theReceiver,newID ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class UndeleteObject(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 4
BUFFER = 0
X = 1 # if None, get it from buffer
Y = 2 # if None get it from buffer
PARENTID = 3 # cannot be None
def checkArgs( self ):
# no argument check - suppose call is right
self.theBuffer = self.theArgs[ self.BUFFER ]
self.x = self.theArgs[ self.X ]
self.y = self.theArgs[ self.Y ]
self.theParentID = self.theArgs[ self.PARENTID ]
self.theBuffer.setUndoFlag ( True )
return True
def do(self):
self.theReceiver.theLayoutBufferPaster.pasteObjectBuffer( self.theReceiver, self.theBuffer, self.x, self.y, self.theParentID )
return True
def createReverseCommand( self ):
self.theReverseCommandList = [ ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class MoveObject(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 4
OBJECTID = 0
NEWX = 1
NEWY = 2
NEWPARENT = 3
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.newx = self.theArgs[ self.NEWX ]
self.newy = self.theArgs[ self.NEWY ]
#self.newParent = self.theArgs[ self.NEWPARENT ]
self.newParent=None
return True
def do(self):
a = self.theReceiver.getObject( self.objectID )
self.theReceiver.moveObject( self.objectID, self.newx, self.newy, self.newParent )
return True
def createReverseCommand( self ):
theObject = self.theReceiver.getObject( self.objectID )
oldX = theObject.getProperty( OB_POS_X )
oldY = theObject.getProperty( OB_POS_Y )
self.theReverseCommandList = [ MoveObject( self.theReceiver, self.objectID, oldX, oldY ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class ResizeObject(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 5
OBJECTID = 0
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.up = self.theArgs[ self.UP ]
self.down = self.theArgs[ self.DOWN ]
self.left = self.theArgs[ self.LEFT ]
self.right = self.theArgs[ self.RIGHT ]
return True
def do(self):
self.theReceiver.resizeObject( self.objectID, self.up, self.down, self.left, self.right )
return True
def createReverseCommand( self ):
antiUp = -self.up
antiDown = -self.down
antiLeft = -self.left
antiRight = -self.right
self.theReverseCommandList = [ ResizeObject( self.theReceiver, self.objectID, antiUp, antiDown, antiLeft, antiRight ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class CreateConnection(LayoutCommand):
"""
args: objectid
"""
RECEIVER = 'Layout'
ARGS_NO = 7
OBJECTID = 0
PROCESSOBJECTID = 1
VARIABLEOBJECTID = 2
PROCESSRING = 3
VARIABLERING = 4
DIRECTION = 5
VARREFNAME = 6
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.processObjectID = self.theArgs[ self.PROCESSOBJECTID ]
self.variableObjectID = self.theArgs[ self.VARIABLEOBJECTID ]
self.processRing = self.theArgs[ self.PROCESSRING ]
self.variableRing = self.theArgs[ self.VARIABLERING ]
self.direction = self.theArgs[ self.DIRECTION ]
self.varrefName = self.theArgs[ self.VARREFNAME ]
return True
def do(self):
self.theReceiver.createConnectionObject( self.objectID, self.processObjectID, self.variableObjectID, self.processRing, self.variableRing, self.direction, self.varrefName )
return True
def createReverseCommand( self ):
self.theReverseCommandList = [ DeleteObject( self.theReceiver, self.objectID ) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
class RedirectConnection(LayoutCommand):
"""
args: anObjectID, newProcessObjectID, newVariableObjectID = None, newRing = None
    # arguments that are None mean they don't change
"""
RECEIVER = 'Layout'
ARGS_NO = 6
OBJECTID = 0
NEWPROCESSOBJECTID = 1
NEWVARIABLEOBJECTID = 2 # it is either a valid objectID or a pair of values [x,y] indicating the new endpoint
NEWPROCESSRING = 3
NEWVARIABLERING = 4
NEWVARREFNAME = 5 # can be none
def checkArgs( self ):
# no argument check - suppose call is right
self.objectID = self.theArgs[ self.OBJECTID ]
self.newProcessObjectID = self.theArgs[ self.NEWPROCESSOBJECTID ]
self.newVariableObjectID = self.theArgs[ self.NEWVARIABLEOBJECTID ]
self.newProcessRing = self.theArgs[ self.NEWPROCESSRING ]
self.newVariableRing = self.theArgs[ self.NEWVARIABLERING ]
self.newVarrefName = self.theArgs[ self.NEWVARREFNAME ]
return True
def do(self):
self.theReceiver.redirectConnectionObject( self.objectID, self.newProcessObjectID, self.newVariableObjectID, self.newProcessRing, self.newVariableRing,self.newVarrefName )
return True
def createReverseCommand( self ):
theObject = self.theReceiver.getObject( self.objectID )
if self.newProcessObjectID == None:
oldProcessObjectID = None
oldProcessRing = None
else:
oldProcessObjectID = theObject.getProperty( CO_PROCESS_ATTACHED )
oldProcessRing = theObject.getProperty( CO_PROCESS_RING )
if self.newVariableObjectID == None:
oldVariableObjectID = None
oldVariableRing = None
else:
oldVariableObjectID = theObject.getProperty( CO_VARIABLE_ATTACHED )
if oldVariableObjectID == None:
oldVariableObjectID = theObject.getProperty( CO_ENDPOINT2 )
oldVariableRing = theObject.getProperty( CO_VARIABLE_RING )
self.theReverseCommandList = [ RedirectConnection( self.theReceiver, self.objectID, oldProcessObjectID, oldVariableObjectID, oldProcessRing, oldVariableRing, self.newVarrefName) ]
def getAffected( self ):
return (self.RECEIVER, self.theReceiver )
|
ecell/ecell3
|
ecell/frontend/model-editor/ecell/ui/model_editor/LayoutCommand.py
|
Python
|
lgpl-3.0
| 21,379
|
# Leap + InMoov hand version MRL above 2000
inmoov = Runtime.createAndStart("inmoov","InMoov")
inmoov.startRightHand("COM7","atmega2560")
inmoov.rightHand.index.map(0,180,0,160)
inmoov.rightHand.thumb.map(0,180,55,135)
inmoov.rightHand.majeure.map(0,180,50,170)
inmoov.rightHand.ringFinger.map(0,180,48,145)
inmoov.rightHand.pinky.map(0,180,30,168)
inmoov.rightHand.wrist.map(0,180,10,170)#rollwrist
inmoov.rightHand.setVelocity(-1,-1,-1,-1,-1,-1)
sleep(1)
inmoov.rightHand.startLeapTracking()
# inmoov.rightHand.stopLeapTracking()
|
MyRobotLab/pyrobotlab
|
home/hairygael/InMoov4.LeapMotion.py
|
Python
|
apache-2.0
| 533
|
# simpleui implements a number of simple UI patterns with fallback to CLI if the
# selected GUI fails.
#
# Copyright (C) 2012 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
COMMONUI_AVAILABLE = True
__all__ = ["COMMONUI_AVAILABLE", "simpleui_Impl"]
|
nigelb/simpleui
|
simpleui/cli_impl/__init__.py
|
Python
|
gpl-3.0
| 844
|
from django.core.urlresolvers import resolve
from django.test import TestCase
from django.http import HttpRequest
from django.template.loader import render_to_string
# Return the HTML of home_page.
from schedule.views import home_page
# Import Name class from models.
from schedule.models import Name
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
# Find the correct mapping of the site's root
# to the home_page function.
found = resolve('/')
self.assertEqual(found.func, home_page)
def test_home_page_returns_correct_html(self):
        # Request object Django will see when the page is requested by a user's browser.
request = HttpRequest()
# Gives a response after being passed from the home_page view.
response = home_page(request)
# No longer check constants, instead check to make
# sure the right template is being rendered.
expected_html = render_to_string('schedule/index.html', request=request)
        # Convert bytes into a Python unicode string to
# allow the comparison of strings with strings,
# rather than bytes with bytes.
self.assertEqual(response.content.decode(), expected_html)
def test_home_page_displays_all_names(self):
# Create objects to model class Name.
Name.objects.create(text='namey 1')
Name.objects.create(text='namey 2')
request = HttpRequest()
response = home_page(request)
# Check that the objects from Name appear.
self.assertIn('namey 1', response.content.decode())
self.assertIn('namey 2', response.content.decode())
def test_home_page_can_save_a_POST_request(self):
        # Request object Django will see when the page is requested by a user's browser.
request = HttpRequest()
# Adapt previous method to add the POST request.
request.method = 'POST'
request.POST['name_text'] = 'A new name'
# Gives a response after being passed from the home_page view.
response = home_page(request)
        # Ensure the new name has been saved to the database.
self.assertEqual(Name.objects.count(), 1)
# Check that the first name's text in the database is correct.
new_name = Name.objects.first()
self.assertEqual(new_name.text, 'A new name')
def test_home_page_redirects_after_POST(self):
request = HttpRequest()
# Adapt previous method to add the POST request.
request.method = 'POST'
request.POST['name_text'] = 'A new name'
# Gives a response after being passed from the home_page view.
response = home_page(request)
# Ensure the status code matches.
self.assertEqual(response.status_code, 302)
# Ensure the location after the HTTP redirect matches.
self.assertEqual(response['location'], '/schedules/the-only-schedule-in-the-world/')
def test_home_page_only_saves_names_when_necessary(self):
        # Request object Django will see when the page is requested by a user's browser.
request = HttpRequest()
        # Pass the request straight to the home_page view.
home_page(request)
# Ensure the number of objects in Name class match.
self.assertEqual(Name.objects.count(), 0)
class ListViewTest(TestCase):
def test_home_page_displays_all_names(self):
# Create objects to model class Name.
Name.objects.create(text='namey 1')
Name.objects.create(text='namey 2')
# Similarly to the API Selenium uses, Django TestCase uses an
# attribute known as self.client to .get the URL being tested.
response = self.client.get('/schedules/the-only-schedule-in-the-world/')
# Rather than assertIn/response.content.decode()
# Django provides assertContains which deals well with
# responses and the bytes of their content.
self.assertContains(response, 'namey 1')
self.assertContains(response, 'namey 2')
# Object-Relational Mapping.
class ItemModelTest(TestCase):
def test_saving_and_retrieving_names(self):
# Import the class Name.
first_name = Name()
# Specify and save the first object's text field attribute.
first_name.text = 'The first (ever) name'
first_name.save()
# Import the class Name.
second_name = Name()
# Specify and save the second object's text field attribute.
second_name.text = 'Second name'
second_name.save()
        # Retrieve all records for Name.
saved_names = Name.objects.all()
        # Ensure both objects' text field attributes have been saved.
self.assertEqual(saved_names.count(), 2)
# Get the first object's text field attribute.
first_saved_name = saved_names[0]
# Get the second object's text field attribute.
second_saved_name = saved_names[1]
        # Ensure both objects' text field attributes match what was saved.
self.assertEqual(first_saved_name.text, 'The first (ever) name')
self.assertEqual(second_saved_name.text, 'Second name')
|
Giovanni21M/SecP
|
schedule/unit_tests/tests.py
|
Python
|
mit
| 5,147
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Project/TranslationPropertiesDialog.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TranslationPropertiesDialog(object):
def setupUi(self, TranslationPropertiesDialog):
TranslationPropertiesDialog.setObjectName("TranslationPropertiesDialog")
TranslationPropertiesDialog.resize(592, 543)
TranslationPropertiesDialog.setSizeGripEnabled(True)
self._2 = QtWidgets.QVBoxLayout(TranslationPropertiesDialog)
self._2.setObjectName("_2")
self._3 = QtWidgets.QGridLayout()
self._3.setObjectName("_3")
self.transBinPathEdit = QtWidgets.QLineEdit(TranslationPropertiesDialog)
self.transBinPathEdit.setObjectName("transBinPathEdit")
self._3.addWidget(self.transBinPathEdit, 3, 0, 1, 1)
self.label = QtWidgets.QLabel(TranslationPropertiesDialog)
self.label.setObjectName("label")
self._3.addWidget(self.label, 2, 0, 1, 2)
self.transPatternEdit = QtWidgets.QLineEdit(TranslationPropertiesDialog)
self.transPatternEdit.setObjectName("transPatternEdit")
self._3.addWidget(self.transPatternEdit, 1, 0, 1, 1)
self.textLabel1_3 = QtWidgets.QLabel(TranslationPropertiesDialog)
self.textLabel1_3.setWordWrap(True)
self.textLabel1_3.setObjectName("textLabel1_3")
self._3.addWidget(self.textLabel1_3, 0, 0, 1, 2)
self.transPatternButton = QtWidgets.QToolButton(TranslationPropertiesDialog)
self.transPatternButton.setObjectName("transPatternButton")
self._3.addWidget(self.transPatternButton, 1, 1, 1, 1)
self.transBinPathButton = QtWidgets.QToolButton(TranslationPropertiesDialog)
self.transBinPathButton.setObjectName("transBinPathButton")
self._3.addWidget(self.transBinPathButton, 3, 1, 1, 1)
self._2.addLayout(self._3)
self.exceptionsGroup = QtWidgets.QGroupBox(TranslationPropertiesDialog)
self.exceptionsGroup.setObjectName("exceptionsGroup")
self._4 = QtWidgets.QGridLayout(self.exceptionsGroup)
self._4.setObjectName("_4")
self.exceptDirButton = QtWidgets.QPushButton(self.exceptionsGroup)
self.exceptDirButton.setObjectName("exceptDirButton")
self._4.addWidget(self.exceptDirButton, 2, 3, 1, 1)
self.exceptFileButton = QtWidgets.QPushButton(self.exceptionsGroup)
self.exceptFileButton.setObjectName("exceptFileButton")
self._4.addWidget(self.exceptFileButton, 2, 2, 1, 1)
self.addExceptionButton = QtWidgets.QPushButton(self.exceptionsGroup)
self.addExceptionButton.setEnabled(False)
self.addExceptionButton.setObjectName("addExceptionButton")
self._4.addWidget(self.addExceptionButton, 2, 1, 1, 1)
self.deleteExceptionButton = QtWidgets.QPushButton(self.exceptionsGroup)
self.deleteExceptionButton.setEnabled(False)
self.deleteExceptionButton.setObjectName("deleteExceptionButton")
self._4.addWidget(self.deleteExceptionButton, 2, 0, 1, 1)
self.exceptionEdit = QtWidgets.QLineEdit(self.exceptionsGroup)
self.exceptionEdit.setObjectName("exceptionEdit")
self._4.addWidget(self.exceptionEdit, 1, 0, 1, 4)
self.exceptionsList = QtWidgets.QListWidget(self.exceptionsGroup)
self.exceptionsList.setAlternatingRowColors(True)
self.exceptionsList.setObjectName("exceptionsList")
self._4.addWidget(self.exceptionsList, 0, 0, 1, 4)
self._2.addWidget(self.exceptionsGroup)
self.buttonBox = QtWidgets.QDialogButtonBox(TranslationPropertiesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self._2.addWidget(self.buttonBox)
self.label.setBuddy(self.transBinPathEdit)
self.textLabel1_3.setBuddy(self.transPatternEdit)
self.retranslateUi(TranslationPropertiesDialog)
self.buttonBox.accepted.connect(TranslationPropertiesDialog.accept)
self.buttonBox.rejected.connect(TranslationPropertiesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(TranslationPropertiesDialog)
TranslationPropertiesDialog.setTabOrder(self.transPatternEdit, self.transPatternButton)
TranslationPropertiesDialog.setTabOrder(self.transPatternButton, self.transBinPathEdit)
TranslationPropertiesDialog.setTabOrder(self.transBinPathEdit, self.transBinPathButton)
TranslationPropertiesDialog.setTabOrder(self.transBinPathButton, self.exceptionsList)
TranslationPropertiesDialog.setTabOrder(self.exceptionsList, self.exceptionEdit)
TranslationPropertiesDialog.setTabOrder(self.exceptionEdit, self.deleteExceptionButton)
TranslationPropertiesDialog.setTabOrder(self.deleteExceptionButton, self.addExceptionButton)
TranslationPropertiesDialog.setTabOrder(self.addExceptionButton, self.exceptFileButton)
TranslationPropertiesDialog.setTabOrder(self.exceptFileButton, self.exceptDirButton)
def retranslateUi(self, TranslationPropertiesDialog):
_translate = QtCore.QCoreApplication.translate
TranslationPropertiesDialog.setWindowTitle(_translate("TranslationPropertiesDialog", "Translation Properties"))
self.transBinPathEdit.setToolTip(_translate("TranslationPropertiesDialog", "Enter the path for the binary translation files (*.qm)"))
self.transBinPathEdit.setWhatsThis(_translate("TranslationPropertiesDialog", "<b>Binary Translations Path</b>\n"
"<p>Enter the directory for the binary translation files (*.qm). Leave it empty to store them together with the *.ts files.</p>"))
self.label.setText(_translate("TranslationPropertiesDialog", "&Binary Translations Path:"))
self.transPatternEdit.setToolTip(_translate("TranslationPropertiesDialog", "Enter the path pattern for the translation files"))
self.transPatternEdit.setWhatsThis(_translate("TranslationPropertiesDialog", "<b>Translation Pattern</b>\n"
"<p>Enter the path pattern for the translation files using %language% at the place of the language code (e.g. /path_to_eric/i18n/eric6_%language%.ts). This will result in translation files like /path_to_eric/i18n/eric6_de.ts.</p>"))
self.textLabel1_3.setText(_translate("TranslationPropertiesDialog", "&Translation Path Pattern:\n"
"(Use \'%language%\' where the language code should be inserted, e.g. i18n/eric6_%language%.ts)"))
self.transPatternButton.setToolTip(_translate("TranslationPropertiesDialog", "Show directory selection dialog"))
self.transPatternButton.setWhatsThis(_translate("TranslationPropertiesDialog", "<b>Translation Pattern</b>\n"
"<p>Select a translation file via a file selection dialog.</p>"))
self.transBinPathButton.setToolTip(_translate("TranslationPropertiesDialog", "Show directory selection dialog"))
self.transBinPathButton.setWhatsThis(_translate("TranslationPropertiesDialog", "<b>Binary Translations Path</b>\n"
"<p>Select the directory for the binary translations via a directory selection dialog.</p>"))
self.exceptionsGroup.setTitle(_translate("TranslationPropertiesDialog", "Exclude from translation"))
self.exceptDirButton.setToolTip(_translate("TranslationPropertiesDialog", "Press to select a directory via a selection dialog"))
self.exceptDirButton.setText(_translate("TranslationPropertiesDialog", "Select d&irectory..."))
self.exceptFileButton.setToolTip(_translate("TranslationPropertiesDialog", "Press to select a file via a selection dialog"))
self.exceptFileButton.setText(_translate("TranslationPropertiesDialog", "Select &file..."))
self.addExceptionButton.setToolTip(_translate("TranslationPropertiesDialog", "Press to add the entered path or file to the list"))
self.addExceptionButton.setText(_translate("TranslationPropertiesDialog", "&Add"))
self.deleteExceptionButton.setToolTip(_translate("TranslationPropertiesDialog", "Press to delete the selected entry from the list"))
self.deleteExceptionButton.setText(_translate("TranslationPropertiesDialog", "&Delete"))
self.exceptionEdit.setToolTip(_translate("TranslationPropertiesDialog", "Enter a path or file to be added"))
        self.exceptionsList.setToolTip(_translate("TranslationPropertiesDialog", "List of paths or files to exclude from translation"))
self.exceptionsList.setSortingEnabled(True)
|
davy39/eric
|
Project/Ui_TranslationPropertiesDialog.py
|
Python
|
gpl-3.0
| 8,712
|