hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e33749578fdd9ea385b7341de972af59efa5bad | 13,852 | py | Python | examples/rough_translated1/translate_clang2.py | JaneliaSciComp/osgpyplusplus | a5ae3f69c7e9101a32d8cc95fe680dab292f75ac | [
"BSD-3-Clause"
] | 17 | 2015-06-01T12:19:46.000Z | 2022-02-12T02:37:48.000Z | examples/rough_translated1/translate_clang2.py | cmbruns/osgpyplusplus | f8bfca2cf841e15f6ddb41c958f3ad0d0b9e4b75 | [
"BSD-3-Clause"
] | 7 | 2015-07-04T14:36:49.000Z | 2015-07-23T18:09:49.000Z | examples/rough_translated1/translate_clang2.py | cmbruns/osgpyplusplus | f8bfca2cf841e15f6ddb41c958f3ad0d0b9e4b75 | [
"BSD-3-Clause"
] | 7 | 2015-11-28T17:00:31.000Z | 2020-01-08T07:00:59.000Z | #!/bin/env python
# Module to help convert OpenSceneGraph C++ examples to python
import clang.cindex
from clang.cindex import CursorKind, TokenKind
# Number of spaces per indentation level in the generated python source.
INDENT_SIZE = 4
# Conversion from C++ punctuation tokens to their python equivalents.
# Tokens mapped to the empty string (braces, '&', ';') are simply dropped.
punctuation_translator = {
    '->': '.',
    '::': '.',
    '{': '',
    '}': '',
    ',': ', ',
    '&': '',
    ';': '',
    '=': ' = ',
}
# Conversion from C++ keywords to python keywords/literals.
keyword_translator = {
    'catch': 'except',
    'double': 'float',
    'false': 'False',
    'long': 'int',
    'nullptr': 'None',
    'short': 'int',
    'this': 'self',
    'throw': 'raise',
    'true': 'True',
}
# Conversion from C++ operator-method names to python special method names.
method_translator = {
    'operator()': '__call__',
}
def py_type(cursor):
    """Convert a C++ type-reference cursor into python type-name text.

    Yields a single string built from the cursor's tokens: identifiers are
    kept, '::'/'->' become '.', and keywords (e.g. "public") are dropped.
    """
    t = ""
    # Limit token parsing to tokens that fall within the specified cursor bounds
    # (the token stream can run past the cursor's own extent).
    end_pos = [cursor.extent.end.line, cursor.extent.end.column]
    for token in cursor.get_tokens():
        if token.location.line > end_pos[0]:
            break
        if token.location.line == end_pos[0] and token.location.column > end_pos[1]:
            break
        if token.kind == TokenKind.KEYWORD:
            continue # no keywords, like "public"
        elif token.kind == TokenKind.IDENTIFIER:
            t += token.spelling
        elif token.spelling in punctuation_translator and token.kind == TokenKind.PUNCTUATION:
            t += punctuation_translator[token.spelling]
        else:
            # Unhandled token kind: emit it followed by a "#<kind>" marker so
            # the gap is visible in the generated output (debug aid). TODO??
            t += token.spelling # TODO??
            t += "#"
            t += str(token.kind)
            # print token.kind, token.spelling # TODO
    yield t
def all_nodes(cursor):
    """Return an iterator over every direct child node of *cursor*, in order."""
    return iter(cursor.get_children())
def all_tokens(cursor):
    """Concatenate the spellings of every token lying inside *cursor*'s extent.

    Returns a one-element list so the result can be consumed by the same
    rule machinery that iterates over child sequences.
    """
    start = cursor.extent.start
    end = cursor.extent.end

    def _inside(loc):
        # Reject tokens strictly before the start or strictly after the end.
        if loc.line < start.line or loc.line > end.line:
            return False
        if loc.line == start.line and loc.column < start.column:
            return False
        if loc.line == end.line and loc.column > end.column:
            return False
        return True

    pieces = [tok.spelling for tok in cursor.get_tokens() if _inside(tok.location)]
    return ["".join(pieces)]
def arg_nonmatching(cursor):
    """Render every child of *cursor* except the first name-matching one
    (the callee reference), joined as a comma-separated argument list.

    Returns a one-element list for the rule machinery.
    """
    first_match = next(iter(matching_child(cursor)), None)
    rendered = [
        string_for_cursor(child)
        for child in cursor.get_children()
        if first_match is None or child != first_match
    ]
    return [", ".join(rendered)]
def args(cursor):
    """Build the python parameter list "(a, b, ...)" for a function-like cursor.

    Constructors and non-static C++ methods get an explicit leading "self".
    """
    is_ctor = cursor.kind == CursorKind.CONSTRUCTOR
    is_instance_method = (cursor.kind == CursorKind.CXX_METHOD
                          and not cursor.is_static_method())
    names = ["self"] if (is_ctor or is_instance_method) else []
    names.extend(child.spelling for child in cursor.get_children()
                 if child.kind == CursorKind.PARM_DECL)
    return "(" + ", ".join(names) + ")"
def bases(class_cursor):
    """Yield the python base-class list "(Base1, Base2)" for a class cursor.

    Classes without explicit C++ bases inherit from "object".
    """
    specifiers = [child for child in class_cursor.get_children()
                  if child.kind == CursorKind.CXX_BASE_SPECIFIER]
    if specifiers:
        rendered = ", ".join(
            "".join(string_for_cursor(node) for node in all_nodes(spec))
            for spec in specifiers)
    else:
        rendered = "object"
    yield "(" + rendered + ")"
def call_expression(cursor):
    """Render a C++ call expression as python text fragments.

    Special-cases operator-> (collapses the call to plain "self") and
    operator! (becomes the prefix "not "); everything else is emitted as
    callee(arg, arg, ...).
    """
    # The child whose spelling matches the call cursor's own spelling is the
    # callee reference; the remaining children are the call arguments.
    prefix = list(matching_child(cursor))
    p = ""
    # NOTE(review): str(prefix[0]) is the cursor object's str(), not its
    # spelling -- presumably intended as a non-empty guard; confirm.
    if len(prefix) > 0 and str(prefix[0]) != "":
        p = prefix[0].spelling
    if p == "operator->":
        # Smart-pointer dereference: render as a bare "self" access.
        yield "self"
        for c in arg_nonmatching(cursor):
            yield c
    elif p == "operator!":
        # Logical negation maps to python's prefix "not".
        yield "not "
        for c in arg_nonmatching(cursor):
            yield c
    else:
        # Ordinary call: callee followed by parenthesized argument list.
        for c in prefix:
            yield c
        yield "("
        for c in arg_nonmatching(cursor):
            yield c
        yield ")"
def compound_statement(cursor):
    """Emit each child statement of a compound block ({...}) on its own line,
    preceded by the current indentation.

    indent() is re-evaluated per child because the module-wide indent level
    may change while this lazy generator is being consumed.
    """
    for child in cursor.get_children():
        yield indent(cursor)
        yield child
        yield "\n"
def ctor_init(cursor):
    """Parse out C++ member-initializer-list entries, if any, from a
    constructor cursor and render them as python assignment lines.

    Member initializers become "self.field = value"; anything whose name is
    not a known member is assumed to be a base class and rendered as
    "Base.__init__(self, value)".  Returns the rendered text (may be "").
    """
    # Establish the token window holding the initializer list: it starts
    # after the last constructor parameter and ends just before the first
    # generic child (the constructor body).
    first_pos = [cursor.extent.start.line, cursor.extent.start.column]
    last_pos = [cursor.extent.end.line, cursor.extent.end.column]
    parsed_members = list()
    for c in cursor.get_children():
        if c.kind == CursorKind.PARM_DECL: # go past constructor arguments
            first_pos = [c.extent.end.line, c.extent.end.column+1]
            continue
        if c.kind == CursorKind.MEMBER_REF: # but take note of member initializers
            parsed_members.append(c)
            continue
        if c.kind == CursorKind.CALL_EXPR: # and the second part of the initializer
            continue
        # First generic child found, probably COMPOUND_STMT. Stop searching before this point
        last_pos = [c.location.line, c.location.column - 1]
        break
    # Walk the tokens in that window, splitting on top-level commas after
    # the ':' that introduces the initializer list.
    found_colon = False
    paren_level = 0
    initializers = []
    for t in cursor.get_tokens():
        if t.location.line < first_pos[0]:
            continue
        if t.location.line > last_pos[0]:
            break
        if t.location.line == first_pos[0] and t.location.column < first_pos[1]:
            continue
        if t.location.line == last_pos[0] and t.location.column > last_pos[1]:
            break
        if not found_colon and t.kind == TokenKind.PUNCTUATION and t.spelling == ":":
            found_colon = True
            initializers.append([])
            continue
        if not found_colon:
            continue
        # inside parentheses, anything can happen, without advancing to the next initializer
        if t.kind == TokenKind.PUNCTUATION and t.spelling == "(":
            paren_level += 1
        if t.kind == TokenKind.PUNCTUATION and t.spelling == ")":
            paren_level -= 1
        # Commas signal separate initializers
        if paren_level == 0 and t.kind == TokenKind.PUNCTUATION and t.spelling == ",":
            initializers.append([])
            continue
        # If we get this far, the current token belongs in the current initializer
        initializers[-1].append(t)
    result = ""
    # Emit one initializer per line
    for i in initializers:
        if len(i) < 1:
            continue
        result += indent(cursor)
        # Each initializer must look like "name(...)" and therefore end in ')'.
        if i[-1].spelling != ")":
            print i[-1].kind, i[-1].spelling
            raise Exception("Hey!, I expected a parenthesis there!")
        # Split "name(value)" at the first '(' into name and value tokens.
        pre_paren = []
        in_paren = []
        found_paren = False
        for tok in i[:-1]:
            if not found_paren and tok.spelling == "(":
                found_paren = True
                continue
            if found_paren:
                in_paren.append(tok)
            else:
                pre_paren.append(tok)
        field = translate_tokens(pre_paren)
        value = translate_tokens(in_paren)
        # Is it a field or a base initializer?
        if (field in [c.spelling for c in parsed_members]):
            # Field initializer
            result += "self."+field+" = "+value
        else:
            # Assume base class initializer
            result += field+".__init__(self, "+value+")"
        result += "\n"
    return result
def ctor_nodes(cursor):
    """Yield constructor children, skipping the initializer-list nodes
    (MEMBER_REF / CALL_EXPR) that ctor_init already consumed."""
    skipped = (CursorKind.MEMBER_REF, CursorKind.CALL_EXPR)
    for child in cursor.get_children():
        if child.kind not in skipped:
            yield child
def debug(cursor):
    """Yield the cursor's kind as text (diagnostic rule helper)."""
    yield str(cursor.kind)
def dec_indent(cursor):
    """Decrease the module-wide indentation level by one step.

    Returns an empty list so it can sit in a rule sequence without
    contributing any output text.
    """
    global indent_level
    indent_level -= INDENT_SIZE
    return []
def displayname(cursor):
    """Yield the cursor's displayname attribute."""
    yield cursor.displayname
# Path of the file being translated; set by main() and consulted by
# filter_by_file() to skip cursors pulled in from #included headers.
main_file = None
# Current indentation depth (in spaces) for generated python output.
indent_level = 0
def filter_by_file(cursor):
    """Yield only the children of *cursor* whose location is main_file,
    filtering out everything that came from #include'd headers."""
    for child in cursor.get_children():
        if str(child.location.file) != main_file:
            continue
        yield child
def first_token(cursor):
    """Yield the translated spelling of the first token under *cursor*.

    Uses the builtin next() (Python 2.6+/3 compatible) instead of the
    Python-2-only generator .next() method.  Still raises StopIteration
    for a token-less cursor, exactly as before.
    """
    token = next(cursor.get_tokens())
    yield translate_token(token)
def first_node(cursor):
    """Yield only the first child of *cursor* (nothing for a childless cursor)."""
    for only_child in cursor.get_children():
        yield only_child
        return
def inc_indent(cursor):
    """Increase the module-wide indentation level by one step.

    Returns an empty list so it can sit in a rule sequence without
    contributing any output text.
    """
    global indent_level
    indent_level += INDENT_SIZE
    return []
def indent(cursor):
    """Return the whitespace prefix for the current indentation level."""
    return " "*indent_level
def kind(cursor):
    """Return the cursor's kind rendered as text."""
    return "%s" % (cursor.kind,)
def matching_child(cursor):
    """Yield the children whose spelling equals the parent cursor's spelling
    (typically the callee reference inside a call expression)."""
    target = cursor.spelling
    for candidate in cursor.get_children():
        if candidate.spelling == target:
            yield candidate
def nodes_and_tokens(cursor):
    """Interleave *cursor*'s child AST nodes with the raw tokens between them.

    Yields translated token strings for every token that precedes the next
    child, then the child itself (to be rendered recursively), discarding
    the tokens the child covers; finally flushes the tokens trailing the
    last child up to the end of the cursor's extent.

    Fixes: replaces the Python-2-only generator .next() calls with the
    builtin next() (2.6+/3 compatible) and drops the dead locals l0/c0.
    """
    tokens = cursor.get_tokens()

    def _advance():
        # Pull the next token, or None once the stream is exhausted.
        try:
            return next(tokens)
        except StopIteration:
            return None

    t = _advance()
    for child in cursor.get_children():
        # Emit tokens preceding this child.
        while token_precedes(t, child.extent.start.line, child.extent.start.column):
            yield string_for_token(t)
            t = _advance()
        yield child
        # Discard tokens inside the child; it renders itself.
        while token_precedes(t, child.extent.end.line, child.extent.end.column + 1):
            t = _advance()
    # Emit any tokens remaining before the end of the cursor's own extent.
    while token_precedes(t, cursor.extent.end.line, cursor.extent.end.column + 1):
        yield string_for_token(t)
        t = _advance()
def non_first_nodes(cursor):
    """Yield every child of *cursor* except the first."""
    children = iter(cursor.get_children())
    next(children, None)  # drop the first child, if any
    for child in children:
        yield child
def show_whole_structure(cursor, my_indent=indent_level):
    """Recursively render kind:spelling:displayname for *cursor* and its
    descendants as an indented tree (debugging aid).

    NOTE: the default for my_indent is captured from indent_level at
    definition time, not read dynamically.
    """
    lines = [" "*my_indent + "%s:%s:%s\n" % (cursor.kind, cursor.spelling, cursor.displayname)]
    lines.extend(show_whole_structure(child, my_indent + 4)
                 for child in cursor.get_children())
    return "".join(lines)
def spelling(cursor):
    """Return the cursor's spelling (its short name)."""
    return cursor.spelling
def string_for_token(token):
    """Render one token as python text (thin wrapper over translate_token)."""
    # Debug alternative, kept for reference:
    # return "%s'%s'" % (token.kind, token.spelling)
    return translate_token(token)
def token_precedes(token, line1, col1):
    """Return True when *token* starts strictly before (line1, col1).

    A None token (exhausted stream) never precedes anything.
    """
    if token is None:
        return False
    loc = token.location
    return (loc.line, loc.column) < (line1, col1)
def translate_method(cursor):
    """Return the python name for a C++ method, mapping operator methods
    (e.g. operator()) to their python special-method equivalents."""
    return method_translator.get(cursor.spelling, cursor.spelling)
def translate_token(token):
    """Translate one C++ token spelling to its python equivalent.

    Punctuation and keywords go through their translation tables;
    anything else passes through unchanged.
    """
    text = token.spelling
    if token.kind == TokenKind.PUNCTUATION and text in punctuation_translator:
        return punctuation_translator[text]
    if token.kind == TokenKind.KEYWORD and text in keyword_translator:
        return keyword_translator[text]
    return text
def translate_tokens(tokens):
    """Translate a sequence of tokens and concatenate the results.

    Uses str.join instead of repeated += so the concatenation is linear
    rather than potentially quadratic in the number of tokens.
    """
    return "".join(translate_token(t) for t in tokens)
# Fallback rule: dump "**kind:spelling:displayname" and recurse into the
# children one indent level deeper, so unhandled cursor kinds stay visible
# in the generated output.
default_sequence = [indent, "**", kind, ":", spelling, ":", displayname, "\n", inc_indent, all_nodes, dec_indent]
# Rule table: for each cursor kind, the sequence of literal strings and
# helper callables used to render it as python source (consumed by
# string_for_cursor below).
cursor_sequence = {
    CursorKind.CALL_EXPR: [call_expression],
    CursorKind.CLASS_DECL: ["\n", indent, "class ", spelling, bases, ":\n", inc_indent, all_nodes, dec_indent, "\n"],
    CursorKind.COMPOUND_STMT: [compound_statement],
    CursorKind.CONSTRUCTOR: [indent, "def __init__", args, ":\n", inc_indent, ctor_init, ctor_nodes, dec_indent, "\n"],
    CursorKind.CXX_ACCESS_SPEC_DECL: [all_nodes], # Don't care about "public:" in python...
    CursorKind.CXX_BASE_SPECIFIER: [], # Handled in CLASS_DECL
    CursorKind.CXX_METHOD: [indent, "def ", translate_method, args, ":\n", inc_indent, all_nodes, dec_indent, "\n"],
    CursorKind.CXX_NEW_EXPR: [spelling],
    CursorKind.DECL_REF_EXPR: [spelling, all_nodes], # TODO not sure about the all_nodes...
    CursorKind.DECL_STMT: [nodes_and_tokens],
    CursorKind.FIELD_DECL: [],
    CursorKind.FUNCTION_DECL: [indent, "def ", spelling, args, ":\n", inc_indent, all_nodes, dec_indent, "\n"],
    CursorKind.IF_STMT: ["if ", first_node, ":\n", inc_indent, indent, non_first_nodes, dec_indent],
    CursorKind.INTEGER_LITERAL: [first_token],
    CursorKind.MEMBER_REF_EXPR: [all_nodes, ".", spelling],
    # CursorKind.MEMBER_REF: [".", spelling],
    CursorKind.NAMESPACE_REF: [spelling, ".", all_nodes],
    CursorKind.PARM_DECL: [],
    CursorKind.RETURN_STMT: ["return ", all_nodes],
    CursorKind.TEMPLATE_REF: [spelling, "<", all_nodes, ">"],
    CursorKind.TRANSLATION_UNIT: [filter_by_file],
    CursorKind.TYPE_REF: [py_type, all_nodes],
    CursorKind.UNARY_OPERATOR: [first_token, all_nodes],
    CursorKind.UNEXPOSED_EXPR: [all_nodes],
    CursorKind.VAR_DECL: [spelling, " = ", all_nodes],
}
def string_for_cursor(cursor):
    """Recursively render a cursor (or an already-rendered string) as python.

    Looks up the cursor kind's rule sequence in cursor_sequence and
    concatenates each rule's output; unknown kinds fall back to
    default_sequence.  NOTE: basestring makes this module Python-2-only.
    """
    if isinstance(cursor, basestring):
        return cursor # It's already a string
    try:
        rules = cursor_sequence[cursor.kind]
    except KeyError: # ...undocumented cursor type...?
        rules = default_sequence
    result = ""
    for r in rules:
        if isinstance(r, basestring):
            # Literal text rules are emitted verbatim.
            result += r
        else:
            # Callable rules yield strings and/or child cursors; recurse.
            for child in r(cursor):
                result += string_for_cursor(child)
    return result
def main():
examples_src = "C:/Users/cmbruns/git/osg/examples"
osg_includes = "C:/Users/cmbruns/git/osg/include"
src_file = examples_src + "/osggraphicscost/osggraphicscost.cpp"
index = clang.cindex.Index.create()
translation_unit = index.parse(src_file, args=["-I%s"%osg_includes, '-x', 'c++', '-D__CODE_GENERATOR__'])
global main_file
main_file = str(translation_unit.spelling)
print string_for_cursor(translation_unit.cursor)
if __name__ == "__main__":
main()
| 31.553531 | 119 | 0.605761 |
716d33e682a69aa4594ccde258dca526834b947e | 3,754 | py | Python | rest_framework_dynamicjwt/backends.py | harrytwigg/djangorestframework-simplejwt | 92ef223f273ea3d95bbfc8ea1280b629dfdb4425 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | rest_framework_dynamicjwt/backends.py | harrytwigg/djangorestframework-simplejwt | 92ef223f273ea3d95bbfc8ea1280b629dfdb4425 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | rest_framework_dynamicjwt/backends.py | harrytwigg/djangorestframework-simplejwt | 92ef223f273ea3d95bbfc8ea1280b629dfdb4425 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | import jwt
from django.utils.translation import gettext_lazy as _
from jwt import (
InvalidAlgorithmError, InvalidTokenError, PyJWKClient, algorithms,
)
from .exceptions import TokenBackendError
from .models import AuthenticationSettingsModel
from .utils import format_lazy
# JWS algorithm identifiers this backend accepts; anything else is rejected
# by TokenBackend._validate_algorithm below.
ALLOWED_ALGORITHMS = (
    'HS256',
    'HS384',
    'HS512',
    'RS256',
    'RS384',
    'RS512',
)
class TokenBackend:
    """Thin wrapper around PyJWT that encodes and decodes tokens using the
    algorithm, keys, claims and leeway taken from an
    AuthenticationSettingsModel instance.
    """
    def __init__(self, authentication_settings: AuthenticationSettingsModel):
        self._validate_algorithm(authentication_settings.algorithm)
        self.algorithm = authentication_settings.algorithm
        self.signing_key = authentication_settings.signing_key
        # Empty strings in the settings model mean "not configured".
        self.audience = None if authentication_settings.audience == '' else authentication_settings.audience
        self.issuer = None if authentication_settings.issuer == '' else authentication_settings.issuer
        self.jwks_client = PyJWKClient(
            authentication_settings.jwk_url) if authentication_settings.jwk_url != '' else None
        self.leeway = authentication_settings.leeway
        if authentication_settings.algorithm.startswith("HS"):
            # HMAC algorithms are symmetric: verification uses the signing key.
            self.verifying_key = authentication_settings.signing_key
        else:
            self.verifying_key = authentication_settings.verifying_key
    def _validate_algorithm(self, algorithm):
        """
        Ensure that the nominated algorithm is recognized, and that cryptography is installed for those
        algorithms that require it
        """
        if algorithm not in ALLOWED_ALGORITHMS:
            raise TokenBackendError(format_lazy(
                _("Unrecognized algorithm type '{}'"), algorithm))
        if algorithm in algorithms.requires_cryptography and not algorithms.has_crypto:
            raise TokenBackendError(format_lazy(
                _("You must have cryptography installed to use {}."), algorithm))
    def get_verifying_key(self, token):
        """Return the key used to verify *token*'s signature.

        HMAC uses the shared signing key; otherwise a configured JWKS
        endpoint (keyed by the token's header) takes precedence over the
        static verifying key.
        """
        if self.algorithm.startswith("HS"):
            return self.signing_key
        if self.jwks_client:
            return self.jwks_client.get_signing_key_from_jwt(token).key
        return self.verifying_key
    def encode(self, payload):
        """
        Returns an encoded token for the given payload dictionary.
        The configured audience/issuer are injected as 'aud'/'iss' claims;
        the caller's payload dict is not mutated.
        """
        jwt_payload = payload.copy()
        if self.audience is not None:
            jwt_payload['aud'] = self.audience
        if self.issuer is not None:
            jwt_payload['iss'] = self.issuer
        token = jwt.encode(jwt_payload, self.signing_key,
                           algorithm=self.algorithm)
        if isinstance(token, bytes):
            # For PyJWT <= 1.7.1
            return token.decode('utf-8')
        # For PyJWT >= 2.0.0a1
        return token
    def decode(self, token, verify=True):
        """
        Performs a validation of the given token and returns its payload
        dictionary.
        Raises a `TokenBackendError` if the token is malformed, if its
        signature check fails, or if its 'exp' claim indicates it has expired.
        """
        try:
            return jwt.decode(
                token,
                self.get_verifying_key(token),
                algorithms=[self.algorithm],
                # NOTE(review): the bare `verify=` kwarg is PyJWT 1.x style;
                # PyJWT 2.x uses options['verify_signature'] (also passed
                # below) -- confirm against the pinned PyJWT version.
                verify=verify,
                audience=self.audience,
                issuer=self.issuer,
                leeway=self.leeway,
                options={
                    # Only check 'aud' when an audience is actually configured.
                    'verify_aud': self.audience is not None,
                    'verify_signature': verify,
                },
            )
        except InvalidAlgorithmError as ex:
            raise TokenBackendError(_('Invalid algorithm specified')) from ex
        except InvalidTokenError:
            raise TokenBackendError(_('Token is invalid or expired'))
| 35.752381 | 108 | 0.638519 |
bd53d9cd44a117de96d9fc088923d35716c46b34 | 2,789 | py | Python | uptrends/models/scheduled_report_file_type.py | hpcc-systems/uptrends-python | 2e05ba851a4e65bde3c40514f499c475465bef90 | [
"BSD-3-Clause"
] | null | null | null | uptrends/models/scheduled_report_file_type.py | hpcc-systems/uptrends-python | 2e05ba851a4e65bde3c40514f499c475465bef90 | [
"BSD-3-Clause"
] | null | null | null | uptrends/models/scheduled_report_file_type.py | hpcc-systems/uptrends-python | 2e05ba851a4e65bde3c40514f499c475465bef90 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ScheduledReportFileType(object):
    """Swagger model listing the file formats available for scheduled reports.

    NOTE: This class is auto generated by the swagger code generator program;
    this rewrite preserves its generated behavior exactly.
    """

    # Allowed enum values.
    PDF = "PDF"
    EXCEL = "Excel"
    HTML = "Html"

    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    # This enum-style model carries no attributes, so both are empty.
    swagger_types = {}
    attribute_map = {}

    def __init__(self):
        """ScheduledReportFileType - a model defined in Swagger"""
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        if issubclass(ScheduledReportFileType, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, ScheduledReportFileType)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.357895 | 321 | 0.574041 |
bf52663eef0400a16144a668b1ad068a979ca078 | 4,045 | py | Python | skrebate/multisurfstar.py | sauravbose/scikit-rebate | e620798ba5af5cf58fc58bed0e7ac83b12119b66 | [
"MIT"
] | null | null | null | skrebate/multisurfstar.py | sauravbose/scikit-rebate | e620798ba5af5cf58fc58bed0e7ac83b12119b66 | [
"MIT"
] | null | null | null | skrebate/multisurfstar.py | sauravbose/scikit-rebate | e620798ba5af5cf58fc58bed0e7ac83b12119b66 | [
"MIT"
] | 1 | 2018-08-14T00:06:37.000Z | 2018-08-14T00:06:37.000Z | # -*- coding: utf-8 -*-
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Pete Schmitt (pschmitt@upenn.edu)
- Ryan J. Urbanowicz (ryanurb@upenn.edu)
- Weixuan Fu (weixuanf@upenn.edu)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
from .surfstar import SURFstar
from .scoring_utils import MultiSURFstar_compute_scores
from sklearn.externals.joblib import Parallel, delayed
class MultiSURFstar(SURFstar):
    """Feature selection using data-mined expert knowledge.

    Based on the MultiSURF algorithm as introduced in:
    Moore, Jason et al. Multiple Threshold Spatially Uniform ReliefF
    for the Genetic Analysis of Complex Human Diseases.
    """
    ############################# MultiSURF* ########################################
    def _find_neighbors(self, inst):
        """Split every other instance into "near" and "far" neighbors of
        instance *inst*.

        Thresholds are the instance's mean pairwise distance minus/plus half
        its standard deviation; instances falling in the dead-band between
        the two thresholds belong to neither group.  Returns a pair of
        index arrays (NN_near, NN_far).
        """
        dist_vect = []
        for j in range(self._datalen):
            if inst != j:
                # _distance_array is indexed [larger][smaller] -- presumably
                # a triangular pairwise-distance matrix from the base class.
                locator = [inst, j]
                if inst < j:
                    locator.reverse()
                dist_vect.append(self._distance_array[locator[0]][locator[1]])
        dist_vect = np.array(dist_vect)
        inst_avg_dist = np.average(dist_vect)
        inst_std = np.std(dist_vect) / 2.
        near_threshold = inst_avg_dist - inst_std
        far_threshold = inst_avg_dist + inst_std
        NN_near = []
        NN_far = []
        for j in range(self._datalen):
            if inst != j:
                locator = [inst, j]
                if inst < j:
                    locator.reverse()
                if self._distance_array[locator[0]][locator[1]] < near_threshold:
                    NN_near.append(j)
                elif self._distance_array[locator[0]][locator[1]] > far_threshold:
                    NN_far.append(j)
        return np.array(NN_near), np.array(NN_far)
    def _run_algorithm(self):
        """Compute MultiSURF* feature scores by summing per-instance score
        contributions over every training instance, optionally in parallel
        via joblib when self.n_jobs != 1.  Returns the score array.
        """
        attr = self._get_attribute_info()
        nan_entries = np.isnan(self._X)
        # NOTE: the loop variable is an instance index, despite its name.
        NNlist = [self._find_neighbors(datalen) for datalen in range(self._datalen)]
        NN_near_list = [i[0] for i in NNlist]
        NN_far_list = [i[1] for i in NNlist]
        if self.n_jobs != 1:
            scores = np.sum(Parallel(n_jobs=self.n_jobs)(delayed(
                MultiSURFstar_compute_scores)(instance_num, attr, nan_entries, self._num_attributes,
                NN_near, NN_far, self._headers, self._class_type, self._X, self._y, self._labels_std)
                for instance_num, NN_near, NN_far in zip(range(self._datalen), NN_near_list, NN_far_list)), axis=0)
        else:
            scores = np.sum([MultiSURFstar_compute_scores(instance_num, attr, nan_entries, self._num_attributes,
                NN_near, NN_far, self._headers, self._class_type, self._X, self._y, self._labels_std)
                for instance_num, NN_near, NN_far in zip(range(self._datalen), NN_near_list, NN_far_list)], axis=0)
        return np.array(scores)
| 42.578947 | 116 | 0.665019 |
c11e7ec0c5a28c1206735966ac190d52c9adf17a | 24,150 | py | Python | analyzer/libs/pygments/pygments/lexers/css.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 182 | 2017-03-05T07:43:13.000Z | 2022-03-15T13:09:07.000Z | analyzer/libs/pygments/pygments/lexers/css.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 3 | 2021-05-10T18:59:14.000Z | 2021-09-02T01:50:15.000Z | analyzer/libs/pygments/pygments/lexers/css.py | oslab-swrc/juxta | 481cd6f01e87790041a07379805968bcf57d75f4 | [
"MIT"
] | 16 | 2017-03-07T11:01:27.000Z | 2022-01-08T09:21:01.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import iteritems
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer']
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\{', Punctuation, 'content'),
(r'\:[\w-]+', Name.Decorator),
(r'\.[\w-]+', Name.Class),
(r'\#[\w-]+', Name.Function),
(r'@[\w-]+', Keyword, 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'\{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'\}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'\}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline-color',
'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x',
'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top',
'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Keyword),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'red', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number),
# Separate regex for percentages, as can't do word boundaries with %
(r'[.-]?[0-9]*[.]?[0-9]+%', Number),
(r'-?[0-9]+', Number),
(r'[~^*!%&<>|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name)
]
}
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color',
'outline-style', 'outline-width', 'overflow', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'padding', 'page',
'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Constant),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Entity),
(words((
'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#\{', String.Interpol, 'interpolation'),
(r'[~^*!&%<>|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
.. versionadded:: 1.3
"""
name = 'Sass'
aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[\w-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
default('selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
default('value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
| 48.396794 | 98 | 0.50323 |
5987dbfe83457f45086e74527842997f9318ece4 | 755 | py | Python | ckanext/qa/interfaces.py | salsadigitalauorg/ckanext-qa | a86f672bce59337daec197b9346ac850e7bdae21 | [
"MIT"
] | null | null | null | ckanext/qa/interfaces.py | salsadigitalauorg/ckanext-qa | a86f672bce59337daec197b9346ac850e7bdae21 | [
"MIT"
] | 4 | 2020-07-30T03:36:34.000Z | 2022-01-10T23:24:44.000Z | ckanext/qa/interfaces.py | salsadigitalauorg/ckanext-qa | a86f672bce59337daec197b9346ac850e7bdae21 | [
"MIT"
] | 1 | 2020-06-29T00:48:05.000Z | 2020-06-29T00:48:05.000Z | import logging
import ckan.plugins as plugins
from ckan.plugins.interfaces import Interface
log = logging.getLogger(__name__)
class IQA(Interface):
"""
Interface for other plugins to hook into and apply their own custom resource score before it's saved
"""
@classmethod
def custom_resource_score(cls, resource, resource_score):
result = None
for observer in plugins.PluginImplementations(cls):
try:
result = observer.custom_resource_score(resource, resource_score)
except Exception as ex:
log.exception(ex)
# We reraise all exceptions so they are obvious there
# is something wrong
raise
return result
| 29.038462 | 104 | 0.647682 |
c2249c2e5a777ea0ef453b7fb32a67aba038255c | 2,429 | py | Python | bin/basenji_hdf5_sample.py | AndyPJiang/basenji | 64e43570c8bece156b4ab926608014f489b7965e | [
"Apache-2.0"
] | 1 | 2020-05-22T20:53:37.000Z | 2020-05-22T20:53:37.000Z | bin/basenji_hdf5_sample.py | AndyPJiang/basenji | 64e43570c8bece156b4ab926608014f489b7965e | [
"Apache-2.0"
] | null | null | null | bin/basenji_hdf5_sample.py | AndyPJiang/basenji | 64e43570c8bece156b4ab926608014f489b7965e | [
"Apache-2.0"
] | 1 | 2021-02-05T21:01:05.000Z | 2021-02-05T21:01:05.000Z | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser
import h5py
import numpy as np
"""basenji_hdf5_sample.py
Samples sequences from an HDF5 training file.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <in_h5> <out_h5>'
parser = OptionParser(usage)
parser.add_option(
'-s',
dest='seqs_pct',
default=0.25,
type='float',
help='Propostion of sequencs to sample [Default: %default]')
parser.add_option(
'--test',
dest='test_only',
default=False,
action='store_true',
help='Drop the train and validation sets [Default: %default]')
# parser.add_option('-t', dest='targets_pct', default=1, type='float', help='Proportion of targets to sample [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide input and output HDF5')
else:
in_h5 = args[0]
out_h5 = args[1]
in_h5_open = h5py.File(in_h5)
out_h5_open = h5py.File(out_h5, 'w')
for key in in_h5_open.keys():
print(key)
if key[-3:] == '_in' or key[-4:] == '_out':
if not options.test_only or key.startswith('test'):
n_in = in_h5_open[key].shape[0]
n_out = int(n_in * options.seqs_pct)
out_h5_open.create_dataset(key, data=in_h5_open[key][:n_out])
else:
out_h5_open.create_dataset(key, data=in_h5_open[key])
in_h5_open.close()
out_h5_open.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| 31.960526 | 132 | 0.568135 |
b02e9fb1a4986e4956f589b5247dddc98a4e5279 | 331 | py | Python | env/lib/python3.6/site-packages/scipy/_distributor_init.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | venv/lib/python3.7/site-packages/scipy/_distributor_init.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | venv/lib/python3.7/site-packages/scipy/_distributor_init.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | """ Distributor init file
Distributors: you can add custom code here to support particular distributions
of scipy.
For example, this is a good place to put any checks for hardware requirements.
The scipy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""
| 30.090909 | 78 | 0.78852 |
c49fb3e4bdb69689e36863934818acfd4d7d8475 | 1,495 | py | Python | license_protected_downloads/tests/test_models.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | license_protected_downloads/tests/test_models.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | license_protected_downloads/tests/test_models.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | __author__ = 'dooferlad'
import unittest
import hashlib
from django.test import TestCase
from license_protected_downloads.models import License
class LicenseTestCase(TestCase):
def setUp(self):
lic1_text = 'Samsung License'
lic2_text = 'Stericsson License'
lic3_text = 'Linaro License'
digest1 = hashlib.md5(lic1_text).hexdigest()
digest2 = hashlib.md5(lic2_text).hexdigest()
digest3 = hashlib.md5(lic2_text).hexdigest()
self.lic1 = License.objects.create(digest=digest1, text=lic1_text,
theme='samsung')
self.lic2 = License.objects.create(digest=digest2, text=lic2_text,
theme='stericsson')
self.lic3 = License.objects.create(digest=digest3, text=lic3_text,
theme='linaro')
def test_add_license_to_database(self):
self.assertEquals(self.lic1.theme, 'samsung')
self.assertEquals(self.lic2.theme, 'stericsson')
self.assertEquals(self.lic3.theme, 'linaro')
lic1 = License.objects.get(pk=1)
self.assertEquals(lic1.theme, 'samsung')
self.assertEquals(lic1.text, 'Samsung License')
lic2 = License.objects.get(pk=2)
self.assertEquals(lic2.theme, 'stericsson')
self.assertEquals(lic2.text, 'Stericsson License')
lic3 = License.objects.get(pk=3)
self.assertEquals(lic3.theme, 'linaro')
self.assertEquals(lic3.text, 'Linaro License')
if __name__ == '__main__':
unittest.main()
| 35.595238 | 74 | 0.669565 |
f6e52d5dda5e1504835139e490623d4cdb6454de | 9,148 | py | Python | blog/models.py | allink/allink-apps | 101a9e2e0129d932970e0ae89f790d2033a7c805 | [
"BSD-3-Clause"
] | 1 | 2017-03-13T08:49:49.000Z | 2017-03-13T08:49:49.000Z | blog/models.py | allink/allink-apps | 101a9e2e0129d932970e0ae89f790d2033a7c805 | [
"BSD-3-Clause"
] | null | null | null | blog/models.py | allink/allink-apps | 101a9e2e0129d932970e0ae89f790d2033a7c805 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core.cache import cache
from filer.fields.image import FilerImageField
from adminsortable.fields import SortableForeignKey
from parler.models import TranslatableModel, TranslatedFields
from djangocms_text_ckeditor.fields import HTMLField
from cms.models.fields import PlaceholderField
from model_utils.models import TimeFramedModel
from aldryn_translation_tools.models import TranslationHelperMixin
from aldryn_common.admin_fields.sortedm2m import SortedM2MModelField
from allink_core.allink_base.models.choices import SALUTATION_CHOICES
from allink_core.allink_base.models import AllinkManualEntriesMixin, AllinkTranslatedAutoSlugifyMixin
from allink_core.allink_base.models import AllinkBaseModel, AllinkBaseImage, AllinkBaseAppContentPlugin, AllinkAddressFieldsModel, AllinkSimpleRegistrationFieldsModel
from allink_apps.locations.models import Locations
from polymorphic.models import PolymorphicModel
from allink_apps.blog.managers import AllinkEventsManager, AllinkBlogManager
# Blog Parent class
class Blog(PolymorphicModel, TranslationHelperMixin, AllinkTranslatedAutoSlugifyMixin, TranslatableModel, TimeFramedModel, AllinkBaseModel):
"""
Translations
feel free to add app specific fields)
to override slug generation:
slug_source_field_name = 'title'
"""
slug_source_field_name = 'title'
translations = TranslatedFields(
title=models.CharField(
max_length=255
),
slug=models.SlugField(
_(u'Slug'),
max_length=255,
default='',
blank=True,
help_text=_(u'Leave blank to auto-generate a unique slug.')
),
lead=HTMLField(
_(u'Lead Text'),
help_text=_(u'Teaser text that in some cases is used in the list view and/or in the detail view.'),
blank=True,
null=True,
),
text=HTMLField(
_(u'Detailed Text'),
help_text=_(u'The full text in detail view.'),
blank=True,
null=True,
)
)
preview_image = FilerImageField(
verbose_name=_(u'Preview Image'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='%(app_label)s_%(class)s_preview_image',
)
template = models.CharField(
_(u'Template'),
help_text=_(u'Choose a template.'),
max_length=50,
blank=True,
null=True,
)
header_placeholder = PlaceholderField(u'blog_header', related_name='%(app_label)s_%(class)s_header_placeholder')
content_placeholder = PlaceholderField(u'blog_content', related_name='%(app_label)s_%(class)s_content_placeholder')
objects = AllinkBlogManager()
class Meta:
app_label = 'blog'
verbose_name = _('Blog entry')
verbose_name_plural = _('Blog entries')
def get_detail_view(self):
return 'blog:detail'
# News
class News(Blog):
objects = AllinkBlogManager()
class Meta:
app_label = 'blog'
verbose_name = _('News entry')
verbose_name_plural = _('News')
def get_detail_view(self):
return '{}:detail'.format(self._meta.model_name)
# Events
class Events(Blog):
objects = AllinkEventsManager()
translations_events = TranslatedFields(
costs=models.CharField(
max_length=255,
help_text=_(u'Costs'),
blank=True,
null=True,
)
)
form_enabled = models.BooleanField(
_(u'Event Form enabled'),
default=True
)
event_date = models.DateField(
_(u'Event Date'),
blank=True,
null=True,
)
event_time = models.TimeField(
_(u'Event Time'),
blank=True,
null=True,
)
location = models.ForeignKey(
Locations,
blank=True,
null=True,
related_name='events'
)
class Meta:
app_label = 'blog'
verbose_name = _('Event')
verbose_name_plural = _('Events')
def __str__(self):
return u'%s %s' % (self.title, self.event_date)
def show_registration_form(self):
if getattr(self, 'event_date'):
if self.event_date < datetime.now().date():
return False
if self.form_enabled:
return True
else:
return False
def get_detail_view(self):
return '{}:detail'.format(self._meta.model_name)
# APP CONTENT PLUGIN
class BlogAppContentPlugin(AllinkManualEntriesMixin, AllinkBaseAppContentPlugin):
data_model = Blog
manual_entries = SortedM2MModelField(
'{}.{}'.format(data_model._meta.app_label, data_model._meta.model_name), blank=True,
help_text=_('Select and arrange specific entries, or, leave blank to select all. (If '
'manual entries are selected the category filtering will be ignored.)')
)
def get_render_queryset_for_display(self, category=None, filters={}):
"""
returns all data_model objects distinct to id which are in the selected categories
- category: category instance
- filter: list tuple with model fields and value
-> adds additional query
-> Is also defined in AllinkManualEntriesMixin to handel manual entries !!
"""
def ordered_by_events():
if category and getattr(category, 'name') == 'Events':
return True
elif self.categories.count() == 1 and self.categories.first().name == 'Events':
return True
else:
return False
valid_cache_keys = cache.get('render_queryset_for_display_valid_keys_%s' % self.id, [])
cache_key = 'render_queryset_for_display_%s_%s_%s' % (self.id, category.id if category else '', json.dumps(filters))
if cache_key in valid_cache_keys:
cached_qs = cache.get(cache_key, None)
if cached_qs:
return cached_qs
# apply filters from request
queryset = self.data_model.objects.active().filter(**filters)
if self.categories.exists() or category:
if category:
# TODO how can we automatically apply the manager of the subclass?
if ordered_by_events():
queryset = Events.objects.active().filter_by_category(category)
else:
queryset = queryset.filter_by_category(category)
else:
if ordered_by_events():
queryset = Events.objects.active().filter_by_categories(categories=self.categories.all())
else:
queryset = queryset.filter_by_categories(categories=self.categories.all())
if self.categories_and.exists():
queryset = queryset.filter_by_categories(categories=self.categories_and.all())
ordered_qs = self._apply_ordering_to_queryset_for_display(queryset)
# cache for for a half year and add to valid cache keys
cache.set(cache_key, ordered_qs, 60 * 60 * 24 * 180)
valid_cache_keys.append(cache_key)
cache.set('render_queryset_for_display_valid_keys_%s' % self.id, valid_cache_keys, 60 * 60 * 24 * 360)
return ordered_qs
class BlogImage(AllinkBaseImage):
    """Sortable image attached to a Blog entry.

    Sort order is significant: the first image is used as the preview image
    (see the field's ``help_text``). ``blank=True, null=True`` allows an
    image row without an assigned blog entry.
    """
    # NOTE(review): verbose_name is _(u'Images') -- this looks like a label
    # for the relation as a whole rather than the FK target; confirm intended.
    blog = SortableForeignKey(Blog, verbose_name=_(u'Images'), help_text=_(u'The first image will be used as preview image.'), blank=True, null=True)
class EventsRegistration(AllinkAddressFieldsModel, AllinkSimpleRegistrationFieldsModel):
    """Registration of a single person for a specific :class:`Events` instance.

    Combines address fields and the simple registration fields from its two
    base models with event-specific extras (salutation, company, phone).
    Field definitions are kept byte-identical so Django migrations are
    unaffected. (Former commented-out ``job`` and ``terms`` fields removed.)
    """
    # Registrations do not require accepting terms and conditions.
    require_terms = False
    event = models.ForeignKey(Events)
    salutation = models.IntegerField(
        _(u'Salutation'),
        choices=SALUTATION_CHOICES,
        null=True
    )
    company_name = models.CharField(
        _(u'Company'),
        max_length=255,
        blank=True,
        null=True
    )
    phone = models.CharField(
        _(u'Phone'),
        max_length=30,
        blank=True,
        null=True
    )

    def __str__(self):
        """Display the registrant's full name."""
        return u'%s %s' % (self.first_name, self.last_name)

    @classmethod
    def _verbose_from_config(cls, suffix, default):
        # Shared lookup for the two public accessors below: read a per-model
        # override from the project-wide AllinkConfig singleton, falling back
        # to the model's Meta value when AllinkConfig has no such attribute.
        from allink_core.allink_config.models import AllinkConfig
        try:
            return getattr(AllinkConfig.get_solo(), cls._meta.model_name + suffix)
        except AttributeError:
            return default

    @classmethod
    def get_verbose_name(cls):
        """Singular verbose name, optionally overridden in AllinkConfig."""
        return cls._verbose_from_config('_verbose', cls._meta.verbose_name)

    @classmethod
    def get_verbose_name_plural(cls):
        """Plural verbose name, optionally overridden in AllinkConfig."""
        return cls._verbose_from_config('_verbose_plural', cls._meta.verbose_name_plural)
| 31.763889 | 166 | 0.64648 |
a13ba0ab7b10d29396c60d03d3eddd79945c9752 | 136,945 | py | Python | diana/classes/goatools/test_data/goids_GOATOOLS_Consistent_Increase.py | quimaguirre/diana | 930da0ea91ad87e354061af18db6c437a3318366 | [
"MIT"
] | 3 | 2019-07-11T05:32:13.000Z | 2021-03-12T01:10:21.000Z | diana/classes/goatools/test_data/goids_GOATOOLS_Consistent_Increase.py | quimaguirre/diana | 930da0ea91ad87e354061af18db6c437a3318366 | [
"MIT"
] | null | null | null | diana/classes/goatools/test_data/goids_GOATOOLS_Consistent_Increase.py | quimaguirre/diana | 930da0ea91ad87e354061af18db6c437a3318366 | [
"MIT"
] | 1 | 2019-02-12T13:45:42.000Z | 2019-02-12T13:45:42.000Z | """GOATOOLS GOEA on 2015 Gjoneska data using go-basic.obo: fmt(1.2) rel(2017-01-12) 47,943 GO Terms"""
# Created: 2017-01-13
import collections as cx
# Column names of one GOEA result record, in the exact order expected by
# the ``Nt`` records in ``nts`` below.
nt_fields = (
    "p_uncorrected study_count study_n namespace p_fdr_bh study_items NS "
    "pop_items pop_n p_sm_bonferroni is_obsolete GO name pop_count alt_ids "
    "level depth enrichment Cluster geneid_set symbols symbol_set geneids "
    "format_txt hdr_idx is_hdrgo is_usrgo hdr1usr01 dcnt D1 GO_name"
).split()

# Record type for one enrichment-analysis row; namedtuple accepts the
# field names as a sequence directly.
Nt = cx.namedtuple("Nt", nt_fields)
# 60 items
nts = [
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0042127', name='', pop_count='', alt_ids='', level=4, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=271, D1='A', GO_name='regulation of cell proliferation'),
Nt(p_uncorrected=8.314240949182051e-06, study_count=9, study_n=794, namespace='biological_process', p_fdr_bh=0.0038172443824562806, study_items=set([u'ENSMUSG00000079362', u'ENSMUSG00000040264', u'ENSMUSG00000029298', u'ENSMUSG00000060550', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000041515', u'ENSMUSG00000073421', u'ENSMUSG00000024411']), NS='BP', pop_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000026712', u'ENSMUSG00000026615', u'ENSMUSG00000021559', u'ENSMUSG00000019122', u'ENSMUSG00000028268', u'ENSMUSG00000031778', u'ENSMUSG00000032423', u'ENSMUSG00000018927', u'ENSMUSG00000035352', u'ENSMUSG00000029298', u'ENSMUSG00000079363', u'ENSMUSG00000079362', u'ENSMUSG00000040253', u'ENSMUSG00000074129', u'ENSMUSG00000028270', u'ENSMUSG00000057666', u'ENSMUSG00000034974', u'ENSMUSG00000073421', u'ENSMUSG00000000982', u'ENSMUSG00000023259', u'ENSMUSG00000035385', u'ENSMUSG00000060550', u'ENSMUSG00000041515', u'ENSMUSG00000018930', u'ENSMUSG00000024411']), pop_n=13836, p_sm_bonferroni=0.12596906462105725, is_obsolete=False, GO='GO:0071346', name='cellular response to interferon-gamma', pop_count=26, alt_ids=[], level=6, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000079362', u'ENSMUSG00000040264', u'ENSMUSG00000024411', u'ENSMUSG00000060550', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000041515', u'ENSMUSG00000073421', u'ENSMUSG00000029298']), symbols=u'Aqp4 Gbp2 Gbp2b Gbp3 Gbp9 Gm43302 H2-Ab1 H2-Q7 Irf8', symbol_set=set(['Gbp2b', u'Aqp4', u'Gbp3', u'Gbp2', 'H2-Q7', u'Irf8', u'Gbp9', 'Gm43302', 'H2-Ab1']), geneids='ENSMUSG00000060550 ENSMUSG00000040264 ENSMUSG00000079362 ENSMUSG00000024411 ENSMUSG00000028270 ENSMUSG00000028268 ENSMUSG00000041515 ENSMUSG00000073421 ENSMUSG00000029298', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='BF', GO_name='cellular response to interferon-gamma'),
Nt(p_uncorrected=4.231056097074788e-05, study_count=14, study_n=794, namespace='biological_process', p_fdr_bh=0.014617539815196508, study_items=set([u'ENSMUSG00000024927', u'ENSMUSG00000060802', u'ENSMUSG00000035673', u'ENSMUSG00000032372', u'ENSMUSG00000047123', u'ENSMUSG00000030793', u'ENSMUSG00000020638', u'ENSMUSG00000032691', u'ENSMUSG00000056019', u'ENSMUSG00000006728', u'ENSMUSG00000028270', u'ENSMUSG00000018920', u'ENSMUSG00000041515', u'ENSMUSG00000022500']), NS='BP', pop_items=set([u'ENSMUSG00000020889', u'ENSMUSG00000026712', u'ENSMUSG00000034160', u'ENSMUSG00000037992', u'ENSMUSG00000059089', u'ENSMUSG00000028270', u'ENSMUSG00000026842', u'ENSMUSG00000030793', u'ENSMUSG00000020638', u'ENSMUSG00000056019', u'ENSMUSG00000056501', u'ENSMUSG00000026883', u'ENSMUSG00000022500', u'ENSMUSG00000058301', u'ENSMUSG00000003184', u'ENSMUSG00000024927', u'ENSMUSG00000002250', u'ENSMUSG00000042419', u'ENSMUSG00000022194', u'ENSMUSG00000009681', u'ENSMUSG00000003437', u'ENSMUSG00000002602', u'ENSMUSG00000032372', u'ENSMUSG00000032377', u'ENSMUSG00000047123', u'ENSMUSG00000016024', u'ENSMUSG00000018920', u'ENSMUSG00000035385', u'ENSMUSG00000030595', u'ENSMUSG00000026104', u'ENSMUSG00000005583', u'ENSMUSG00000041135', u'ENSMUSG00000021624', u'ENSMUSG00000021699', u'ENSMUSG00000015243', u'ENSMUSG00000022901', u'ENSMUSG00000039217', u'ENSMUSG00000028525', u'ENSMUSG00000002108', u'ENSMUSG00000024789', u'ENSMUSG00000075701', u'ENSMUSG00000034855', u'ENSMUSG00000020399', u'ENSMUSG00000031555', u'ENSMUSG00000020850', u'ENSMUSG00000019461', u'ENSMUSG00000005951', u'ENSMUSG00000006728', u'ENSMUSG00000032369', u'ENSMUSG00000059866', u'ENSMUSG00000060802', u'ENSMUSG00000060803', u'ENSMUSG00000051439', u'ENSMUSG00000035673', u'ENSMUSG00000019850', u'ENSMUSG00000028163', u'ENSMUSG00000024045', u'ENSMUSG00000027164', u'ENSMUSG00000032691', u'ENSMUSG00000020484', u'ENSMUSG00000026234', u'ENSMUSG00000044786', u'ENSMUSG00000005609', u'ENSMUSG00000053436', u'ENSMUSG00000026361', 
u'ENSMUSG00000021936', u'ENSMUSG00000041515', u'ENSMUSG00000039005', u'ENSMUSG00000034889', u'ENSMUSG00000037411', u'ENSMUSG00000028599']), pop_n=13836, p_sm_bonferroni=0.6410473092678012, is_obsolete=False, GO='GO:0071222', name='cellular response to lipopolysaccharide', pop_count=71, alt_ids=[], level=5, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000024927', u'ENSMUSG00000022500', u'ENSMUSG00000060802', u'ENSMUSG00000035673', u'ENSMUSG00000032372', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000032691', u'ENSMUSG00000056019', u'ENSMUSG00000006728', u'ENSMUSG00000028270', u'ENSMUSG00000018920', u'ENSMUSG00000041515', u'ENSMUSG00000020638']), symbols=u'B2m Cdk4 Cmpk2 Cxcl16 Gbp2 Irf8 Litaf Nlrp3 Plscr2 Pycard Rela Sbno2 Ticam1 Zfp709', symbol_set=set([u'Sbno2', u'Irf8', u'Plscr2', u'Nlrp3', u'Ticam1', u'Zfp709', u'Gbp2', u'Cmpk2', u'Litaf', u'B2m', u'Cdk4', u'Pycard', u'Rela', u'Cxcl16']), geneids='ENSMUSG00000024927 ENSMUSG00000060802 ENSMUSG00000035673 ENSMUSG00000032372 ENSMUSG00000047123 ENSMUSG00000030793 ENSMUSG00000020638 ENSMUSG00000056019 ENSMUSG00000032691 ENSMUSG00000018920 ENSMUSG00000006728 ENSMUSG00000028270 ENSMUSG00000041515 ENSMUSG00000022500', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='BF', GO_name='cellular response to lipopolysaccharide'),
Nt(p_uncorrected=0.00018831346875026884, study_count=3, study_n=794, namespace='biological_process', p_fdr_bh=0.04322935401568671, study_items=set([u'ENSMUSG00000064210', u'ENSMUSG00000029468', u'ENSMUSG00000029470']), NS='BP', pop_items=set([u'ENSMUSG00000064210', u'ENSMUSG00000029468', u'ENSMUSG00000029470']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0035590', name='purinergic nucleotide receptor signaling pathway', pop_count=3, alt_ids=[], level=5, depth=7, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000064210', u'ENSMUSG00000029468', u'ENSMUSG00000029470']), symbols=u'Ano6 P2rx4 P2rx7', symbol_set=set([u'Ano6', u'P2rx4', u'P2rx7']), geneids='ENSMUSG00000064210 ENSMUSG00000029468 ENSMUSG00000029470', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=1, D1='AB', GO_name='purinergic nucleotide receptor signaling pathway'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0019222', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=3356, D1='A', GO_name='regulation of metabolic process'),
Nt(p_uncorrected=8.383267349436762e-07, study_count=10, study_n=794, namespace='biological_process', p_fdr_bh=0.0006048327791015066, study_items=set([u'ENSMUSG00000028191', u'ENSMUSG00000026029', u'ENSMUSG00000039304', u'ENSMUSG00000023905', u'ENSMUSG00000021408', u'ENSMUSG00000030793', u'ENSMUSG00000027322', u'ENSMUSG00000027248', u'ENSMUSG00000024778', u'ENSMUSG00000026942']), NS='BP', pop_items=set([u'ENSMUSG00000030339', u'ENSMUSG00000027466', u'ENSMUSG00000023905', u'ENSMUSG00000030793', u'ENSMUSG00000036986', u'ENSMUSG00000026942', u'ENSMUSG00000004446', u'ENSMUSG00000026029', u'ENSMUSG00000053647', u'ENSMUSG00000025507', u'ENSMUSG00000026223', u'ENSMUSG00000026395', u'ENSMUSG00000054499', u'ENSMUSG00000028191', u'ENSMUSG00000021408', u'ENSMUSG00000004637', u'ENSMUSG00000031980', u'ENSMUSG00000010047', u'ENSMUSG00000022781', u'ENSMUSG00000036712', u'ENSMUSG00000031548', u'ENSMUSG00000027322', u'ENSMUSG00000039304', u'ENSMUSG00000027248', u'ENSMUSG00000007655', u'ENSMUSG00000024778']), pop_n=13836, p_sm_bonferroni=0.012701488361131639, is_obsolete=False, GO='GO:2001238', name='positive regulation of extrinsic apoptotic signaling pathway', pop_count=26, alt_ids=[], level=7, depth=9, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000027322', u'ENSMUSG00000028191', u'ENSMUSG00000026029', u'ENSMUSG00000023905', u'ENSMUSG00000021408', u'ENSMUSG00000030793', u'ENSMUSG00000039304', u'ENSMUSG00000027248', u'ENSMUSG00000024778', u'ENSMUSG00000026942']), symbols=u'Bcl10 Casp8 Fas Pdia3 Pycard Ripk1 Siglec1 Tnfrsf12a Tnfsf10 Traf2', symbol_set=set([u'Traf2', u'Tnfsf10', u'Fas', u'Pdia3', u'Casp8', u'Bcl10', u'Ripk1', u'Pycard', u'Tnfrsf12a', u'Siglec1']), geneids='ENSMUSG00000028191 ENSMUSG00000026029 ENSMUSG00000023905 ENSMUSG00000021408 ENSMUSG00000027322 ENSMUSG00000030793 ENSMUSG00000026942 ENSMUSG00000027248 ENSMUSG00000024778 ENSMUSG00000039304', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=3, D1='A', 
GO_name='positive regulation of extrinsic apoptotic signaling pathway'),
Nt(p_uncorrected=3.19461130395962e-05, study_count=24, study_n=794, namespace='biological_process', p_fdr_bh=0.011805257528363953, study_items=set([u'ENSMUSG00000021451', u'ENSMUSG00000063450', u'ENSMUSG00000024397', u'ENSMUSG00000000555', u'ENSMUSG00000023348', u'ENSMUSG00000052593', u'ENSMUSG00000002603', u'ENSMUSG00000024621', u'ENSMUSG00000004056', u'ENSMUSG00000018920', u'ENSMUSG00000020689', u'ENSMUSG00000027087', u'ENSMUSG00000039621', u'ENSMUSG00000040254', u'ENSMUSG00000031740', u'ENSMUSG00000000957', u'ENSMUSG00000030538', u'ENSMUSG00000042228', u'ENSMUSG00000062960', u'ENSMUSG00000024965', u'ENSMUSG00000068748', u'ENSMUSG00000014599', u'ENSMUSG00000017774', u'ENSMUSG00000037926']), NS='BP', pop_items=set([u'ENSMUSG00000022836', u'ENSMUSG00000063531', u'ENSMUSG00000028228', u'ENSMUSG00000025856', u'ENSMUSG00000027985', u'ENSMUSG00000028431', u'ENSMUSG00000038894', u'ENSMUSG00000030110', u'ENSMUSG00000031778', u'ENSMUSG00000026697', u'ENSMUSG00000061353', u'ENSMUSG00000002489', u'ENSMUSG00000035133', u'ENSMUSG00000032035', u'ENSMUSG00000028583', u'ENSMUSG00000021822', u'ENSMUSG00000057969', u'ENSMUSG00000033717', u'ENSMUSG00000020372', u'ENSMUSG00000040254', u'ENSMUSG00000039239', u'ENSMUSG00000000489', u'ENSMUSG00000039936', u'ENSMUSG00000068154', u'ENSMUSG00000039621', u'ENSMUSG00000042228', u'ENSMUSG00000001506', u'ENSMUSG00000024965', u'ENSMUSG00000052353', u'ENSMUSG00000040152', u'ENSMUSG00000014599', u'ENSMUSG00000066551', u'ENSMUSG00000069135', u'ENSMUSG00000025499', u'ENSMUSG00000053475', u'ENSMUSG00000020074', u'ENSMUSG00000028249', u'ENSMUSG00000063450', u'ENSMUSG00000026193', u'ENSMUSG00000021478', u'ENSMUSG00000005672', u'ENSMUSG00000029231', u'ENSMUSG00000037685', u'ENSMUSG00000052593', u'ENSMUSG00000041417', u'ENSMUSG00000028195', u'ENSMUSG00000022150', u'ENSMUSG00000025207', u'ENSMUSG00000032402', u'ENSMUSG00000051675', u'ENSMUSG00000004936', u'ENSMUSG00000040552', u'ENSMUSG00000027087', u'ENSMUSG00000020053', u'ENSMUSG00000062209', 
u'ENSMUSG00000024101', u'ENSMUSG00000004056', u'ENSMUSG00000023088', u'ENSMUSG00000026836', u'ENSMUSG00000020859', u'ENSMUSG00000034855', u'ENSMUSG00000028466', u'ENSMUSG00000028019', u'ENSMUSG00000045991', u'ENSMUSG00000030774', u'ENSMUSG00000055980', u'ENSMUSG00000020484', u'ENSMUSG00000007613', u'ENSMUSG00000042524', u'ENSMUSG00000024456', u'ENSMUSG00000029816', u'ENSMUSG00000020122', u'ENSMUSG00000030890', u'ENSMUSG00000038777', u'ENSMUSG00000000627', u'ENSMUSG00000026923', u'ENSMUSG00000028064', u'ENSMUSG00000021451', u'ENSMUSG00000000957', u'ENSMUSG00000020828', u'ENSMUSG00000028780', u'ENSMUSG00000027835', u'ENSMUSG00000021224', u'ENSMUSG00000034684', u'ENSMUSG00000030707', u'ENSMUSG00000030805', u'ENSMUSG00000026121', u'ENSMUSG00000002603', u'ENSMUSG00000053647', u'ENSMUSG00000024620', u'ENSMUSG00000024621', u'ENSMUSG00000028864', u'ENSMUSG00000050357', u'ENSMUSG00000021904', u'ENSMUSG00000049107', u'ENSMUSG00000018920', u'ENSMUSG00000004655', u'ENSMUSG00000020659', u'ENSMUSG00000020689', u'ENSMUSG00000022031', u'ENSMUSG00000032135', u'ENSMUSG00000025809', u'ENSMUSG00000029291', u'ENSMUSG00000027695', u'ENSMUSG00000030530', u'ENSMUSG00000030539', u'ENSMUSG00000034974', u'ENSMUSG00000024397', u'ENSMUSG00000002900', u'ENSMUSG00000022607', u'ENSMUSG00000059456', u'ENSMUSG00000028874', u'ENSMUSG00000002580', u'ENSMUSG00000021338', u'ENSMUSG00000017774', u'ENSMUSG00000037926', u'ENSMUSG00000030538', u'ENSMUSG00000061878', u'ENSMUSG00000055723', u'ENSMUSG00000054836', u'ENSMUSG00000068290', u'ENSMUSG00000054693', u'ENSMUSG00000018565', u'ENSMUSG00000054808', u'ENSMUSG00000026864', u'ENSMUSG00000021360', u'ENSMUSG00000045382', u'ENSMUSG00000005534', u'ENSMUSG00000005871', u'ENSMUSG00000005533', u'ENSMUSG00000023348', u'ENSMUSG00000024486', u'ENSMUSG00000068566', u'ENSMUSG00000029648', u'ENSMUSG00000000127', u'ENSMUSG00000036585', u'ENSMUSG00000036106', u'ENSMUSG00000050965', u'ENSMUSG00000048376', u'ENSMUSG00000085795', u'ENSMUSG00000031841', 
u'ENSMUSG00000032050', u'ENSMUSG00000032359', u'ENSMUSG00000026064', u'ENSMUSG00000027358', u'ENSMUSG00000027111', u'ENSMUSG00000032006', u'ENSMUSG00000045092', u'ENSMUSG00000038264', u'ENSMUSG00000059146', u'ENSMUSG00000000555', u'ENSMUSG00000046761', u'ENSMUSG00000026479', u'ENSMUSG00000001435', u'ENSMUSG00000031740', u'ENSMUSG00000024789', u'ENSMUSG00000016496', u'ENSMUSG00000025586', u'ENSMUSG00000042682', u'ENSMUSG00000062960', u'ENSMUSG00000023951', u'ENSMUSG00000068748', u'ENSMUSG00000001227', u'ENSMUSG00000031955', u'ENSMUSG00000021835', u'ENSMUSG00000024300', u'ENSMUSG00000021936', u'ENSMUSG00000052133']), pop_n=13836, p_sm_bonferroni=0.484015558662922, is_obsolete=False, GO='GO:0030335', name='positive regulation of cell migration', pop_count=168, alt_ids=[], level=6, depth=7, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000040254', u'ENSMUSG00000021451', u'ENSMUSG00000063450', u'ENSMUSG00000039621', u'ENSMUSG00000024397', u'ENSMUSG00000031740', u'ENSMUSG00000000957', u'ENSMUSG00000030538', u'ENSMUSG00000042228', u'ENSMUSG00000062960', u'ENSMUSG00000024965', u'ENSMUSG00000020689', u'ENSMUSG00000000555', u'ENSMUSG00000023348', u'ENSMUSG00000052593', u'ENSMUSG00000014599', u'ENSMUSG00000002603', u'ENSMUSG00000024621', u'ENSMUSG00000004056', u'ENSMUSG00000018920', u'ENSMUSG00000068748', u'ENSMUSG00000017774', u'ENSMUSG00000037926', u'ENSMUSG00000027087']), symbols=u'Adam17 Aif1 Akt2 Cib1 Csf1 Csf1r Cxcl16 Fermt3 Itga5 Itgav Itgb3 Kdr Lyn Mmp14 Mmp2 Myo1c Prex1 Ptprz1 Sema3d Sema4d Ssh2 Syne2 Tgfb1 Trip6', symbol_set=set([u'Ptprz1', u'Ssh2', u'Trip6', u'Tgfb1', u'Itgb3', u'Csf1', u'Itgav', u'Adam17', u'Syne2', u'Mmp2', u'Aif1', u'Mmp14', u'Prex1', u'Csf1r', u'Fermt3', u'Akt2', u'Sema4d', u'Sema3d', u'Myo1c', u'Itga5', u'Cib1', u'Lyn', u'Kdr', u'Cxcl16']), geneids='ENSMUSG00000021451 ENSMUSG00000063450 ENSMUSG00000024397 ENSMUSG00000000555 ENSMUSG00000023348 ENSMUSG00000052593 ENSMUSG00000002603 ENSMUSG00000024621 
ENSMUSG00000004056 ENSMUSG00000018920 ENSMUSG00000068748 ENSMUSG00000027087 ENSMUSG00000039621 ENSMUSG00000040254 ENSMUSG00000031740 ENSMUSG00000000957 ENSMUSG00000030538 ENSMUSG00000042228 ENSMUSG00000062960 ENSMUSG00000024965 ENSMUSG00000020689 ENSMUSG00000014599 ENSMUSG00000017774 ENSMUSG00000037926', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=67, D1='A', GO_name='positive regulation of cell migration'),
Nt(p_uncorrected=2.6840419359548623e-11, study_count=15, study_n=794, namespace='biological_process', p_fdr_bh=3.893361146121815e-07, study_items=set([u'ENSMUSG00000024349', u'ENSMUSG00000058163', u'ENSMUSG00000043263', u'ENSMUSG00000069874', u'ENSMUSG00000090942', u'ENSMUSG00000040264', u'ENSMUSG00000073489', u'ENSMUSG00000078922', u'ENSMUSG00000020464', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000078920', u'ENSMUSG00000073555', u'ENSMUSG00000037860', u'ENSMUSG00000069893']), NS='BP', pop_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000020307', u'ENSMUSG00000058163', u'ENSMUSG00000009293', u'ENSMUSG00000078922', u'ENSMUSG00000078920', u'ENSMUSG00000078921', u'ENSMUSG00000036199', u'ENSMUSG00000028268', u'ENSMUSG00000029203', u'ENSMUSG00000073555', u'ENSMUSG00000021494', u'ENSMUSG00000043263', u'ENSMUSG00000039997', u'ENSMUSG00000068329', u'ENSMUSG00000054072', u'ENSMUSG00000026104', u'ENSMUSG00000037860', u'ENSMUSG00000068606', u'ENSMUSG00000018899', u'ENSMUSG00000069874', u'ENSMUSG00000046879', u'ENSMUSG00000073489', u'ENSMUSG00000078853', u'ENSMUSG00000020464', u'ENSMUSG00000028270', u'ENSMUSG00000069893', u'ENSMUSG00000024349', u'ENSMUSG00000034459', u'ENSMUSG00000090942', u'ENSMUSG00000074896']), pop_n=13836, p_sm_bonferroni=4.066591937165212e-07, is_obsolete=False, GO='GO:0035458', name='cellular response to interferon-beta', pop_count=31, alt_ids=[], level=6, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000024349', u'ENSMUSG00000058163', u'ENSMUSG00000043263', u'ENSMUSG00000069874', u'ENSMUSG00000090942', u'ENSMUSG00000040264', u'ENSMUSG00000073489', u'ENSMUSG00000078922', u'ENSMUSG00000020464', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000078920', u'ENSMUSG00000069893', u'ENSMUSG00000037860', u'ENSMUSG00000073555']), symbols=u'9930111J21Rik1 Aim2 F830016B08Rik Gbp2 Gbp2b Gbp3 Gm4951 Gm5431 Ifi204 Ifi209 Ifi47 Irgm2 Pnpt1 Tgtp1 Tmem173', symbol_set=set(['Gbp2b', u'Gm4951', 
u'9930111J21Rik1', u'F830016B08Rik', 'Ifi209', u'Gbp3', u'Tgtp1', u'Ifi204', u'Pnpt1', u'Aim2', u'Gbp2', u'Ifi47', u'Gm5431', u'Irgm2', u'Tmem173']), geneids='ENSMUSG00000024349 ENSMUSG00000058163 ENSMUSG00000043263 ENSMUSG00000069874 ENSMUSG00000078920 ENSMUSG00000090942 ENSMUSG00000040264 ENSMUSG00000073489 ENSMUSG00000078922 ENSMUSG00000020464 ENSMUSG00000028270 ENSMUSG00000028268 ENSMUSG00000037860 ENSMUSG00000073555 ENSMUSG00000069893', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='BF', GO_name='cellular response to interferon-beta'),
Nt(p_uncorrected=5.138075067296199e-05, study_count=4, study_n=794, namespace='biological_process', p_fdr_bh=0.015569395068920942, study_items=set([u'ENSMUSG00000058715', u'ENSMUSG00000031838', u'ENSMUSG00000015947', u'ENSMUSG00000061232']), NS='BP', pop_items=set([u'ENSMUSG00000015947', u'ENSMUSG00000059498', u'ENSMUSG00000031838', u'ENSMUSG00000058715', u'ENSMUSG00000061232']), pop_n=13836, p_sm_bonferroni=0.7784697534460471, is_obsolete=False, GO='GO:0042590', name='antigen processing and presentation of exogenous peptide antigen via MHC class I', pop_count=5, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000058715', u'ENSMUSG00000031838', u'ENSMUSG00000015947', u'ENSMUSG00000061232']), symbols=u'Fcer1g Fcgr1 H2-K1 Ifi30', symbol_set=set([u'Fcer1g', u'Fcgr1', u'Ifi30', 'H2-K1']), geneids='ENSMUSG00000015947 ENSMUSG00000031838 ENSMUSG00000058715 ENSMUSG00000061232', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=2, D1='L', GO_name='antigen processing and presentation of exogenous peptide antigen via MHC class I'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0007165', name='', pop_count='', alt_ids='', level=2, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=724, D1='AB', GO_name='signal transduction'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0023051', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=1395, D1='A', GO_name='regulation of signaling'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0008219', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=107, D1='BC', GO_name='cell death'),
Nt(p_uncorrected=0.00014711294614682194, study_count=4, study_n=794, namespace='biological_process', p_fdr_bh=0.037148470784508315, study_items=set([u'ENSMUSG00000031101', u'ENSMUSG00000045005', u'ENSMUSG00000026942', u'ENSMUSG00000060802']), NS='BP', pop_items=set([u'ENSMUSG00000032688', u'ENSMUSG00000060802', u'ENSMUSG00000031101', u'ENSMUSG00000027164', u'ENSMUSG00000026942', u'ENSMUSG00000045005']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0002726', name='positive regulation of T cell cytokine production', pop_count=6, alt_ids=[], level=7, depth=9, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000031101', u'ENSMUSG00000045005', u'ENSMUSG00000026942', u'ENSMUSG00000060802']), symbols=u'B2m Fzd5 Sash3 Traf2', symbol_set=set([u'Traf2', u'B2m', u'Sash3', u'Fzd5']), geneids='ENSMUSG00000031101 ENSMUSG00000045005 ENSMUSG00000026942 ENSMUSG00000060802', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=2, D1='A', GO_name='positive regulation of T cell cytokine production'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0045595', name='', pop_count='', alt_ids='', level=4, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=864, D1='A', GO_name='regulation of cell differentiation'),
Nt(p_uncorrected=1.1301346989526834e-07, study_count=8, study_n=794, namespace='biological_process', p_fdr_bh=0.00010701669264895065, study_items=set([u'ENSMUSG00000060550', u'ENSMUSG00000073411', u'ENSMUSG00000060802', u'ENSMUSG00000067212', u'ENSMUSG00000035929', u'ENSMUSG00000073409', u'ENSMUSG00000053835', u'ENSMUSG00000061232']), NS='BP', pop_items=set([u'ENSMUSG00000060550', u'ENSMUSG00000073411', u'ENSMUSG00000053835', u'ENSMUSG00000060802', u'ENSMUSG00000024392', u'ENSMUSG00000035929', u'ENSMUSG00000056116', u'ENSMUSG00000079507', u'ENSMUSG00000073409', u'ENSMUSG00000016206', u'ENSMUSG00000067212', u'ENSMUSG00000061232', u'ENSMUSG00000026471']), pop_n=13836, p_sm_bonferroni=0.0017122670823832107, is_obsolete=False, GO='GO:0002474', name='antigen processing and presentation of peptide antigen via MHC class I', pop_count=13, alt_ids=[], level=4, depth=4, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000073411', u'ENSMUSG00000060802', u'ENSMUSG00000067212', u'ENSMUSG00000035929', u'ENSMUSG00000061232', u'ENSMUSG00000073409', u'ENSMUSG00000053835', u'ENSMUSG00000060550']), symbols=u'B2m H2-D1 H2-K1 H2-Q4 H2-Q6 H2-Q7 H2-T23 H2-T24', symbol_set=set(['H2-K1', 'H2-D1', 'H2-Q4', 'H2-T23', 'H2-Q6', 'H2-T24', u'B2m', 'H2-Q7']), geneids='ENSMUSG00000073411 ENSMUSG00000060802 ENSMUSG00000053835 ENSMUSG00000035929 ENSMUSG00000061232 ENSMUSG00000067212 ENSMUSG00000060550 ENSMUSG00000073409', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=8, D1='L', GO_name='antigen processing and presentation of peptide antigen via MHC class I'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0010941', name='', pop_count='', alt_ids='', level=4, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=335, D1='A', GO_name='regulation of cell death'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0033993', name='', pop_count='', alt_ids='', level=4, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=125, D1='F', GO_name='response to lipid'),
Nt(p_uncorrected=1.263279135958711e-06, study_count=10, study_n=794, namespace='biological_process', p_fdr_bh=0.0008321713995178447, study_items=set([u'ENSMUSG00000024927', u'ENSMUSG00000025888', u'ENSMUSG00000029468', u'ENSMUSG00000027995', u'ENSMUSG00000030793', u'ENSMUSG00000015947', u'ENSMUSG00000019810', u'ENSMUSG00000026433', u'ENSMUSG00000015950', u'ENSMUSG00000041515']), NS='BP', pop_items=set([u'ENSMUSG00000055447', u'ENSMUSG00000070034', u'ENSMUSG00000026656', u'ENSMUSG00000027684', u'ENSMUSG00000030793', u'ENSMUSG00000025372', u'ENSMUSG00000029468', u'ENSMUSG00000024927', u'ENSMUSG00000019804', u'ENSMUSG00000003184', u'ENSMUSG00000015947', u'ENSMUSG00000026433', u'ENSMUSG00000026104', u'ENSMUSG00000039005', u'ENSMUSG00000029684', u'ENSMUSG00000020525', u'ENSMUSG00000015478', u'ENSMUSG00000025888', u'ENSMUSG00000051439', u'ENSMUSG00000027995', u'ENSMUSG00000045827', u'ENSMUSG00000026177', u'ENSMUSG00000019810', u'ENSMUSG00000015950', u'ENSMUSG00000041515', u'ENSMUSG00000007655', u'ENSMUSG00000032041']), pop_n=13836, p_sm_bonferroni=0.01913994218891043, is_obsolete=False, GO='GO:0009617', name='response to bacterium', pop_count=27, alt_ids=['GO:0009618', 'GO:0009680'], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000024927', u'ENSMUSG00000025888', u'ENSMUSG00000029468', u'ENSMUSG00000027995', u'ENSMUSG00000030793', u'ENSMUSG00000015947', u'ENSMUSG00000019810', u'ENSMUSG00000026433', u'ENSMUSG00000015950', u'ENSMUSG00000041515']), symbols=u'Casp1 Fcgr1 Fuca2 Irf8 Ncf1 P2rx7 Pycard Rab29 Rela Tlr2', symbol_set=set([u'Ncf1', u'Casp1', 'Rab29', u'Irf8', u'Tlr2', u'P2rx7', u'Pycard', u'Rela', u'Fuca2', u'Fcgr1']), geneids='ENSMUSG00000024927 ENSMUSG00000025888 ENSMUSG00000029468 ENSMUSG00000027995 ENSMUSG00000030793 ENSMUSG00000015947 ENSMUSG00000019810 ENSMUSG00000026433 ENSMUSG00000015950 ENSMUSG00000041515', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=12, D1='F', GO_name='response 
to bacterium'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0001816', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=110, D1='CJ', GO_name='cytokine production'),
Nt(p_uncorrected=8.063864985346781e-05, study_count=10, study_n=794, namespace='biological_process', p_fdr_bh=0.022625114517220202, study_items=set([u'ENSMUSG00000025492', u'ENSMUSG00000045322', u'ENSMUSG00000053175', u'ENSMUSG00000029826', u'ENSMUSG00000040725', u'ENSMUSG00000032690', u'ENSMUSG00000031639', u'ENSMUSG00000052776', u'ENSMUSG00000037921', u'ENSMUSG00000020075']), NS='BP', pop_items=set([u'ENSMUSG00000032661', u'ENSMUSG00000023341', u'ENSMUSG00000045322', u'ENSMUSG00000007659', u'ENSMUSG00000027951', u'ENSMUSG00000070583', u'ENSMUSG00000046718', u'ENSMUSG00000026469', u'ENSMUSG00000031639', u'ENSMUSG00000041827', u'ENSMUSG00000010051', u'ENSMUSG00000032508', u'ENSMUSG00000053175', u'ENSMUSG00000079017', u'ENSMUSG00000029826', u'ENSMUSG00000040725', u'ENSMUSG00000033777', u'ENSMUSG00000052776', u'ENSMUSG00000026896', u'ENSMUSG00000039236', u'ENSMUSG00000017830', u'ENSMUSG00000024079', u'ENSMUSG00000060591', u'ENSMUSG00000029561', u'ENSMUSG00000029605', u'ENSMUSG00000008683', u'ENSMUSG00000010047', u'ENSMUSG00000025492', u'ENSMUSG00000030421', u'ENSMUSG00000034459', u'ENSMUSG00000020075', u'ENSMUSG00000000787', u'ENSMUSG00000027770', u'ENSMUSG00000040296', u'ENSMUSG00000032690', u'ENSMUSG00000020641', u'ENSMUSG00000051451', u'ENSMUSG00000045932', u'ENSMUSG00000037921', u'ENSMUSG00000037149', u'ENSMUSG00000000386']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0009615', name='response to virus', pop_count=41, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000025492', u'ENSMUSG00000045322', u'ENSMUSG00000053175', u'ENSMUSG00000029826', u'ENSMUSG00000040725', u'ENSMUSG00000032690', u'ENSMUSG00000031639', u'ENSMUSG00000052776', u'ENSMUSG00000037921', u'ENSMUSG00000020075']), symbols=u'Bcl3 Ddx21 Ddx60 Hnrnpul1 Ifitm3 Oas1a Oas2 Tlr3 Tlr9 Zc3hav1', symbol_set=set([u'Ifitm3', u'Ddx21', u'Bcl3', u'Tlr3', u'Oas2', u'Zc3hav1', u'Ddx60', u'Oas1a', u'Tlr9', u'Hnrnpul1']), 
geneids='ENSMUSG00000025492 ENSMUSG00000045322 ENSMUSG00000053175 ENSMUSG00000029826 ENSMUSG00000032690 ENSMUSG00000040725 ENSMUSG00000031639 ENSMUSG00000052776 ENSMUSG00000037921 ENSMUSG00000020075', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=13, D1='F', GO_name='response to virus'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0002682', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=1201, D1='A', GO_name='regulation of immune system process'),
Nt(p_uncorrected=0.00011413256783345521, study_count=7, study_n=794, namespace='biological_process', p_fdr_bh=0.030878973843654997, study_items=set([u'ENSMUSG00000027852', u'ENSMUSG00000053175', u'ENSMUSG00000031101', u'ENSMUSG00000030793', u'ENSMUSG00000022952', u'ENSMUSG00000041515', u'ENSMUSG00000045005']), NS='BP', pop_items=set([u'ENSMUSG00000025980', u'ENSMUSG00000027852', u'ENSMUSG00000022967', u'ENSMUSG00000053175', u'ENSMUSG00000039217', u'ENSMUSG00000025499', u'ENSMUSG00000031101', u'ENSMUSG00000028525', u'ENSMUSG00000030793', u'ENSMUSG00000020399', u'ENSMUSG00000050357', u'ENSMUSG00000026177', u'ENSMUSG00000041135', u'ENSMUSG00000022952', u'ENSMUSG00000016206', u'ENSMUSG00000021994', u'ENSMUSG00000041515', u'ENSMUSG00000045005', u'ENSMUSG00000051439', u'ENSMUSG00000042817', u'ENSMUSG00000039005']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0032729', name='positive regulation of interferon-gamma production', pop_count=21, alt_ids=[], level=6, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000027852', u'ENSMUSG00000022952', u'ENSMUSG00000053175', u'ENSMUSG00000031101', u'ENSMUSG00000045005', u'ENSMUSG00000041515', u'ENSMUSG00000030793']), symbols=u'Bcl3 Fzd5 Irf8 Nras Pycard Runx1 Sash3', symbol_set=set([u'Fzd5', u'Irf8', u'Nras', u'Runx1', u'Pycard', u'Sash3', u'Bcl3']), geneids='ENSMUSG00000027852 ENSMUSG00000053175 ENSMUSG00000031101 ENSMUSG00000030793 ENSMUSG00000022952 ENSMUSG00000041515 ENSMUSG00000045005', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=2, D1='A', GO_name='positive regulation of interferon-gamma production'),
Nt(p_uncorrected=0.00018720354472778378, study_count=9, study_n=794, namespace='biological_process', p_fdr_bh=0.04322935401568671, study_items=set([u'ENSMUSG00000028191', u'ENSMUSG00000022346', u'ENSMUSG00000030793', u'ENSMUSG00000032691', u'ENSMUSG00000039304', u'ENSMUSG00000038058', u'ENSMUSG00000029249', u'ENSMUSG00000007815', u'ENSMUSG00000024778']), NS='BP', pop_items=set([u'ENSMUSG00000035199', u'ENSMUSG00000020366', u'ENSMUSG00000002015', u'ENSMUSG00000021576', u'ENSMUSG00000021559', u'ENSMUSG00000030793', u'ENSMUSG00000027381', u'ENSMUSG00000036199', u'ENSMUSG00000019997', u'ENSMUSG00000029249', u'ENSMUSG00000027282', u'ENSMUSG00000024959', u'ENSMUSG00000020349', u'ENSMUSG00000022556', u'ENSMUSG00000022346', u'ENSMUSG00000053647', u'ENSMUSG00000068329', u'ENSMUSG00000071369', u'ENSMUSG00000048376', u'ENSMUSG00000020063', u'ENSMUSG00000018909', u'ENSMUSG00000034485', u'ENSMUSG00000028191', u'ENSMUSG00000028195', u'ENSMUSG00000026834', u'ENSMUSG00000019054', u'ENSMUSG00000024947', u'ENSMUSG00000030417', u'ENSMUSG00000026181', u'ENSMUSG00000037787', u'ENSMUSG00000000787', u'ENSMUSG00000066643', u'ENSMUSG00000032691', u'ENSMUSG00000039304', u'ENSMUSG00000038058', u'ENSMUSG00000007815', u'ENSMUSG00000024778']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0043280', name='positive regulation of cysteine-type endopeptidase activity involved in apoptotic process', pop_count=37, alt_ids=[], level=8, depth=12, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000028191', u'ENSMUSG00000022346', u'ENSMUSG00000030793', u'ENSMUSG00000032691', u'ENSMUSG00000039304', u'ENSMUSG00000038058', u'ENSMUSG00000029249', u'ENSMUSG00000007815', u'ENSMUSG00000024778']), symbols=u'Bcl10 Fas Myc Nlrp3 Nod1 Pycard Rest Rhoa Tnfsf10', symbol_set=set([u'Tnfsf10', u'Nlrp3', u'Fas', u'Rest', u'Rhoa', u'Nod1', u'Pycard', u'Myc', u'Bcl10']), geneids='ENSMUSG00000028191 ENSMUSG00000022346 ENSMUSG00000030793 ENSMUSG00000032691 ENSMUSG00000039304 
ENSMUSG00000038058 ENSMUSG00000029249 ENSMUSG00000007815 ENSMUSG00000024778', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=7, D1='A', GO_name='positive regulation of cysteine-type endopeptidase activity involved in apoptotic process'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0022610', name='', pop_count='', alt_ids='', level=1, depth=1, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=127, D1='P', GO_name='biological adhesion'),
Nt(p_uncorrected=9.359622790877207e-05, study_count=17, study_n=794, namespace='biological_process', p_fdr_bh=0.025783208164469194, study_items=set([u'ENSMUSG00000024927', u'ENSMUSG00000022178', u'ENSMUSG00000029826', u'ENSMUSG00000026029', u'ENSMUSG00000045322', u'ENSMUSG00000021408', u'ENSMUSG00000000275', u'ENSMUSG00000047123', u'ENSMUSG00000026749', u'ENSMUSG00000025888', u'ENSMUSG00000002233', u'ENSMUSG00000026942', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000020941', u'ENSMUSG00000022500', u'ENSMUSG00000047098']), NS='BP', pop_items=set([u'ENSMUSG00000067377', u'ENSMUSG00000027466', u'ENSMUSG00000045322', u'ENSMUSG00000059327', u'ENSMUSG00000028108', u'ENSMUSG00000074781', u'ENSMUSG00000022178', u'ENSMUSG00000066406', u'ENSMUSG00000068220', u'ENSMUSG00000000266', u'ENSMUSG00000006932', u'ENSMUSG00000015120', u'ENSMUSG00000037523', u'ENSMUSG00000019843', u'ENSMUSG00000035235', u'ENSMUSG00000026842', u'ENSMUSG00000021408', u'ENSMUSG00000045038', u'ENSMUSG00000031887', u'ENSMUSG00000027397', u'ENSMUSG00000027399', u'ENSMUSG00000025199', u'ENSMUSG00000057367', u'ENSMUSG00000000275', u'ENSMUSG00000037089', u'ENSMUSG00000022255', u'ENSMUSG00000020941', u'ENSMUSG00000038058', u'ENSMUSG00000055762', u'ENSMUSG00000026942', u'ENSMUSG00000025034', u'ENSMUSG00000023826', u'ENSMUSG00000022552', u'ENSMUSG00000026029', u'ENSMUSG00000029826', u'ENSMUSG00000003184', u'ENSMUSG00000047123', u'ENSMUSG00000020134', u'ENSMUSG00000051675', u'ENSMUSG00000031392', u'ENSMUSG00000004394', u'ENSMUSG00000041000', u'ENSMUSG00000059883', u'ENSMUSG00000031155', u'ENSMUSG00000032966', u'ENSMUSG00000002688', u'ENSMUSG00000055204', u'ENSMUSG00000034457', u'ENSMUSG00000041241', u'ENSMUSG00000032570', u'ENSMUSG00000031021', u'ENSMUSG00000053253', u'ENSMUSG00000026596', u'ENSMUSG00000078923', u'ENSMUSG00000031143', u'ENSMUSG00000039713', u'ENSMUSG00000042312', u'ENSMUSG00000031639', u'ENSMUSG00000047098', u'ENSMUSG00000004221', u'ENSMUSG00000032508', u'ENSMUSG00000024425', 
u'ENSMUSG00000060548', u'ENSMUSG00000041135', u'ENSMUSG00000027699', u'ENSMUSG00000030245', u'ENSMUSG00000028701', u'ENSMUSG00000046668', u'ENSMUSG00000021096', u'ENSMUSG00000057193', u'ENSMUSG00000025888', u'ENSMUSG00000059866', u'ENSMUSG00000025647', u'ENSMUSG00000005413', u'ENSMUSG00000033208', u'ENSMUSG00000026749', u'ENSMUSG00000050953', u'ENSMUSG00000030339', u'ENSMUSG00000037820', u'ENSMUSG00000036299', u'ENSMUSG00000008734', u'ENSMUSG00000022500', u'ENSMUSG00000028284', u'ENSMUSG00000024927', u'ENSMUSG00000028179', u'ENSMUSG00000032688', u'ENSMUSG00000024091', u'ENSMUSG00000022757', u'ENSMUSG00000035798', u'ENSMUSG00000002233', u'ENSMUSG00000039005', u'ENSMUSG00000021701', u'ENSMUSG00000033430', u'ENSMUSG00000036686', u'ENSMUSG00000029060', u'ENSMUSG00000028756', u'ENSMUSG00000028522', u'ENSMUSG00000003099', u'ENSMUSG00000031328', u'ENSMUSG00000027164', u'ENSMUSG00000020921', u'ENSMUSG00000025575', u'ENSMUSG00000052889', u'ENSMUSG00000032041', u'ENSMUSG00000030471']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0043123', name='positive regulation of I-kappaB kinase/NF-kappaB signaling', pop_count=105, alt_ids=[], level=7, depth=8, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000045322', u'ENSMUSG00000021408', u'ENSMUSG00000026942', u'ENSMUSG00000031639', u'ENSMUSG00000022500', u'ENSMUSG00000047098', u'ENSMUSG00000024927', u'ENSMUSG00000022178', u'ENSMUSG00000025888', u'ENSMUSG00000026029', u'ENSMUSG00000029826', u'ENSMUSG00000000275', u'ENSMUSG00000047123', u'ENSMUSG00000026749', u'ENSMUSG00000002233', u'ENSMUSG00000038058', u'ENSMUSG00000020941']), symbols=u'Ajuba Casp1 Casp8 Litaf Map3k14 Nek6 Nod1 Rela Rhoc Ripk1 Rnf31 Ticam1 Tlr3 Tlr9 Traf2 Trim25 Zc3hav1', symbol_set=set([u'Nek6', u'Ticam1', u'Traf2', u'Casp1', u'Trim25', u'Litaf', u'Casp8', u'Ripk1', u'Tlr3', u'Nod1', u'Zc3hav1', u'Rhoc', u'Map3k14', u'Rela', u'Tlr9', 'Ajuba', u'Rnf31']), geneids='ENSMUSG00000024927 ENSMUSG00000022178 ENSMUSG00000025888 
ENSMUSG00000026029 ENSMUSG00000045322 ENSMUSG00000021408 ENSMUSG00000000275 ENSMUSG00000047123 ENSMUSG00000026749 ENSMUSG00000002233 ENSMUSG00000026942 ENSMUSG00000029826 ENSMUSG00000031639 ENSMUSG00000020941 ENSMUSG00000022500 ENSMUSG00000047098 ENSMUSG00000038058', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=4, D1='A', GO_name='positive regulation of I-kappaB kinase/NF-kappaB signaling'),
Nt(p_uncorrected=4.979940413768524e-05, study_count=37, study_n=794, namespace='biological_process', p_fdr_bh=0.015569395068920942, study_items=set([u'ENSMUSG00000034165', u'ENSMUSG00000022831', u'ENSMUSG00000028333', u'ENSMUSG00000004508', u'ENSMUSG00000024927', u'ENSMUSG00000052593', u'ENSMUSG00000028639', u'ENSMUSG00000022346', u'ENSMUSG00000004266', u'ENSMUSG00000030760', u'ENSMUSG00000024621', u'ENSMUSG00000045730', u'ENSMUSG00000021109', u'ENSMUSG00000027506', u'ENSMUSG00000000751', u'ENSMUSG00000025403', u'ENSMUSG00000027087', u'ENSMUSG00000022146', u'ENSMUSG00000056220', u'ENSMUSG00000027947', u'ENSMUSG00000032492', u'ENSMUSG00000006356', u'ENSMUSG00000003283', u'ENSMUSG00000030538', u'ENSMUSG00000027803', u'ENSMUSG00000042228', u'ENSMUSG00000062960', u'ENSMUSG00000029175', u'ENSMUSG00000029838', u'ENSMUSG00000033220', u'ENSMUSG00000004040', u'ENSMUSG00000005373', u'ENSMUSG00000014599', u'ENSMUSG00000026628', u'ENSMUSG00000006728', u'ENSMUSG00000002603', u'ENSMUSG00000037846']), NS='BP', pop_items=set([u'ENSMUSG00000028224', u'ENSMUSG00000014773', u'ENSMUSG00000068154', u'ENSMUSG00000022831', u'ENSMUSG00000030093', u'ENSMUSG00000062604', u'ENSMUSG00000025856', u'ENSMUSG00000001288', u'ENSMUSG00000015994', u'ENSMUSG00000001910', u'ENSMUSG00000019997', u'ENSMUSG00000021342', u'ENSMUSG00000036585', u'ENSMUSG00000018500', u'ENSMUSG00000018501', u'ENSMUSG00000051790', u'ENSMUSG00000002250', u'ENSMUSG00000030849', u'ENSMUSG00000001761', u'ENSMUSG00000018160', u'ENSMUSG00000002489', u'ENSMUSG00000004043', u'ENSMUSG00000030846', u'ENSMUSG00000048376', u'ENSMUSG00000006932', u'ENSMUSG00000032035', u'ENSMUSG00000053110', u'ENSMUSG00000052534', u'ENSMUSG00000070348', u'ENSMUSG00000021732', u'ENSMUSG00000019966', u'ENSMUSG00000021109', u'ENSMUSG00000022770', u'ENSMUSG00000019777', u'ENSMUSG00000016319', u'ENSMUSG00000004266', u'ENSMUSG00000028333', u'ENSMUSG00000027947', u'ENSMUSG00000010797', u'ENSMUSG00000020422', u'ENSMUSG00000032492', u'ENSMUSG00000059146', 
u'ENSMUSG00000039239', u'ENSMUSG00000000489', u'ENSMUSG00000033417', u'ENSMUSG00000003814', u'ENSMUSG00000025499', u'ENSMUSG00000039662', u'ENSMUSG00000029337', u'ENSMUSG00000031537', u'ENSMUSG00000032397', u'ENSMUSG00000030760', u'ENSMUSG00000031575', u'ENSMUSG00000000561', u'ENSMUSG00000030057', u'ENSMUSG00000007888', u'ENSMUSG00000003228', u'ENSMUSG00000027651', u'ENSMUSG00000000567', u'ENSMUSG00000034724', u'ENSMUSG00000021822', u'ENSMUSG00000056947', u'ENSMUSG00000006398', u'ENSMUSG00000032253', u'ENSMUSG00000024477', u'ENSMUSG00000014599', u'ENSMUSG00000026628', u'ENSMUSG00000004508', u'ENSMUSG00000027087', u'ENSMUSG00000029283', u'ENSMUSG00000059456', u'ENSMUSG00000033940', u'ENSMUSG00000016477', u'ENSMUSG00000059363', u'ENSMUSG00000020074', u'ENSMUSG00000031870', u'ENSMUSG00000061436', u'ENSMUSG00000026672', u'ENSMUSG00000055447', u'ENSMUSG00000013663', u'ENSMUSG00000020362', u'ENSMUSG00000032487', u'ENSMUSG00000013584', u'ENSMUSG00000015709', u'ENSMUSG00000006517', u'ENSMUSG00000028019', u'ENSMUSG00000033208', u'ENSMUSG00000027835', u'ENSMUSG00000018442', u'ENSMUSG00000038894', u'ENSMUSG00000028444', u'ENSMUSG00000005672', u'ENSMUSG00000024913', u'ENSMUSG00000045515', u'ENSMUSG00000052684', u'ENSMUSG00000030283', u'ENSMUSG00000032565', u'ENSMUSG00000045983', u'ENSMUSG00000004473', u'ENSMUSG00000028639', u'ENSMUSG00000037010', u'ENSMUSG00000047146', u'ENSMUSG00000022528', u'ENSMUSG00000029838', u'ENSMUSG00000045730', u'ENSMUSG00000021779', u'ENSMUSG00000019803', u'ENSMUSG00000041498', u'ENSMUSG00000040359', u'ENSMUSG00000054263', u'ENSMUSG00000022010', u'ENSMUSG00000028800', u'ENSMUSG00000027523', u'ENSMUSG00000029053', u'ENSMUSG00000061887', u'ENSMUSG00000027524', u'ENSMUSG00000011179', u'ENSMUSG00000020053', u'ENSMUSG00000062209', u'ENSMUSG00000069135', u'ENSMUSG00000026313', u'ENSMUSG00000030265', u'ENSMUSG00000021175', u'ENSMUSG00000037706', u'ENSMUSG00000020357', u'ENSMUSG00000025358', u'ENSMUSG00000023912', u'ENSMUSG00000003283', 
u'ENSMUSG00000004791', u'ENSMUSG00000020902', u'ENSMUSG00000034855', u'ENSMUSG00000055254', u'ENSMUSG00000033373', u'ENSMUSG00000027803', u'ENSMUSG00000028397', u'ENSMUSG00000078812', u'ENSMUSG00000067847', u'ENSMUSG00000022540', u'ENSMUSG00000039191', u'ENSMUSG00000030774', u'ENSMUSG00000024997', u'ENSMUSG00000041710', u'ENSMUSG00000001300', u'ENSMUSG00000054693', u'ENSMUSG00000020467', u'ENSMUSG00000061353', u'ENSMUSG00000005373', u'ENSMUSG00000041488', u'ENSMUSG00000030898', u'ENSMUSG00000056481', u'ENSMUSG00000022146', u'ENSMUSG00000020644', u'ENSMUSG00000021974', u'ENSMUSG00000048154', u'ENSMUSG00000020122', u'ENSMUSG00000031393', u'ENSMUSG00000035109', u'ENSMUSG00000031380', u'ENSMUSG00000026923', u'ENSMUSG00000036923', u'ENSMUSG00000021670', u'ENSMUSG00000021756', u'ENSMUSG00000023951', u'ENSMUSG00000024620', u'ENSMUSG00000047945', u'ENSMUSG00000033762', u'ENSMUSG00000028261', u'ENSMUSG00000003031', u'ENSMUSG00000050697', u'ENSMUSG00000042312', u'ENSMUSG00000032185', u'ENSMUSG00000027878', u'ENSMUSG00000017548', u'ENSMUSG00000031723', u'ENSMUSG00000030397', u'ENSMUSG00000050192', u'ENSMUSG00000033249', u'ENSMUSG00000006134', u'ENSMUSG00000010175', u'ENSMUSG00000020919', u'ENSMUSG00000030805', u'ENSMUSG00000024927', u'ENSMUSG00000057342', u'ENSMUSG00000002603', u'ENSMUSG00000087679', u'ENSMUSG00000048001', u'ENSMUSG00000022382', u'ENSMUSG00000024621', u'ENSMUSG00000028864', u'ENSMUSG00000062960', u'ENSMUSG00000026193', u'ENSMUSG00000037992', u'ENSMUSG00000049107', u'ENSMUSG00000034997', u'ENSMUSG00000031314', u'ENSMUSG00000005469', u'ENSMUSG00000022037', u'ENSMUSG00000027506', u'ENSMUSG00000008683', u'ENSMUSG00000038984', u'ENSMUSG00000040152', u'ENSMUSG00000001131', u'ENSMUSG00000024232', u'ENSMUSG00000056220', u'ENSMUSG00000073616', u'ENSMUSG00000059588', u'ENSMUSG00000020235', u'ENSMUSG00000057113', u'ENSMUSG00000034165', u'ENSMUSG00000053647', u'ENSMUSG00000025809', u'ENSMUSG00000028821', u'ENSMUSG00000001435', u'ENSMUSG00000006356', 
u'ENSMUSG00000033751', u'ENSMUSG00000042228', u'ENSMUSG00000048402', u'ENSMUSG00000031601', u'ENSMUSG00000020063', u'ENSMUSG00000030538', u'ENSMUSG00000044562', u'ENSMUSG00000037846', u'ENSMUSG00000040274', u'ENSMUSG00000061589', u'ENSMUSG00000022607', u'ENSMUSG00000034563', u'ENSMUSG00000057329', u'ENSMUSG00000031548', u'ENSMUSG00000035000', u'ENSMUSG00000020496', u'ENSMUSG00000062312', u'ENSMUSG00000017491', u'ENSMUSG00000020515', u'ENSMUSG00000006728', u'ENSMUSG00000037169', u'ENSMUSG00000022122', u'ENSMUSG00000039781', u'ENSMUSG00000061878', u'ENSMUSG00000026674', u'ENSMUSG00000002731', u'ENSMUSG00000055817', u'ENSMUSG00000044014', u'ENSMUSG00000073889', u'ENSMUSG00000028364', u'ENSMUSG00000007613', u'ENSMUSG00000068290', u'ENSMUSG00000038279', u'ENSMUSG00000008730', u'ENSMUSG00000024795', u'ENSMUSG00000031750', u'ENSMUSG00000043991', u'ENSMUSG00000029287', u'ENSMUSG00000021360', u'ENSMUSG00000005534', u'ENSMUSG00000021994', u'ENSMUSG00000031616', u'ENSMUSG00000022505', u'ENSMUSG00000052957', u'ENSMUSG00000020484', u'ENSMUSG00000032187', u'ENSMUSG00000050335', u'ENSMUSG00000024486', u'ENSMUSG00000022346', u'ENSMUSG00000031446', u'ENSMUSG00000024073', u'ENSMUSG00000076431', u'ENSMUSG00000027985', u'ENSMUSG00000000127', u'ENSMUSG00000063632', u'ENSMUSG00000030525', u'ENSMUSG00000031565', u'ENSMUSG00000021835', u'ENSMUSG00000085795', u'ENSMUSG00000046532', u'ENSMUSG00000038007', u'ENSMUSG00000026104', u'ENSMUSG00000022425', u'ENSMUSG00000032359', u'ENSMUSG00000000751', u'ENSMUSG00000025403', u'ENSMUSG00000027447', u'ENSMUSG00000032725', u'ENSMUSG00000045092', u'ENSMUSG00000034394', u'ENSMUSG00000001517', u'ENSMUSG00000038260', u'ENSMUSG00000029231', u'ENSMUSG00000039153', u'ENSMUSG00000029999', u'ENSMUSG00000026479', u'ENSMUSG00000024789', u'ENSMUSG00000068037', u'ENSMUSG00000038943', u'ENSMUSG00000028291', u'ENSMUSG00000052593', u'ENSMUSG00000031980', u'ENSMUSG00000021379', u'ENSMUSG00000030890', u'ENSMUSG00000042680', u'ENSMUSG00000000184', 
u'ENSMUSG00000074698', u'ENSMUSG00000028982', u'ENSMUSG00000032006', u'ENSMUSG00000032562', u'ENSMUSG00000029175', u'ENSMUSG00000054387', u'ENSMUSG00000033220', u'ENSMUSG00000057506', u'ENSMUSG00000004040', u'ENSMUSG00000024256', u'ENSMUSG00000054252', u'ENSMUSG00000027859', u'ENSMUSG00000048616', u'ENSMUSG00000062352', u'ENSMUSG00000028249', u'ENSMUSG00000032532', u'ENSMUSG00000007659', u'ENSMUSG00000006333', u'ENSMUSG00000054302', u'ENSMUSG00000018604', u'ENSMUSG00000016308', u'ENSMUSG00000025578']), pop_n=13836, p_sm_bonferroni=0.7545107720900691, is_obsolete=False, GO='GO:0008284', name='positive regulation of cell proliferation', pop_count=322, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000034165', u'ENSMUSG00000022831', u'ENSMUSG00000056220', u'ENSMUSG00000004508', u'ENSMUSG00000024927', u'ENSMUSG00000052593', u'ENSMUSG00000028639', u'ENSMUSG00000022346', u'ENSMUSG00000004266', u'ENSMUSG00000042228', u'ENSMUSG00000024621', u'ENSMUSG00000045730', u'ENSMUSG00000021109', u'ENSMUSG00000027506', u'ENSMUSG00000000751', u'ENSMUSG00000025403', u'ENSMUSG00000027087', u'ENSMUSG00000006728', u'ENSMUSG00000028333', u'ENSMUSG00000027947', u'ENSMUSG00000032492', u'ENSMUSG00000006356', u'ENSMUSG00000003283', u'ENSMUSG00000030538', u'ENSMUSG00000027803', u'ENSMUSG00000030760', u'ENSMUSG00000062960', u'ENSMUSG00000029838', u'ENSMUSG00000029175', u'ENSMUSG00000033220', u'ENSMUSG00000004040', u'ENSMUSG00000005373', u'ENSMUSG00000014599', u'ENSMUSG00000026628', u'ENSMUSG00000022146', u'ENSMUSG00000002603', u'ENSMUSG00000037846']), symbols=u'Acer3 Adam17 Adrb2 Anp32b Atf3 Ccnd3 Cdk4 Cib1 Crip2 Csf1 Csf1r Gab2 Hck Hcls1 Hif1a Il6ra Itgav Kdr Lyn Mlxipl Myc Osmr Pla2g4a Pth1r Ptn Ptpn6 Rac2 Rela Rpa1 Rtkn2 Shmt2 Slc35f6 Stat3 Tgfb1 Tpd52 Wwtr1 Ybx1', symbol_set=set([u'Acer3', 'Slc35f6', u'Rac2', u'Il6ra', u'Ccnd3', u'Gab2', u'Itgav', u'Hcls1', u'Hck', u'Rela', u'Tgfb1', u'Tpd52', u'Rpa1', u'Csf1', u'Hif1a', u'Anp32b', 
u'Adam17', u'Ybx1', u'Atf3', u'Stat3', u'Wwtr1', u'Rtkn2', u'Csf1r', u'Adrb2', u'Osmr', u'Pla2g4a', u'Lyn', u'Crip2', u'Pth1r', u'Ptn', u'Ptpn6', u'Mlxipl', u'Cib1', u'Cdk4', u'Myc', u'Shmt2', u'Kdr']), geneids='ENSMUSG00000034165 ENSMUSG00000022831 ENSMUSG00000056220 ENSMUSG00000062960 ENSMUSG00000004508 ENSMUSG00000024927 ENSMUSG00000052593 ENSMUSG00000028639 ENSMUSG00000022346 ENSMUSG00000004266 ENSMUSG00000002603 ENSMUSG00000024621 ENSMUSG00000045730 ENSMUSG00000021109 ENSMUSG00000027506 ENSMUSG00000000751 ENSMUSG00000025403 ENSMUSG00000027087 ENSMUSG00000022146 ENSMUSG00000028333 ENSMUSG00000027947 ENSMUSG00000032492 ENSMUSG00000006356 ENSMUSG00000042228 ENSMUSG00000003283 ENSMUSG00000030538 ENSMUSG00000027803 ENSMUSG00000030760 ENSMUSG00000037846 ENSMUSG00000029838 ENSMUSG00000029175 ENSMUSG00000014599 ENSMUSG00000004040 ENSMUSG00000005373 ENSMUSG00000033220 ENSMUSG00000026628 ENSMUSG00000006728', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=88, D1='A', GO_name='positive regulation of cell proliferation'),
Nt(p_uncorrected=2.149389719025013e-08, study_count=14, study_n=794, namespace='biological_process', p_fdr_bh=2.5050310486883053e-05, study_items=set([u'ENSMUSG00000079227', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000027995', u'ENSMUSG00000021408', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000031101', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000058715', u'ENSMUSG00000030341', u'ENSMUSG00000067212', u'ENSMUSG00000006519']), NS='BP', pop_items=set([u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000031101', u'ENSMUSG00000030793', u'ENSMUSG00000031639', u'ENSMUSG00000058715', u'ENSMUSG00000056529', u'ENSMUSG00000006519', u'ENSMUSG00000041417', u'ENSMUSG00000032508', u'ENSMUSG00000066551', u'ENSMUSG00000047123', u'ENSMUSG00000016024', u'ENSMUSG00000037523', u'ENSMUSG00000035385', u'ENSMUSG00000074582', u'ENSMUSG00000028800', u'ENSMUSG00000019777', u'ENSMUSG00000026234', u'ENSMUSG00000041135', u'ENSMUSG00000067212', u'ENSMUSG00000039005', u'ENSMUSG00000028059', u'ENSMUSG00000000982', u'ENSMUSG00000039217', u'ENSMUSG00000021408', u'ENSMUSG00000027551', u'ENSMUSG00000030341', u'ENSMUSG00000022037', u'ENSMUSG00000024789', u'ENSMUSG00000042682', u'ENSMUSG00000022708', u'ENSMUSG00000051439', u'ENSMUSG00000027995', u'ENSMUSG00000027347', u'ENSMUSG00000001123', u'ENSMUSG00000038058', u'ENSMUSG00000018930', u'ENSMUSG00000079227', u'ENSMUSG00000032041']), pop_n=13836, p_sm_bonferroni=0.0003256540363294797, is_obsolete=False, GO='GO:0032760', name='positive regulation of tumor necrosis factor production', pop_count=40, alt_ids=[], level=7, depth=7, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000027995', u'ENSMUSG00000021408', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000031101', u'ENSMUSG00000038058', u'ENSMUSG00000067212', u'ENSMUSG00000031639', u'ENSMUSG00000058715', u'ENSMUSG00000030341', u'ENSMUSG00000079227', 
u'ENSMUSG00000006519']), symbols=u'Card9 Ccr5 Cyba Fcer1g H2-T23 Nod1 Pycard Ripk1 Sash3 Ticam1 Tlr2 Tlr3 Tlr9 Tnfrsf1a', symbol_set=set([u'Ripk1', u'Fcer1g', u'Ticam1', 'H2-T23', u'Tnfrsf1a', u'Tlr2', u'Tlr3', u'Card9', u'Cyba', u'Nod1', u'Ccr5', u'Pycard', u'Tlr9', u'Sash3']), geneids='ENSMUSG00000079227 ENSMUSG00000045322 ENSMUSG00000026928 ENSMUSG00000027995 ENSMUSG00000021408 ENSMUSG00000030793 ENSMUSG00000030341 ENSMUSG00000038058 ENSMUSG00000067212 ENSMUSG00000031639 ENSMUSG00000058715 ENSMUSG00000047123 ENSMUSG00000031101 ENSMUSG00000006519', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=2, D1='A', GO_name='positive regulation of tumor necrosis factor production'),
Nt(p_uncorrected=0.00018831346875026884, study_count=3, study_n=794, namespace='biological_process', p_fdr_bh=0.04322935401568671, study_items=set([u'ENSMUSG00000015947', u'ENSMUSG00000022216', u'ENSMUSG00000079197']), NS='BP', pop_items=set([u'ENSMUSG00000015947', u'ENSMUSG00000022216', u'ENSMUSG00000079197']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0019884', name='antigen processing and presentation of exogenous antigen', pop_count=3, alt_ids=[], level=3, depth=3, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000015947', u'ENSMUSG00000022216', u'ENSMUSG00000079197']), symbols=u'Fcgr1 Psme1 Psme2', symbol_set=set([u'Psme2', u'Fcgr1', u'Psme1']), geneids='ENSMUSG00000015947 ENSMUSG00000022216 ENSMUSG00000079197', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=9, D1='L', GO_name='antigen processing and presentation of exogenous antigen'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0034097', name='', pop_count='', alt_ids='', level=4, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=59, D1='F', GO_name='response to cytokine'),
Nt(p_uncorrected=0.00021235432852051886, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.04731441810903502, study_items=set([u'ENSMUSG00000079547', u'ENSMUSG00000058715', u'ENSMUSG00000031838', u'ENSMUSG00000036594', u'ENSMUSG00000073421']), NS='BP', pop_items=set([u'ENSMUSG00000037649', u'ENSMUSG00000031838', u'ENSMUSG00000024610', u'ENSMUSG00000036594', u'ENSMUSG00000027164', u'ENSMUSG00000060586', u'ENSMUSG00000036908', u'ENSMUSG00000026656', u'ENSMUSG00000079547', u'ENSMUSG00000058715', u'ENSMUSG00000073421']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0019886', name='antigen processing and presentation of exogenous peptide antigen via MHC class II', pop_count=11, alt_ids=['GO:0042591', 'GO:0048005'], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000079547', u'ENSMUSG00000058715', u'ENSMUSG00000031838', u'ENSMUSG00000036594', u'ENSMUSG00000073421']), symbols=u'Fcer1g H2-Aa H2-Ab1 H2-DMb1 Ifi30', symbol_set=set(['H2-Ab1', 'H2-DMb1', 'H2-Aa', u'Fcer1g', u'Ifi30']), geneids='ENSMUSG00000079547 ENSMUSG00000058715 ENSMUSG00000031838 ENSMUSG00000036594 ENSMUSG00000073421', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='L', GO_name='antigen processing and presentation of exogenous peptide antigen via MHC class II'),
Nt(p_uncorrected=5.500703269831228e-09, study_count=21, study_n=794, namespace='biological_process', p_fdr_bh=8.334115524121294e-06, study_items=set([u'ENSMUSG00000024927', u'ENSMUSG00000028191', u'ENSMUSG00000038058', u'ENSMUSG00000045322', u'ENSMUSG00000000290', u'ENSMUSG00000002603', u'ENSMUSG00000026321', u'ENSMUSG00000021408', u'ENSMUSG00000000275', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000079110', u'ENSMUSG00000026942', u'ENSMUSG00000021108', u'ENSMUSG00000031639', u'ENSMUSG00000030538', u'ENSMUSG00000037860', u'ENSMUSG00000032691', u'ENSMUSG00000047098', u'ENSMUSG00000027995', u'ENSMUSG00000039853']), NS='BP', pop_items=set([u'ENSMUSG00000031537', u'ENSMUSG00000035235', u'ENSMUSG00000025034', u'ENSMUSG00000045322', u'ENSMUSG00000074781', u'ENSMUSG00000024079', u'ENSMUSG00000068290', u'ENSMUSG00000030793', u'ENSMUSG00000031639', u'ENSMUSG00000052688', u'ENSMUSG00000026942', u'ENSMUSG00000021180', u'ENSMUSG00000032185', u'ENSMUSG00000023755', u'ENSMUSG00000021994', u'ENSMUSG00000018548', u'ENSMUSG00000047098', u'ENSMUSG00000046532', u'ENSMUSG00000027466', u'ENSMUSG00000024927', u'ENSMUSG00000037405', u'ENSMUSG00000004221', u'ENSMUSG00000032688', u'ENSMUSG00000032508', u'ENSMUSG00000024997', u'ENSMUSG00000029238', u'ENSMUSG00000002603', u'ENSMUSG00000029840', u'ENSMUSG00000000266', u'ENSMUSG00000029287', u'ENSMUSG00000000127', u'ENSMUSG00000047123', u'ENSMUSG00000079110', u'ENSMUSG00000057113', u'ENSMUSG00000027187', u'ENSMUSG00000026778', u'ENSMUSG00000039853', u'ENSMUSG00000021108', u'ENSMUSG00000022037', u'ENSMUSG00000021024', u'ENSMUSG00000037860', u'ENSMUSG00000029053', u'ENSMUSG00000041135', u'ENSMUSG00000060477', u'ENSMUSG00000039005', u'ENSMUSG00000031392', u'ENSMUSG00000028059', u'ENSMUSG00000028191', u'ENSMUSG00000051675', u'ENSMUSG00000028179', u'ENSMUSG00000030265', u'ENSMUSG00000033618', u'ENSMUSG00000021408', u'ENSMUSG00000033430', u'ENSMUSG00000032497', u'ENSMUSG00000022514', u'ENSMUSG00000047921', u'ENSMUSG00000030538', 
u'ENSMUSG00000002688', u'ENSMUSG00000022255', u'ENSMUSG00000061878', u'ENSMUSG00000025199', u'ENSMUSG00000041343', u'ENSMUSG00000041187', u'ENSMUSG00000026305', u'ENSMUSG00000078923', u'ENSMUSG00000034457', u'ENSMUSG00000026321', u'ENSMUSG00000000290', u'ENSMUSG00000000275', u'ENSMUSG00000027164', u'ENSMUSG00000032691', u'ENSMUSG00000024617', u'ENSMUSG00000026031', u'ENSMUSG00000041000', u'ENSMUSG00000038058', u'ENSMUSG00000037643', u'ENSMUSG00000026171', u'ENSMUSG00000025473', u'ENSMUSG00000052889', u'ENSMUSG00000027995', u'ENSMUSG00000032041', u'ENSMUSG00000038147']), pop_n=13836, p_sm_bonferroni=8.334115524121294e-05, is_obsolete=False, GO='GO:0051092', name='positive regulation of NF-kappaB transcription factor activity', pop_count=83, alt_ids=[], level=5, depth=12, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000028191', u'ENSMUSG00000021108', u'ENSMUSG00000045322', u'ENSMUSG00000021408', u'ENSMUSG00000030793', u'ENSMUSG00000026942', u'ENSMUSG00000031639', u'ENSMUSG00000030538', u'ENSMUSG00000032691', u'ENSMUSG00000047098', u'ENSMUSG00000039853', u'ENSMUSG00000024927', u'ENSMUSG00000000290', u'ENSMUSG00000002603', u'ENSMUSG00000026321', u'ENSMUSG00000000275', u'ENSMUSG00000047123', u'ENSMUSG00000079110', u'ENSMUSG00000038058', u'ENSMUSG00000037860', u'ENSMUSG00000027995']), symbols=u'Aim2 Bcl10 Capn3 Cib1 Itgb2 Nlrp3 Nod1 Prkch Pycard Rela Ripk1 Rnf31 Tgfb1 Ticam1 Tlr2 Tlr3 Tlr9 Tnfrsf11a Traf2 Trim14 Trim25', symbol_set=set([u'Nod1', u'Prkch', u'Traf2', u'Itgb2', u'Trim14', u'Nlrp3', u'Tgfb1', u'Trim25', u'Tlr2', u'Aim2', u'Ripk1', u'Tlr3', u'Cib1', u'Capn3', u'Tnfrsf11a', u'Pycard', u'Rela', u'Tlr9', u'Bcl10', u'Ticam1', u'Rnf31']), geneids='ENSMUSG00000024927 ENSMUSG00000028191 ENSMUSG00000038058 ENSMUSG00000045322 ENSMUSG00000000290 ENSMUSG00000002603 ENSMUSG00000026321 ENSMUSG00000021408 ENSMUSG00000000275 ENSMUSG00000030793 ENSMUSG00000032691 ENSMUSG00000079110 ENSMUSG00000026942 ENSMUSG00000021108 ENSMUSG00000031639 
ENSMUSG00000030538 ENSMUSG00000037860 ENSMUSG00000047123 ENSMUSG00000047098 ENSMUSG00000027995 ENSMUSG00000039853', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=1, D1='A', GO_name='positive regulation of NF-kappaB transcription factor activity'),
Nt(p_uncorrected=1.2060839744647535e-10, study_count=47, study_n=794, namespace='biological_process', p_fdr_bh=5.120588891920086e-07, study_items=set([u'ENSMUSG00000045322', u'ENSMUSG00000031103', u'ENSMUSG00000026928', u'ENSMUSG00000035279', u'ENSMUSG00000030793', u'ENSMUSG00000070390', u'ENSMUSG00000030966', u'ENSMUSG00000055172', u'ENSMUSG00000031639', u'ENSMUSG00000009585', u'ENSMUSG00000058715', u'ENSMUSG00000047123', u'ENSMUSG00000006519', u'ENSMUSG00000024927', u'ENSMUSG00000043496', u'ENSMUSG00000029826', u'ENSMUSG00000024621', u'ENSMUSG00000031805', u'ENSMUSG00000015947', u'ENSMUSG00000039853', u'ENSMUSG00000025225', u'ENSMUSG00000071203', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000032312', u'ENSMUSG00000020707', u'ENSMUSG00000072115', u'ENSMUSG00000024349', u'ENSMUSG00000003283', u'ENSMUSG00000033538', u'ENSMUSG00000029217', u'ENSMUSG00000042228', u'ENSMUSG00000036896', u'ENSMUSG00000025492', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000014599', u'ENSMUSG00000056612', u'ENSMUSG00000027639', u'ENSMUSG00000000275', u'ENSMUSG00000043279', u'ENSMUSG00000032690', u'ENSMUSG00000032691', u'ENSMUSG00000024457', u'ENSMUSG00000038058', u'ENSMUSG00000021423', u'ENSMUSG00000027995']), NS='BP', pop_items=set([u'ENSMUSG00000028099', u'ENSMUSG00000035834', u'ENSMUSG00000045322', u'ENSMUSG00000028874', u'ENSMUSG00000031103', u'ENSMUSG00000021703', u'ENSMUSG00000024079', u'ENSMUSG00000074151', u'ENSMUSG00000016481', u'ENSMUSG00000055172', u'ENSMUSG00000036887', u'ENSMUSG00000009585', u'ENSMUSG00000020455', u'ENSMUSG00000047123', u'ENSMUSG00000022476', u'ENSMUSG00000024371', u'ENSMUSG00000042228', u'ENSMUSG00000000266', u'ENSMUSG00000017707', u'ENSMUSG00000071369', u'ENSMUSG00000026399', u'ENSMUSG00000037523', u'ENSMUSG00000038628', u'ENSMUSG00000020115', u'ENSMUSG00000076617', u'ENSMUSG00000000776', u'ENSMUSG00000019843', u'ENSMUSG00000035235', u'ENSMUSG00000015217', u'ENSMUSG00000028530', u'ENSMUSG00000039236', u'ENSMUSG00000036908', 
u'ENSMUSG00000024164', u'ENSMUSG00000026842', u'ENSMUSG00000020275', u'ENSMUSG00000024045', u'ENSMUSG00000015340', u'ENSMUSG00000075705', u'ENSMUSG00000025280', u'ENSMUSG00000022791', u'ENSMUSG00000005566', u'ENSMUSG00000026596', u'ENSMUSG00000029605', u'ENSMUSG00000026896', u'ENSMUSG00000025199', u'ENSMUSG00000014599', u'ENSMUSG00000022607', u'ENSMUSG00000030966', u'ENSMUSG00000032312', u'ENSMUSG00000051439', u'ENSMUSG00000038058', u'ENSMUSG00000025512', u'ENSMUSG00000050199', u'ENSMUSG00000026154', u'ENSMUSG00000001366', u'ENSMUSG00000000275', u'ENSMUSG00000027951', u'ENSMUSG00000021326', u'ENSMUSG00000070390', u'ENSMUSG00000036986', u'ENSMUSG00000025139', u'ENSMUSG00000004707', u'ENSMUSG00000025034', u'ENSMUSG00000026883', u'ENSMUSG00000058715', u'ENSMUSG00000032691', u'ENSMUSG00000018446', u'ENSMUSG00000006519', u'ENSMUSG00000027995', u'ENSMUSG00000028885', u'ENSMUSG00000029826', u'ENSMUSG00000027646', u'ENSMUSG00000003184', u'ENSMUSG00000031805', u'ENSMUSG00000016024', u'ENSMUSG00000054072', u'ENSMUSG00000051675', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000004933', u'ENSMUSG00000031392', u'ENSMUSG00000060591', u'ENSMUSG00000055204', u'ENSMUSG00000041000', u'ENSMUSG00000045932', u'ENSMUSG00000072115', u'ENSMUSG00000033777', u'ENSMUSG00000059883', u'ENSMUSG00000027598', u'ENSMUSG00000003283', u'ENSMUSG00000033124', u'ENSMUSG00000044583', u'ENSMUSG00000002688', u'ENSMUSG00000039853', u'ENSMUSG00000078942', u'ENSMUSG00000078945', u'ENSMUSG00000027639', u'ENSMUSG00000033307', u'ENSMUSG00000034889', u'ENSMUSG00000000787', u'ENSMUSG00000034453', u'ENSMUSG00000020641', u'ENSMUSG00000024457', u'ENSMUSG00000023992', u'ENSMUSG00000025532', u'ENSMUSG00000000386', u'ENSMUSG00000052384', u'ENSMUSG00000070034', u'ENSMUSG00000027427', u'ENSMUSG00000026928', u'ENSMUSG00000035279', u'ENSMUSG00000030793', u'ENSMUSG00000021457', u'ENSMUSG00000074896', u'ENSMUSG00000024948', u'ENSMUSG00000031639', u'ENSMUSG00000020823', u'ENSMUSG00000028041', 
u'ENSMUSG00000025498', u'ENSMUSG00000032508', u'ENSMUSG00000002602', u'ENSMUSG00000053647', u'ENSMUSG00000053158', u'ENSMUSG00000024621', u'ENSMUSG00000066232', u'ENSMUSG00000036896', u'ENSMUSG00000025225', u'ENSMUSG00000071203', u'ENSMUSG00000030880', u'ENSMUSG00000032175', u'ENSMUSG00000041135', u'ENSMUSG00000014932', u'ENSMUSG00000021624', u'ENSMUSG00000020707', u'ENSMUSG00000017830', u'ENSMUSG00000046879', u'ENSMUSG00000024349', u'ENSMUSG00000033454', u'ENSMUSG00000022514', u'ENSMUSG00000022887', u'ENSMUSG00000023973', u'ENSMUSG00000043496', u'ENSMUSG00000057982', u'ENSMUSG00000036712', u'ENSMUSG00000025492', u'ENSMUSG00000032109', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000032344', u'ENSMUSG00000059456', u'ENSMUSG00000026117', u'ENSMUSG00000040296', u'ENSMUSG00000032690', u'ENSMUSG00000056851', u'ENSMUSG00000038160', u'ENSMUSG00000022043', u'ENSMUSG00000034317', u'ENSMUSG00000001128', u'ENSMUSG00000029771', u'ENSMUSG00000013707', u'ENSMUSG00000032661', u'ENSMUSG00000038521', u'ENSMUSG00000020399', u'ENSMUSG00000031750', u'ENSMUSG00000046718', u'ENSMUSG00000024927', u'ENSMUSG00000031537', u'ENSMUSG00000024483', u'ENSMUSG00000050335', u'ENSMUSG00000032688', u'ENSMUSG00000026471', u'ENSMUSG00000032905', u'ENSMUSG00000038517', u'ENSMUSG00000023341', u'ENSMUSG00000000127', u'ENSMUSG00000023224', u'ENSMUSG00000015947', u'ENSMUSG00000027514', u'ENSMUSG00000002983', u'ENSMUSG00000039005', u'ENSMUSG00000041827', u'ENSMUSG00000028059', u'ENSMUSG00000054717', u'ENSMUSG00000018899', u'ENSMUSG00000021277', u'ENSMUSG00000024789', u'ENSMUSG00000033538', u'ENSMUSG00000028291', u'ENSMUSG00000029217', u'ENSMUSG00000022575', u'ENSMUSG00000066839', u'ENSMUSG00000029561', u'ENSMUSG00000029915', u'ENSMUSG00000056612', u'ENSMUSG00000044827', u'ENSMUSG00000028163', u'ENSMUSG00000043279', u'ENSMUSG00000026365', u'ENSMUSG00000032322', u'ENSMUSG00000034459', u'ENSMUSG00000039936', u'ENSMUSG00000021423', u'ENSMUSG00000038147', u'ENSMUSG00000050132', 
u'ENSMUSG00000032041', u'ENSMUSG00000046034']), pop_n=13836, p_sm_bonferroni=1.8273378297115479e-06, is_obsolete=False, GO='GO:0045087', name='innate immune response', pop_count=206, alt_ids=['GO:0002226'], level=3, depth=4, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000045322', u'ENSMUSG00000031103', u'ENSMUSG00000026928', u'ENSMUSG00000035279', u'ENSMUSG00000030793', u'ENSMUSG00000043279', u'ENSMUSG00000000275', u'ENSMUSG00000055172', u'ENSMUSG00000031639', u'ENSMUSG00000009585', u'ENSMUSG00000058715', u'ENSMUSG00000031805', u'ENSMUSG00000006519', u'ENSMUSG00000024927', u'ENSMUSG00000043496', u'ENSMUSG00000029826', u'ENSMUSG00000024621', u'ENSMUSG00000047123', u'ENSMUSG00000015947', u'ENSMUSG00000036896', u'ENSMUSG00000025225', u'ENSMUSG00000071203', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000032312', u'ENSMUSG00000020707', u'ENSMUSG00000072115', u'ENSMUSG00000025492', u'ENSMUSG00000003283', u'ENSMUSG00000033538', u'ENSMUSG00000029217', u'ENSMUSG00000042228', u'ENSMUSG00000039853', u'ENSMUSG00000024349', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000014599', u'ENSMUSG00000056612', u'ENSMUSG00000027639', u'ENSMUSG00000030966', u'ENSMUSG00000070390', u'ENSMUSG00000032690', u'ENSMUSG00000032691', u'ENSMUSG00000024457', u'ENSMUSG00000038058', u'ENSMUSG00000021423', u'ENSMUSG00000027995']), symbols=u'Aim2 Ang Apobec3 B2m C1qb C1qc C1ra Card9 Casp4 Csf1 Csf1r Csk Cyba Elf4 Fcer1g Fcgr1 Hck Herc6 Ifitm3 Jak3 Ly86 Lyn Naip5 Nfkb2 Nlrp1b Nlrp3 Nod1 Oas2 Ppp1r14b Pycard Rela Rnf135 Samhd1 Ssc5d Tec Ticam1 Tlr2 Tlr3 Tlr9 Tmem173 Tril Trim14 Trim21 Trim25 Trim26 Trim56 Zc3hav1', symbol_set=set([u'Ifitm3', u'Casp4', u'Tec', u'Csf1', u'Csk', u'Naip5', u'Aim2', u'Apobec3', u'Card9', u'Hck', u'Pycard', u'Nfkb2', u'Rela', u'Ticam1', u'Trim14', 'Ssc5d', u'C1qc', u'C1qb', u'B2m', u'Oas2', u'Herc6', u'Trim56', u'Ppp1r14b', u'Tmem173', u'Zc3hav1', u'Nlrp3', u'Ly86', u'Csf1r', u'Elf4', u'Tril', u'Fcer1g', u'Ang', u'Samhd1', 
u'Nlrp1b', u'Trim25', u'Trim26', u'C1ra', u'Trim21', u'Tlr2', u'Tlr3', u'Cyba', u'Nod1', u'Jak3', u'Lyn', u'Tlr9', u'Fcgr1', u'Rnf135']), geneids='ENSMUSG00000045322 ENSMUSG00000031103 ENSMUSG00000026928 ENSMUSG00000035279 ENSMUSG00000030793 ENSMUSG00000070390 ENSMUSG00000030966 ENSMUSG00000055172 ENSMUSG00000031639 ENSMUSG00000009585 ENSMUSG00000058715 ENSMUSG00000047123 ENSMUSG00000006519 ENSMUSG00000024927 ENSMUSG00000043496 ENSMUSG00000029826 ENSMUSG00000024621 ENSMUSG00000031805 ENSMUSG00000015947 ENSMUSG00000039853 ENSMUSG00000025225 ENSMUSG00000071203 ENSMUSG00000037860 ENSMUSG00000036905 ENSMUSG00000032312 ENSMUSG00000020707 ENSMUSG00000072115 ENSMUSG00000025492 ENSMUSG00000003283 ENSMUSG00000033538 ENSMUSG00000029217 ENSMUSG00000042228 ENSMUSG00000036896 ENSMUSG00000024349 ENSMUSG00000060802 ENSMUSG00000027639 ENSMUSG00000014599 ENSMUSG00000027995 ENSMUSG00000029798 ENSMUSG00000000275 ENSMUSG00000043279 ENSMUSG00000032690 ENSMUSG00000032691 ENSMUSG00000024457 ENSMUSG00000038058 ENSMUSG00000021423 ENSMUSG00000056612', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=26, D1='FL', GO_name='innate immune response'),
Nt(p_uncorrected=3.5146529337187653e-06, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.0017177582773797745, study_items=set([u'ENSMUSG00000032691', u'ENSMUSG00000025888', u'ENSMUSG00000070390', u'ENSMUSG00000030793', u'ENSMUSG00000062210']), NS='BP', pop_items=set([u'ENSMUSG00000025888', u'ENSMUSG00000026471', u'ENSMUSG00000070390', u'ENSMUSG00000030793', u'ENSMUSG00000032691', u'ENSMUSG00000062210']), pop_n=13836, p_sm_bonferroni=0.05325050659877301, is_obsolete=False, GO='GO:0032611', name='interleukin-1 beta production', pop_count=6, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000062210', u'ENSMUSG00000025888', u'ENSMUSG00000070390', u'ENSMUSG00000030793', u'ENSMUSG00000032691']), symbols=u'Casp1 Nlrp1b Nlrp3 Pycard Tnfaip8', symbol_set=set([u'Pycard', u'Nlrp1b', u'Tnfaip8', u'Nlrp3', u'Casp1']), geneids='ENSMUSG00000062210 ENSMUSG00000025888 ENSMUSG00000070390 ENSMUSG00000030793 ENSMUSG00000032691', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='CJ', GO_name='interleukin-1 beta production'),
Nt(p_uncorrected=6.380037801368506e-05, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.01895371622128122, study_items=set([u'ENSMUSG00000037860', u'ENSMUSG00000025888', u'ENSMUSG00000033538', u'ENSMUSG00000070390', u'ENSMUSG00000071203']), NS='BP', pop_items=set([u'ENSMUSG00000078942', u'ENSMUSG00000025888', u'ENSMUSG00000078945', u'ENSMUSG00000070390', u'ENSMUSG00000033538', u'ENSMUSG00000071203', u'ENSMUSG00000010911', u'ENSMUSG00000037860', u'ENSMUSG00000022575']), pop_n=13836, p_sm_bonferroni=0.9666395272853423, is_obsolete=False, GO='GO:0070269', name='pyroptosis', pop_count=9, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000037860', u'ENSMUSG00000025888', u'ENSMUSG00000033538', u'ENSMUSG00000070390', u'ENSMUSG00000071203']), symbols=u'Aim2 Casp1 Casp4 Naip5 Nlrp1b', symbol_set=set([u'Naip5', u'Casp4', u'Casp1', u'Nlrp1b', u'Aim2']), geneids='ENSMUSG00000037860 ENSMUSG00000025888 ENSMUSG00000033538 ENSMUSG00000070390 ENSMUSG00000071203', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='BC', GO_name='pyroptosis'),
Nt(p_uncorrected=4.245077894981495e-05, study_count=6, study_n=794, namespace='biological_process', p_fdr_bh=0.014617539815196508, study_items=set([u'ENSMUSG00000025492', u'ENSMUSG00000069874', u'ENSMUSG00000030966', u'ENSMUSG00000036594', u'ENSMUSG00000078922', u'ENSMUSG00000018920']), NS='BP', pop_items=set([u'ENSMUSG00000025492', u'ENSMUSG00000004069', u'ENSMUSG00000046718', u'ENSMUSG00000069874', u'ENSMUSG00000060591', u'ENSMUSG00000030966', u'ENSMUSG00000025889', u'ENSMUSG00000036594', u'ENSMUSG00000078922', u'ENSMUSG00000026177', u'ENSMUSG00000018920', u'ENSMUSG00000038884', u'ENSMUSG00000060586']), pop_n=13836, p_sm_bonferroni=0.6431717518686463, is_obsolete=False, GO='GO:0034341', name='response to interferon-gamma', pop_count=13, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000078922', u'ENSMUSG00000025492', u'ENSMUSG00000018920', u'ENSMUSG00000069874', u'ENSMUSG00000030966', u'ENSMUSG00000036594']), symbols=u'Cxcl16 H2-Aa Ifitm3 Irgm2 Tgtp1 Trim21', symbol_set=set([u'Ifitm3', u'Tgtp1', 'H2-Aa', u'Trim21', u'Irgm2', u'Cxcl16']), geneids='ENSMUSG00000025492 ENSMUSG00000069874 ENSMUSG00000030966 ENSMUSG00000036594 ENSMUSG00000078922 ENSMUSG00000018920', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=1, D1='F', GO_name='response to interferon-gamma'),
Nt(p_uncorrected=6.718708756207653e-10, study_count=14, study_n=794, namespace='biological_process', p_fdr_bh=1.6965859394217025e-06, study_items=set([u'ENSMUSG00000024927', u'ENSMUSG00000015396', u'ENSMUSG00000058163', u'ENSMUSG00000002603', u'ENSMUSG00000069874', u'ENSMUSG00000035279', u'ENSMUSG00000030341', u'ENSMUSG00000078920', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000090942', u'ENSMUSG00000073555', u'ENSMUSG00000079227', u'ENSMUSG00000069893']), NS='BP', pop_items=set([u'ENSMUSG00000029417', u'ENSMUSG00000058163', u'ENSMUSG00000026656', u'ENSMUSG00000035279', u'ENSMUSG00000037321', u'ENSMUSG00000078920', u'ENSMUSG00000078921', u'ENSMUSG00000031639', u'ENSMUSG00000073555', u'ENSMUSG00000024927', u'ENSMUSG00000015396', u'ENSMUSG00000022556', u'ENSMUSG00000061353', u'ENSMUSG00000002603', u'ENSMUSG00000023078', u'ENSMUSG00000027447', u'ENSMUSG00000068606', u'ENSMUSG00000069874', u'ENSMUSG00000046879', u'ENSMUSG00000030120', u'ENSMUSG00000030341', u'ENSMUSG00000078853', u'ENSMUSG00000034855', u'ENSMUSG00000029371', u'ENSMUSG00000069893', u'ENSMUSG00000024610', u'ENSMUSG00000044827', u'ENSMUSG00000090942', u'ENSMUSG00000018819', u'ENSMUSG00000038058', u'ENSMUSG00000079227', u'ENSMUSG00000024308']), pop_n=13836, p_sm_bonferroni=1.0179515636530216e-05, is_obsolete=False, GO='GO:0006952', name='defense response', pop_count=32, alt_ids=['GO:0002217', 'GO:0042829'], level=3, depth=3, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000024927', u'ENSMUSG00000015396', u'ENSMUSG00000058163', u'ENSMUSG00000002603', u'ENSMUSG00000069874', u'ENSMUSG00000035279', u'ENSMUSG00000030341', u'ENSMUSG00000078920', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000090942', u'ENSMUSG00000069893', u'ENSMUSG00000079227', u'ENSMUSG00000073555']), symbols=u'9930111J21Rik1 Ccr5 Cd83 F830016B08Rik Gm4951 Gm5431 Ifi47 Irgm2 Nod1 Rela Ssc5d Tgfb1 Tlr3 Tnfrsf1a', symbol_set=set([u'Tgfb1', u'9930111J21Rik1', u'F830016B08Rik', 'Ssc5d', 
u'Cd83', u'Tnfrsf1a', u'Tlr3', u'Nod1', u'Ccr5', u'Ifi47', u'Gm5431', u'Rela', u'Irgm2', u'Gm4951']), geneids='ENSMUSG00000024927 ENSMUSG00000015396 ENSMUSG00000058163 ENSMUSG00000002603 ENSMUSG00000069874 ENSMUSG00000035279 ENSMUSG00000030341 ENSMUSG00000078920 ENSMUSG00000038058 ENSMUSG00000031639 ENSMUSG00000090942 ENSMUSG00000073555 ENSMUSG00000079227 ENSMUSG00000069893', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=93, D1='F', GO_name='defense response'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0006950', name='', pop_count='', alt_ids='', level=2, depth=2, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=491, D1='F', GO_name='response to stress'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0098542', name='', pop_count='', alt_ids='', level=4, depth=5, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=37, D1='F', GO_name='defense response to other organism'),
Nt(p_uncorrected=5.139411452870194e-11, study_count=38, study_n=794, namespace='biological_process', p_fdr_bh=3.893361146121815e-07, study_items=set([u'ENSMUSG00000045322', u'ENSMUSG00000030793', u'ENSMUSG00000070390', u'ENSMUSG00000063415', u'ENSMUSG00000016528', u'ENSMUSG00000031639', u'ENSMUSG00000062585', u'ENSMUSG00000006519', u'ENSMUSG00000024927', u'ENSMUSG00000047557', u'ENSMUSG00000002603', u'ENSMUSG00000043496', u'ENSMUSG00000042286', u'ENSMUSG00000024621', u'ENSMUSG00000047250', u'ENSMUSG00000047123', u'ENSMUSG00000025083', u'ENSMUSG00000025225', u'ENSMUSG00000037731', u'ENSMUSG00000037860', u'ENSMUSG00000027087', u'ENSMUSG00000079227', u'ENSMUSG00000029468', u'ENSMUSG00000071203', u'ENSMUSG00000030341', u'ENSMUSG00000003283', u'ENSMUSG00000033538', u'ENSMUSG00000016495', u'ENSMUSG00000042228', u'ENSMUSG00000052821', u'ENSMUSG00000014599', u'ENSMUSG00000022040', u'ENSMUSG00000009281', u'ENSMUSG00000032691', u'ENSMUSG00000015950', u'ENSMUSG00000021423', u'ENSMUSG00000027995', u'ENSMUSG00000024778']), NS='BP', pop_items=set([u'ENSMUSG00000029199', u'ENSMUSG00000045322', u'ENSMUSG00000028029', u'ENSMUSG00000039145', u'ENSMUSG00000028108', u'ENSMUSG00000016528', u'ENSMUSG00000026883', u'ENSMUSG00000031778', u'ENSMUSG00000029053', u'ENSMUSG00000022475', u'ENSMUSG00000008845', u'ENSMUSG00000040552', u'ENSMUSG00000029371', u'ENSMUSG00000023078', u'ENSMUSG00000075316', u'ENSMUSG00000004698', u'ENSMUSG00000026778', u'ENSMUSG00000020592', u'ENSMUSG00000060477', u'ENSMUSG00000008855', u'ENSMUSG00000024164', u'ENSMUSG00000020275', u'ENSMUSG00000038128', u'ENSMUSG00000015340', u'ENSMUSG00000039936', u'ENSMUSG00000027399', u'ENSMUSG00000042228', u'ENSMUSG00000006445', u'ENSMUSG00000025199', u'ENSMUSG00000000982', u'ENSMUSG00000052821', u'ENSMUSG00000040152', u'ENSMUSG00000014599', u'ENSMUSG00000061762', u'ENSMUSG00000007836', u'ENSMUSG00000066551', u'ENSMUSG00000025512', u'ENSMUSG00000054008', u'ENSMUSG00000024778', u'ENSMUSG00000019850', u'ENSMUSG00000021180', 
u'ENSMUSG00000032487', u'ENSMUSG00000021680', u'ENSMUSG00000031425', u'ENSMUSG00000025139', u'ENSMUSG00000015839', u'ENSMUSG00000062585', u'ENSMUSG00000056529', u'ENSMUSG00000005672', u'ENSMUSG00000006519', u'ENSMUSG00000028885', u'ENSMUSG00000054509', u'ENSMUSG00000027312', u'ENSMUSG00000047123', u'ENSMUSG00000035352', u'ENSMUSG00000037860', u'ENSMUSG00000035356', u'ENSMUSG00000027087', u'ENSMUSG00000039217', u'ENSMUSG00000033777', u'ENSMUSG00000020400', u'ENSMUSG00000064246', u'ENSMUSG00000027221', u'ENSMUSG00000009281', u'ENSMUSG00000044583', u'ENSMUSG00000023915', u'ENSMUSG00000034855', u'ENSMUSG00000002688', u'ENSMUSG00000061878', u'ENSMUSG00000023913', u'ENSMUSG00000078942', u'ENSMUSG00000032508', u'ENSMUSG00000033307', u'ENSMUSG00000053004', u'ENSMUSG00000003283', u'ENSMUSG00000018476', u'ENSMUSG00000037872', u'ENSMUSG00000079227', u'ENSMUSG00000052384', u'ENSMUSG00000029417', u'ENSMUSG00000042286', u'ENSMUSG00000030793', u'ENSMUSG00000028577', u'ENSMUSG00000019122', u'ENSMUSG00000031639', u'ENSMUSG00000033467', u'ENSMUSG00000029026', u'ENSMUSG00000047557', u'ENSMUSG00000002602', u'ENSMUSG00000002603', u'ENSMUSG00000053647', u'ENSMUSG00000024621', u'ENSMUSG00000047250', u'ENSMUSG00000023031', u'ENSMUSG00000018927', u'ENSMUSG00000025225', u'ENSMUSG00000071203', u'ENSMUSG00000001729', u'ENSMUSG00000041135', u'ENSMUSG00000021624', u'ENSMUSG00000008318', u'ENSMUSG00000006344', u'ENSMUSG00000055633', u'ENSMUSG00000022074', u'ENSMUSG00000067586', u'ENSMUSG00000022514', u'ENSMUSG00000033350', u'ENSMUSG00000078945', u'ENSMUSG00000031536', u'ENSMUSG00000059866', u'ENSMUSG00000051439', u'ENSMUSG00000022040', u'ENSMUSG00000026117', u'ENSMUSG00000070390', u'ENSMUSG00000032691', u'ENSMUSG00000028964', u'ENSMUSG00000015950', u'ENSMUSG00000018932', u'ENSMUSG00000018930', u'ENSMUSG00000032911', u'ENSMUSG00000030339', u'ENSMUSG00000027684', u'ENSMUSG00000020573', u'ENSMUSG00000024952', u'ENSMUSG00000031750', u'ENSMUSG00000022508', u'ENSMUSG00000024793', 
u'ENSMUSG00000063415', u'ENSMUSG00000024927', u'ENSMUSG00000010051', u'ENSMUSG00000010054', u'ENSMUSG00000043496', u'ENSMUSG00000031681', u'ENSMUSG00000025083', u'ENSMUSG00000048376', u'ENSMUSG00000040451', u'ENSMUSG00000002983', u'ENSMUSG00000027358', u'ENSMUSG00000039004', u'ENSMUSG00000039005', u'ENSMUSG00000038264', u'ENSMUSG00000054717', u'ENSMUSG00000029468', u'ENSMUSG00000031537', u'ENSMUSG00000030341', u'ENSMUSG00000024789', u'ENSMUSG00000033538', u'ENSMUSG00000016495', u'ENSMUSG00000037731', u'ENSMUSG00000024781', u'ENSMUSG00000022575', u'ENSMUSG00000033885', u'ENSMUSG00000052430', u'ENSMUSG00000035385', u'ENSMUSG00000025473', u'ENSMUSG00000027995', u'ENSMUSG00000044827', u'ENSMUSG00000020399', u'ENSMUSG00000027858', u'ENSMUSG00000026177', u'ENSMUSG00000032322', u'ENSMUSG00000021936', u'ENSMUSG00000021423', u'ENSMUSG00000032041', u'ENSMUSG00000028599']), pop_n=13836, p_sm_bonferroni=7.786722292243632e-07, is_obsolete=False, GO='GO:0006954', name='inflammatory response', pop_count=165, alt_ids=[], level=4, depth=4, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000045322', u'ENSMUSG00000030793', u'ENSMUSG00000063415', u'ENSMUSG00000016528', u'ENSMUSG00000031639', u'ENSMUSG00000062585', u'ENSMUSG00000006519', u'ENSMUSG00000024927', u'ENSMUSG00000027995', u'ENSMUSG00000047557', u'ENSMUSG00000002603', u'ENSMUSG00000043496', u'ENSMUSG00000042286', u'ENSMUSG00000024621', u'ENSMUSG00000047250', u'ENSMUSG00000047123', u'ENSMUSG00000025083', u'ENSMUSG00000025225', u'ENSMUSG00000037731', u'ENSMUSG00000037860', u'ENSMUSG00000027087', u'ENSMUSG00000029468', u'ENSMUSG00000071203', u'ENSMUSG00000030341', u'ENSMUSG00000003283', u'ENSMUSG00000033538', u'ENSMUSG00000016495', u'ENSMUSG00000009281', u'ENSMUSG00000042228', u'ENSMUSG00000052821', u'ENSMUSG00000014599', u'ENSMUSG00000022040', u'ENSMUSG00000070390', u'ENSMUSG00000032691', u'ENSMUSG00000015950', u'ENSMUSG00000021423', u'ENSMUSG00000079227', u'ENSMUSG00000024778']), symbols=u'Afap1l2 
Aim2 Casp4 Ccr5 Cnr2 Csf1 Csf1r Cyba Cyp26b1 Cysltr1 Ephx2 Fas Hck Itgav Lxn Ly86 Lyn Mapkapk2 Naip5 Ncf1 Nfkb2 Nlrp1b Nlrp3 P2rx7 Plgrkt Ptgs1 Pycard Rarres2 Rela Stab1 Tgfb1 Themis2 Ticam1 Tlr2 Tlr3 Tlr9 Tnfrsf1a Tril', symbol_set=set([u'Casp4', u'Naip5', 'Plgrkt', u'Aim2', 'Themis2', u'Hck', u'Pycard', u'Rela', u'Mapkapk2', u'P2rx7', u'Tgfb1', u'Nfkb2', u'Stab1', u'Nlrp3', u'Tnfrsf1a', u'Fas', u'Csf1', u'Itgav', u'Lxn', u'Cysltr1', u'Ncf1', u'Afap1l2', u'Ticam1', u'Rarres2', u'Csf1r', u'Ephx2', u'Cyp26b1', u'Ccr5', u'Tril', u'Ptgs1', u'Cnr2', u'Nlrp1b', u'Tlr2', u'Tlr3', u'Cyba', u'Lyn', u'Tlr9', u'Ly86']), geneids='ENSMUSG00000045322 ENSMUSG00000030793 ENSMUSG00000070390 ENSMUSG00000063415 ENSMUSG00000016528 ENSMUSG00000031639 ENSMUSG00000062585 ENSMUSG00000006519 ENSMUSG00000024927 ENSMUSG00000047557 ENSMUSG00000002603 ENSMUSG00000043496 ENSMUSG00000042286 ENSMUSG00000024621 ENSMUSG00000047250 ENSMUSG00000047123 ENSMUSG00000025083 ENSMUSG00000025225 ENSMUSG00000037731 ENSMUSG00000037860 ENSMUSG00000027087 ENSMUSG00000079227 ENSMUSG00000029468 ENSMUSG00000071203 ENSMUSG00000030341 ENSMUSG00000009281 ENSMUSG00000033538 ENSMUSG00000016495 ENSMUSG00000042228 ENSMUSG00000052821 ENSMUSG00000014599 ENSMUSG00000022040 ENSMUSG00000003283 ENSMUSG00000032691 ENSMUSG00000015950 ENSMUSG00000021423 ENSMUSG00000027995 ENSMUSG00000024778', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=25, D1='F', GO_name='inflammatory response'),
Nt(p_uncorrected=8.913005151044472e-08, study_count=20, study_n=794, namespace='biological_process', p_fdr_bh=9.002729402898319e-05, study_items=set([u'ENSMUSG00000079362', u'ENSMUSG00000073411', u'ENSMUSG00000073421', u'ENSMUSG00000034116', u'ENSMUSG00000045322', u'ENSMUSG00000061132', u'ENSMUSG00000026321', u'ENSMUSG00000036594', u'ENSMUSG00000032690', u'ENSMUSG00000030341', u'ENSMUSG00000061232', u'ENSMUSG00000020941', u'ENSMUSG00000060802', u'ENSMUSG00000027995', u'ENSMUSG00000079547', u'ENSMUSG00000041515', u'ENSMUSG00000022148', u'ENSMUSG00000079227', u'ENSMUSG00000024778', u'ENSMUSG00000039304']), NS='BP', pop_items=set([u'ENSMUSG00000045322', u'ENSMUSG00000026656', u'ENSMUSG00000032661', u'ENSMUSG00000059327', u'ENSMUSG00000060586', u'ENSMUSG00000031778', u'ENSMUSG00000002015', u'ENSMUSG00000061353', u'ENSMUSG00000029371', u'ENSMUSG00000023078', u'ENSMUSG00000021796', u'ENSMUSG00000022074', u'ENSMUSG00000036103', u'ENSMUSG00000079362', u'ENSMUSG00000006014', u'ENSMUSG00000027399', u'ENSMUSG00000029605', u'ENSMUSG00000000982', u'ENSMUSG00000037649', u'ENSMUSG00000045827', u'ENSMUSG00000032251', u'ENSMUSG00000029287', u'ENSMUSG00000020941', u'ENSMUSG00000024778', u'ENSMUSG00000031165', u'ENSMUSG00000022425', u'ENSMUSG00000061132', u'ENSMUSG00000032402', u'ENSMUSG00000036867', u'ENSMUSG00000035352', u'ENSMUSG00000061232', u'ENSMUSG00000034116', u'ENSMUSG00000039217', u'ENSMUSG00000044583', u'ENSMUSG00000034855', u'ENSMUSG00000022637', u'ENSMUSG00000031706', u'ENSMUSG00000037370', u'ENSMUSG00000032369', u'ENSMUSG00000001016', u'ENSMUSG00000017344', u'ENSMUSG00000024610', u'ENSMUSG00000026321', u'ENSMUSG00000034987', u'ENSMUSG00000021508', u'ENSMUSG00000022148', u'ENSMUSG00000079227', u'ENSMUSG00000052384', u'ENSMUSG00000029417', u'ENSMUSG00000056216', u'ENSMUSG00000019122', u'ENSMUSG00000002699', u'ENSMUSG00000073421', u'ENSMUSG00000032508', u'ENSMUSG00000018927', u'ENSMUSG00000079547', u'ENSMUSG00000008318', u'ENSMUSG00000029561', u'ENSMUSG00000056749', 
u'ENSMUSG00000036469', u'ENSMUSG00000033510', u'ENSMUSG00000028776', u'ENSMUSG00000060802', u'ENSMUSG00000026117', u'ENSMUSG00000032690', u'ENSMUSG00000022468', u'ENSMUSG00000039304', u'ENSMUSG00000018930', u'ENSMUSG00000030339', u'ENSMUSG00000006342', u'ENSMUSG00000028362', u'ENSMUSG00000024793', u'ENSMUSG00000005533', u'ENSMUSG00000041827', u'ENSMUSG00000021846', u'ENSMUSG00000038642', u'ENSMUSG00000039004', u'ENSMUSG00000034394', u'ENSMUSG00000030341', u'ENSMUSG00000016496', u'ENSMUSG00000073411', u'ENSMUSG00000035385', u'ENSMUSG00000027995', u'ENSMUSG00000044827', u'ENSMUSG00000036594', u'ENSMUSG00000027164', u'ENSMUSG00000041515', u'ENSMUSG00000028599']), pop_n=13836, p_sm_bonferroni=0.001350409410434748, is_obsolete=False, GO='GO:0006955', name='immune response', pop_count=88, alt_ids=[], level=2, depth=2, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000079362', u'ENSMUSG00000034116', u'ENSMUSG00000045322', u'ENSMUSG00000030341', u'ENSMUSG00000073421', u'ENSMUSG00000073411', u'ENSMUSG00000060802', u'ENSMUSG00000061132', u'ENSMUSG00000026321', u'ENSMUSG00000036594', u'ENSMUSG00000032690', u'ENSMUSG00000079227', u'ENSMUSG00000020941', u'ENSMUSG00000024778', u'ENSMUSG00000079547', u'ENSMUSG00000041515', u'ENSMUSG00000022148', u'ENSMUSG00000027995', u'ENSMUSG00000061232', u'ENSMUSG00000039304']), symbols=u'B2m Blnk Ccr5 Fas Fyb Gm43302 H2-Aa H2-Ab1 H2-D1 H2-DMb1 H2-K1 Irf8 Map3k14 Oas2 Tlr2 Tlr9 Tnfrsf11a Tnfrsf1a Tnfsf10 Vav1', symbol_set=set([u'Fyb', 'H2-K1', 'H2-D1', u'Oas2', u'Tnfrsf1a', u'Vav1', 'H2-Ab1', u'Tnfsf10', u'Irf8', 'H2-Aa', 'H2-DMb1', u'Tlr2', u'B2m', 'Gm43302', u'Map3k14', u'Tnfrsf11a', u'Ccr5', u'Fas', u'Tlr9', u'Blnk']), geneids='ENSMUSG00000060802 ENSMUSG00000061232 ENSMUSG00000073421 ENSMUSG00000034116 ENSMUSG00000045322 ENSMUSG00000079227 ENSMUSG00000061132 ENSMUSG00000026321 ENSMUSG00000079362 ENSMUSG00000036594 ENSMUSG00000032690 ENSMUSG00000030341 ENSMUSG00000020941 ENSMUSG00000039304 ENSMUSG00000073411 
ENSMUSG00000079547 ENSMUSG00000041515 ENSMUSG00000022148 ENSMUSG00000027995 ENSMUSG00000024778', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=107, D1='FL', GO_name='immune response'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0032879', name='', pop_count='', alt_ids='', level=3, depth=3, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=1780, D1='A', GO_name='regulation of localization'),
Nt(p_uncorrected=5.378962372457587e-07, study_count=7, study_n=794, namespace='biological_process', p_fdr_bh=0.0004289297837110785, study_items=set([u'ENSMUSG00000073411', u'ENSMUSG00000029468', u'ENSMUSG00000060802', u'ENSMUSG00000024339', u'ENSMUSG00000067212', u'ENSMUSG00000021871', u'ENSMUSG00000061232']), NS='BP', pop_items=set([u'ENSMUSG00000073411', u'ENSMUSG00000029468', u'ENSMUSG00000060802', u'ENSMUSG00000024339', u'ENSMUSG00000067212', u'ENSMUSG00000021871', u'ENSMUSG00000019998', u'ENSMUSG00000015656', u'ENSMUSG00000016206', u'ENSMUSG00000061232', u'ENSMUSG00000026395']), pop_n=13836, p_sm_bonferroni=0.00814966589051049, is_obsolete=False, GO='GO:0001916', name='positive regulation of T cell mediated cytotoxicity', pop_count=11, alt_ids=[], level=6, depth=9, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000073411', u'ENSMUSG00000029468', u'ENSMUSG00000060802', u'ENSMUSG00000024339', u'ENSMUSG00000067212', u'ENSMUSG00000021871', u'ENSMUSG00000061232']), symbols=u'B2m H2-D1 H2-K1 H2-T23 P2rx7 Pnp Tap2', symbol_set=set(['H2-K1', 'H2-D1', u'Pnp', 'H2-T23', u'B2m', u'Tap2', u'P2rx7']), geneids='ENSMUSG00000073411 ENSMUSG00000029468 ENSMUSG00000060802 ENSMUSG00000024339 ENSMUSG00000067212 ENSMUSG00000021871 ENSMUSG00000061232', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=3, D1='A', GO_name='positive regulation of T cell mediated cytotoxicity'),
Nt(p_uncorrected=1.1717303088250284e-05, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.004931357196946668, study_items=set([u'ENSMUSG00000057948', u'ENSMUSG00000052593', u'ENSMUSG00000053175', u'ENSMUSG00000025225', u'ENSMUSG00000043008']), NS='BP', pop_items=set([u'ENSMUSG00000052593', u'ENSMUSG00000053175', u'ENSMUSG00000043008', u'ENSMUSG00000022508', u'ENSMUSG00000025225', u'ENSMUSG00000057948', u'ENSMUSG00000005583']), pop_n=13836, p_sm_bonferroni=0.17752885909008007, is_obsolete=False, GO='GO:0002467', name='germinal center formation', pop_count=7, alt_ids=[], level=3, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000043008', u'ENSMUSG00000053175', u'ENSMUSG00000052593', u'ENSMUSG00000025225', u'ENSMUSG00000057948']), symbols=u'Adam17 Bcl3 Klhl6 Nfkb2 Unc13d', symbol_set=set([u'Klhl6', u'Nfkb2', u'Unc13d', u'Bcl3', u'Adam17']), geneids='ENSMUSG00000053175 ENSMUSG00000052593 ENSMUSG00000057948 ENSMUSG00000025225 ENSMUSG00000043008', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='EFL', GO_name='germinal center formation'),
Nt(p_uncorrected=1.935958720767685e-06, study_count=26, study_n=794, namespace='biological_process', p_fdr_bh=0.001128142714551969, study_items=set([u'ENSMUSG00000023972', u'ENSMUSG00000058056', u'ENSMUSG00000031995', u'ENSMUSG00000004508', u'ENSMUSG00000024835', u'ENSMUSG00000022568', u'ENSMUSG00000021990', u'ENSMUSG00000002603', u'ENSMUSG00000031805', u'ENSMUSG00000072115', u'ENSMUSG00000036862', u'ENSMUSG00000039286', u'ENSMUSG00000032312', u'ENSMUSG00000027087', u'ENSMUSG00000005087', u'ENSMUSG00000026814', u'ENSMUSG00000033295', u'ENSMUSG00000036995', u'ENSMUSG00000031740', u'ENSMUSG00000009621', u'ENSMUSG00000000957', u'ENSMUSG00000062960', u'ENSMUSG00000020689', u'ENSMUSG00000027654', u'ENSMUSG00000007815', u'ENSMUSG00000021998']), NS='BP', pop_items=set([u'ENSMUSG00000033721', u'ENSMUSG00000026490', u'ENSMUSG00000028024', u'ENSMUSG00000027544', u'ENSMUSG00000061136', u'ENSMUSG00000004508', u'ENSMUSG00000022568', u'ENSMUSG00000019997', u'ENSMUSG00000025351', u'ENSMUSG00000031805', u'ENSMUSG00000029053', u'ENSMUSG00000029648', u'ENSMUSG00000058230', u'ENSMUSG00000002489', u'ENSMUSG00000004263', u'ENSMUSG00000021065', u'ENSMUSG00000015647', u'ENSMUSG00000020592', u'ENSMUSG00000028530', u'ENSMUSG00000026814', u'ENSMUSG00000020422', u'ENSMUSG00000039239', u'ENSMUSG00000030685', u'ENSMUSG00000022791', u'ENSMUSG00000009621', u'ENSMUSG00000018849', u'ENSMUSG00000005958', u'ENSMUSG00000006445', u'ENSMUSG00000027654', u'ENSMUSG00000025608', u'ENSMUSG00000040152', u'ENSMUSG00000043061', u'ENSMUSG00000037643', u'ENSMUSG00000030967', u'ENSMUSG00000032312', u'ENSMUSG00000025510', u'ENSMUSG00000021763', u'ENSMUSG00000052504', u'ENSMUSG00000020745', u'ENSMUSG00000055447', u'ENSMUSG00000013663', u'ENSMUSG00000021338', u'ENSMUSG00000027954', u'ENSMUSG00000021994', u'ENSMUSG00000026782', u'ENSMUSG00000020282', u'ENSMUSG00000029231', u'ENSMUSG00000001847', u'ENSMUSG00000037014', u'ENSMUSG00000027009', u'ENSMUSG00000027646', u'ENSMUSG00000004364', u'ENSMUSG00000020135', 
u'ENSMUSG00000036862', u'ENSMUSG00000039286', u'ENSMUSG00000004933', u'ENSMUSG00000027087', u'ENSMUSG00000062209', u'ENSMUSG00000072115', u'ENSMUSG00000026837', u'ENSMUSG00000066877', u'ENSMUSG00000001552', u'ENSMUSG00000036995', u'ENSMUSG00000031626', u'ENSMUSG00000008475', u'ENSMUSG00000000957', u'ENSMUSG00000031555', u'ENSMUSG00000047139', u'ENSMUSG00000019889', u'ENSMUSG00000017615', u'ENSMUSG00000025239', u'ENSMUSG00000040732', u'ENSMUSG00000032475', u'ENSMUSG00000045180', u'ENSMUSG00000001300', u'ENSMUSG00000021877', u'ENSMUSG00000025437', u'ENSMUSG00000037902', u'ENSMUSG00000056481', u'ENSMUSG00000020121', u'ENSMUSG00000007815', u'ENSMUSG00000074305', u'ENSMUSG00000033444', u'ENSMUSG00000050295', u'ENSMUSG00000049313', u'ENSMUSG00000032740', u'ENSMUSG00000026596', u'ENSMUSG00000017670', u'ENSMUSG00000024835', u'ENSMUSG00000021998', u'ENSMUSG00000022528', u'ENSMUSG00000018736', u'ENSMUSG00000021990', u'ENSMUSG00000058571', u'ENSMUSG00000030707', u'ENSMUSG00000027239', u'ENSMUSG00000022200', u'ENSMUSG00000002603', u'ENSMUSG00000024620', u'ENSMUSG00000029787', u'ENSMUSG00000031785', u'ENSMUSG00000024232', u'ENSMUSG00000000148', u'ENSMUSG00000000631', u'ENSMUSG00000042453', u'ENSMUSG00000032175', u'ENSMUSG00000025429', u'ENSMUSG00000061665', u'ENSMUSG00000026755', u'ENSMUSG00000005087', u'ENSMUSG00000039629', u'ENSMUSG00000026587', u'ENSMUSG00000025809', u'ENSMUSG00000024122', u'ENSMUSG00000023972', u'ENSMUSG00000058056', u'ENSMUSG00000002900', u'ENSMUSG00000023008', u'ENSMUSG00000022602', u'ENSMUSG00000025810', u'ENSMUSG00000062991', u'ENSMUSG00000028701', u'ENSMUSG00000020149', u'ENSMUSG00000014791', u'ENSMUSG00000028969', u'ENSMUSG00000041112', u'ENSMUSG00000030134', u'ENSMUSG00000016933', u'ENSMUSG00000072437', u'ENSMUSG00000039637', u'ENSMUSG00000040990', u'ENSMUSG00000031995', u'ENSMUSG00000031990', u'ENSMUSG00000038976', u'ENSMUSG00000045382', u'ENSMUSG00000028514', u'ENSMUSG00000022505', u'ENSMUSG00000058325', u'ENSMUSG00000026478', 
u'ENSMUSG00000024486', u'ENSMUSG00000030720', u'ENSMUSG00000040511', u'ENSMUSG00000022261', u'ENSMUSG00000033295', u'ENSMUSG00000025743', u'ENSMUSG00000025089', u'ENSMUSG00000022812', u'ENSMUSG00000045092', u'ENSMUSG00000021279', u'ENSMUSG00000057177', u'ENSMUSG00000005871', u'ENSMUSG00000031740', u'ENSMUSG00000024789', u'ENSMUSG00000029581', u'ENSMUSG00000025348', u'ENSMUSG00000062960', u'ENSMUSG00000023951', u'ENSMUSG00000020689', u'ENSMUSG00000033392', u'ENSMUSG00000031955', u'ENSMUSG00000050357', u'ENSMUSG00000024304', u'ENSMUSG00000032322']), pop_n=13836, p_sm_bonferroni=0.029331710578351194, is_obsolete=False, GO='GO:0016477', name='cell migration', pop_count=163, alt_ids=[], level=3, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000058056', u'ENSMUSG00000026814', u'ENSMUSG00000072115', u'ENSMUSG00000000957', u'ENSMUSG00000005087', u'ENSMUSG00000036995', u'ENSMUSG00000031995', u'ENSMUSG00000004508', u'ENSMUSG00000024835', u'ENSMUSG00000022568', u'ENSMUSG00000023972', u'ENSMUSG00000021990', u'ENSMUSG00000020689', u'ENSMUSG00000027654', u'ENSMUSG00000007815', u'ENSMUSG00000031740', u'ENSMUSG00000002603', u'ENSMUSG00000062960', u'ENSMUSG00000031805', u'ENSMUSG00000033295', u'ENSMUSG00000036862', u'ENSMUSG00000039286', u'ENSMUSG00000009621', u'ENSMUSG00000032312', u'ENSMUSG00000027087', u'ENSMUSG00000021998']), symbols=u'Ang Asap3 Cd44 Coro1b Csk Dchs1 Eng Fam83d Fndc3b Gab2 Itgav Itgb3 Jak3 Kdr Lcp1 Mmp14 Mmp2 Palld Ptk7 Ptprf Rhoa Scrib Spata13 St14 Tgfb1 Vav2', symbol_set=set([u'Spata13', u'Csk', u'Eng', u'Gab2', u'Coro1b', u'Lcp1', u'Asap3', u'Dchs1', u'Tgfb1', u'Itgb3', u'Vav2', u'Palld', u'Itgav', u'Fndc3b', u'Mmp2', u'Fam83d', u'Mmp14', u'Ptk7', u'Rhoa', u'Ang', u'Cd44', u'Scrib', u'Ptprf', u'St14', u'Jak3', u'Kdr']), geneids='ENSMUSG00000000957 ENSMUSG00000058056 ENSMUSG00000031995 ENSMUSG00000004508 ENSMUSG00000024835 ENSMUSG00000021998 ENSMUSG00000021990 ENSMUSG00000002603 ENSMUSG00000031805 ENSMUSG00000072115 
ENSMUSG00000036862 ENSMUSG00000039286 ENSMUSG00000032312 ENSMUSG00000027087 ENSMUSG00000005087 ENSMUSG00000026814 ENSMUSG00000033295 ENSMUSG00000036995 ENSMUSG00000031740 ENSMUSG00000009621 ENSMUSG00000023972 ENSMUSG00000062960 ENSMUSG00000020689 ENSMUSG00000027654 ENSMUSG00000007815 ENSMUSG00000022568', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=236, D1='BCM', GO_name='cell migration'),
Nt(p_uncorrected=1.5494522500375623e-06, study_count=20, study_n=794, namespace='biological_process', p_fdr_bh=0.0009390300416127642, study_items=set([u'ENSMUSG00000056144', u'ENSMUSG00000024349', u'ENSMUSG00000029826', u'ENSMUSG00000009585', u'ENSMUSG00000045322', u'ENSMUSG00000027639', u'ENSMUSG00000026928', u'ENSMUSG00000033450', u'ENSMUSG00000000275', u'ENSMUSG00000040264', u'ENSMUSG00000032690', u'ENSMUSG00000047123', u'ENSMUSG00000030793', u'ENSMUSG00000057948', u'ENSMUSG00000031639', u'ENSMUSG00000025492', u'ENSMUSG00000037921', u'ENSMUSG00000032691', u'ENSMUSG00000043279', u'ENSMUSG00000062488']), NS='BP', pop_items=set([u'ENSMUSG00000029826', u'ENSMUSG00000045322', u'ENSMUSG00000070583', u'ENSMUSG00000024079', u'ENSMUSG00000009585', u'ENSMUSG00000020455', u'ENSMUSG00000024978', u'ENSMUSG00000021494', u'ENSMUSG00000022476', u'ENSMUSG00000066568', u'ENSMUSG00000061298', u'ENSMUSG00000017707', u'ENSMUSG00000037523', u'ENSMUSG00000038628', u'ENSMUSG00000020115', u'ENSMUSG00000000776', u'ENSMUSG00000026395', u'ENSMUSG00000056144', u'ENSMUSG00000039236', u'ENSMUSG00000066800', u'ENSMUSG00000029605', u'ENSMUSG00000026896', u'ENSMUSG00000061286', u'ENSMUSG00000078566', u'ENSMUSG00000003234', u'ENSMUSG00000045932', u'ENSMUSG00000020108', u'ENSMUSG00000000275', u'ENSMUSG00000027951', u'ENSMUSG00000036986', u'ENSMUSG00000056851', u'ENSMUSG00000041415', u'ENSMUSG00000035834', u'ENSMUSG00000003184', u'ENSMUSG00000047123', u'ENSMUSG00000036908', u'ENSMUSG00000057948', u'ENSMUSG00000021703', u'ENSMUSG00000022901', u'ENSMUSG00000030789', u'ENSMUSG00000044583', u'ENSMUSG00000034855', u'ENSMUSG00000024349', u'ENSMUSG00000032369', u'ENSMUSG00000035692', u'ENSMUSG00000034459', u'ENSMUSG00000034453', u'ENSMUSG00000022865', u'ENSMUSG00000020641', u'ENSMUSG00000040033', u'ENSMUSG00000025532', u'ENSMUSG00000000386', u'ENSMUSG00000079339', u'ENSMUSG00000032661', u'ENSMUSG00000029417', u'ENSMUSG00000030314', u'ENSMUSG00000027427', u'ENSMUSG00000026928', u'ENSMUSG00000046718', 
u'ENSMUSG00000030793', u'ENSMUSG00000074896', u'ENSMUSG00000031639', u'ENSMUSG00000062488', u'ENSMUSG00000028099', u'ENSMUSG00000022971', u'ENSMUSG00000032178', u'ENSMUSG00000030880', u'ENSMUSG00000035086', u'ENSMUSG00000027514', u'ENSMUSG00000033450', u'ENSMUSG00000030247', u'ENSMUSG00000029561', u'ENSMUSG00000029165', u'ENSMUSG00000020783', u'ENSMUSG00000030249', u'ENSMUSG00000025492', u'ENSMUSG00000027598', u'ENSMUSG00000032344', u'ENSMUSG00000057329', u'ENSMUSG00000027639', u'ENSMUSG00000040296', u'ENSMUSG00000032690', u'ENSMUSG00000032691', u'ENSMUSG00000022965', u'ENSMUSG00000022967', u'ENSMUSG00000019726', u'ENSMUSG00000022969', u'ENSMUSG00000037921', u'ENSMUSG00000029771', u'ENSMUSG00000020009', u'ENSMUSG00000040264', u'ENSMUSG00000043279', u'ENSMUSG00000024810', u'ENSMUSG00000041827', u'ENSMUSG00000022051', u'ENSMUSG00000017830', u'ENSMUSG00000023341', u'ENSMUSG00000040613', u'ENSMUSG00000018899', u'ENSMUSG00000060591', u'ENSMUSG00000010047', u'ENSMUSG00000005102', u'ENSMUSG00000034259', u'ENSMUSG00000038884']), pop_n=13836, p_sm_bonferroni=0.023475751040319105, is_obsolete=False, GO='GO:0051607', name='defense response to virus', pop_count=104, alt_ids=[], level=3, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000056144', u'ENSMUSG00000040264', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000033450', u'ENSMUSG00000025492', u'ENSMUSG00000030793', u'ENSMUSG00000031639', u'ENSMUSG00000009585', u'ENSMUSG00000032691', u'ENSMUSG00000062488', u'ENSMUSG00000024349', u'ENSMUSG00000027639', u'ENSMUSG00000029826', u'ENSMUSG00000000275', u'ENSMUSG00000043279', u'ENSMUSG00000032690', u'ENSMUSG00000047123', u'ENSMUSG00000057948', u'ENSMUSG00000037921']), symbols=u'Apobec3 Card9 Ddx60 Gbp2b Ifit3b Ifitm3 Nlrp3 Oas2 Pycard Samhd1 Tagap Ticam1 Tlr3 Tlr9 Tmem173 Trim25 Trim34a Trim56 Unc13d Zc3hav1', symbol_set=set(['Gbp2b', u'Ifitm3', u'Trim56', u'Samhd1', u'Nlrp3', u'Tagap', u'Ticam1', u'Trim25', u'Ddx60', u'Unc13d', 
'Ifit3b', u'Apobec3', u'Tlr3', u'Card9', u'Oas2', u'Zc3hav1', u'Tmem173', u'Pycard', u'Tlr9', u'Trim34a']), geneids='ENSMUSG00000056144 ENSMUSG00000024349 ENSMUSG00000033450 ENSMUSG00000040264 ENSMUSG00000045322 ENSMUSG00000027639 ENSMUSG00000026928 ENSMUSG00000029826 ENSMUSG00000000275 ENSMUSG00000025492 ENSMUSG00000030793 ENSMUSG00000032691 ENSMUSG00000032690 ENSMUSG00000057948 ENSMUSG00000031639 ENSMUSG00000009585 ENSMUSG00000037921 ENSMUSG00000047123 ENSMUSG00000043279 ENSMUSG00000062488', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=3, D1='FL', GO_name='defense response to virus'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0048870', name='', pop_count='', alt_ids='', level=2, depth=4, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=261, D1='BCM', GO_name='cell motility'),
Nt(p_uncorrected=2.7763769242448692e-05, study_count=11, study_n=794, namespace='biological_process', p_fdr_bh=0.011069707047166847, study_items=set([u'ENSMUSG00000032902', u'ENSMUSG00000048779', u'ENSMUSG00000024349', u'ENSMUSG00000029468', u'ENSMUSG00000002603', u'ENSMUSG00000004040', u'ENSMUSG00000036362', u'ENSMUSG00000004056', u'ENSMUSG00000036353', u'ENSMUSG00000002459', u'ENSMUSG00000052681']), NS='BP', pop_items=set([u'ENSMUSG00000031284', u'ENSMUSG00000030602', u'ENSMUSG00000022781', u'ENSMUSG00000003031', u'ENSMUSG00000068798', u'ENSMUSG00000052681', u'ENSMUSG00000030059', u'ENSMUSG00000028766', u'ENSMUSG00000034957', u'ENSMUSG00000027765', u'ENSMUSG00000022556', u'ENSMUSG00000002603', u'ENSMUSG00000031681', u'ENSMUSG00000004056', u'ENSMUSG00000036353', u'ENSMUSG00000027509', u'ENSMUSG00000026104', u'ENSMUSG00000021025', u'ENSMUSG00000001729', u'ENSMUSG00000027358', u'ENSMUSG00000032902', u'ENSMUSG00000048779', u'ENSMUSG00000027796', u'ENSMUSG00000029468', u'ENSMUSG00000021540', u'ENSMUSG00000039217', u'ENSMUSG00000057177', u'ENSMUSG00000035352', u'ENSMUSG00000026185', u'ENSMUSG00000020516', u'ENSMUSG00000024349', u'ENSMUSG00000024087', u'ENSMUSG00000030774', u'ENSMUSG00000004043', u'ENSMUSG00000004040', u'ENSMUSG00000048402', u'ENSMUSG00000036362', u'ENSMUSG00000025217', u'ENSMUSG00000024182', u'ENSMUSG00000028163', u'ENSMUSG00000002459', u'ENSMUSG00000002458', u'ENSMUSG00000000142', u'ENSMUSG00000034640']), pop_n=13836, p_sm_bonferroni=0.4206488677923401, is_obsolete=False, GO='GO:0071407', name='cellular response to organic cyclic compound', pop_count=44, alt_ids=[], level=5, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000032902', u'ENSMUSG00000048779', u'ENSMUSG00000024349', u'ENSMUSG00000029468', u'ENSMUSG00000002603', u'ENSMUSG00000004040', u'ENSMUSG00000036362', u'ENSMUSG00000004056', u'ENSMUSG00000036353', u'ENSMUSG00000002459', u'ENSMUSG00000052681']), symbols=u'Akt2 P2rx7 P2ry12 P2ry13 P2ry6 Rap1b Rgs20 
Slc16a1 Stat3 Tgfb1 Tmem173', symbol_set=set([u'Tgfb1', u'Rgs20', u'Stat3', u'Slc16a1', u'Rap1b', u'P2ry12', u'P2ry13', u'Akt2', u'P2ry6', u'Tmem173', u'P2rx7']), geneids='ENSMUSG00000032902 ENSMUSG00000048779 ENSMUSG00000024349 ENSMUSG00000029468 ENSMUSG00000002603 ENSMUSG00000004040 ENSMUSG00000036362 ENSMUSG00000004056 ENSMUSG00000036353 ENSMUSG00000002459 ENSMUSG00000052681', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=122, D1='BF', GO_name='cellular response to organic cyclic compound'),
Nt(p_uncorrected='', study_count='', study_n='', namespace='', p_fdr_bh='', study_items='', NS='BP', pop_items='', pop_n='', p_sm_bonferroni='', is_obsolete='', GO='GO:0050896', name='', pop_count='', alt_ids='', level=1, depth=1, enrichment='', Cluster='', geneid_set='', symbols='', symbol_set='', geneids='', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=False, hdr1usr01='*', dcnt=2241, D1='F', GO_name='response to stimulus'),
Nt(p_uncorrected=2.5263641063287124e-06, study_count=7, study_n=794, namespace='biological_process', p_fdr_bh=0.0013251369739822624, study_items=set([u'ENSMUSG00000025888', u'ENSMUSG00000029468', u'ENSMUSG00000030793', u'ENSMUSG00000032691', u'ENSMUSG00000033538', u'ENSMUSG00000037860', u'ENSMUSG00000079227']), NS='BP', pop_items=set([u'ENSMUSG00000000982', u'ENSMUSG00000025888', u'ENSMUSG00000029468', u'ENSMUSG00000030793', u'ENSMUSG00000032691', u'ENSMUSG00000066551', u'ENSMUSG00000033538', u'ENSMUSG00000031934', u'ENSMUSG00000022967', u'ENSMUSG00000021994', u'ENSMUSG00000037860', u'ENSMUSG00000022575', u'ENSMUSG00000079227']), pop_n=13836, p_sm_bonferroni=0.038276942574986324, is_obsolete=False, GO='GO:0050718', name='positive regulation of interleukin-1 beta secretion', pop_count=13, alt_ids=[], level=8, depth=11, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000025888', u'ENSMUSG00000033538', u'ENSMUSG00000029468', u'ENSMUSG00000037860', u'ENSMUSG00000079227', u'ENSMUSG00000030793', u'ENSMUSG00000032691']), symbols=u'Aim2 Casp1 Casp4 Ccr5 Nlrp3 P2rx7 Pycard', symbol_set=set([u'Casp4', u'Nlrp3', u'Casp1', u'Aim2', u'Ccr5', u'Pycard', u'P2rx7']), geneids='ENSMUSG00000025888 ENSMUSG00000029468 ENSMUSG00000030793 ENSMUSG00000032691 ENSMUSG00000033538 ENSMUSG00000037860 ENSMUSG00000079227', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='A', GO_name='positive regulation of interleukin-1 beta secretion'),
Nt(p_uncorrected=4.858068668518285e-10, study_count=16, study_n=794, namespace='biological_process', p_fdr_bh=1.4720919679344106e-06, study_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000052593', u'ENSMUSG00000038058', u'ENSMUSG00000060802', u'ENSMUSG00000072115', u'ENSMUSG00000026928', u'ENSMUSG00000067212', u'ENSMUSG00000035279', u'ENSMUSG00000062210', u'ENSMUSG00000003283', u'ENSMUSG00000015950', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000027995', u'ENSMUSG00000029298', u'ENSMUSG00000029468']), NS='BP', pop_items=set([u'ENSMUSG00000058385', u'ENSMUSG00000040264', u'ENSMUSG00000015950', u'ENSMUSG00000028874', u'ENSMUSG00000026928', u'ENSMUSG00000067212', u'ENSMUSG00000035279', u'ENSMUSG00000028362', u'ENSMUSG00000062727', u'ENSMUSG00000028268', u'ENSMUSG00000022191', u'ENSMUSG00000022575', u'ENSMUSG00000052593', u'ENSMUSG00000032508', u'ENSMUSG00000069516', u'ENSMUSG00000079641', u'ENSMUSG00000016024', u'ENSMUSG00000020115', u'ENSMUSG00000041135', u'ENSMUSG00000029298', u'ENSMUSG00000054717', u'ENSMUSG00000029468', u'ENSMUSG00000072115', u'ENSMUSG00000068854', u'ENSMUSG00000040253', u'ENSMUSG00000027695', u'ENSMUSG00000037071', u'ENSMUSG00000021194', u'ENSMUSG00000003283', u'ENSMUSG00000028270', u'ENSMUSG00000018102', u'ENSMUSG00000062210', u'ENSMUSG00000006445', u'ENSMUSG00000060802', u'ENSMUSG00000027995', u'ENSMUSG00000047246', u'ENSMUSG00000020399', u'ENSMUSG00000038058', u'ENSMUSG00000024300', u'ENSMUSG00000079614', u'ENSMUSG00000032041', u'ENSMUSG00000069268']), pop_n=13836, p_sm_bonferroni=7.360459839672053e-06, is_obsolete=False, GO='GO:0050830', name='defense response to Gram-positive bacterium', pop_count=42, alt_ids=[], level=6, depth=7, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000040264', u'ENSMUSG00000015950', u'ENSMUSG00000072115', u'ENSMUSG00000026928', u'ENSMUSG00000067212', u'ENSMUSG00000035279', u'ENSMUSG00000062210', u'ENSMUSG00000003283', u'ENSMUSG00000028270', u'ENSMUSG00000028268', 
u'ENSMUSG00000029468', u'ENSMUSG00000052593', u'ENSMUSG00000060802', u'ENSMUSG00000027995', u'ENSMUSG00000038058', u'ENSMUSG00000029298']), symbols=u'Adam17 Ang B2m Card9 Gbp2 Gbp2b Gbp3 Gbp9 H2-T23 Hck Ncf1 Nod1 P2rx7 Ssc5d Tlr2 Tnfaip8', symbol_set=set(['Gbp2b', u'Ncf1', u'Ang', u'Tnfaip8', 'Ssc5d', u'Gbp3', u'Gbp2', 'H2-T23', u'B2m', u'Tlr2', u'Card9', u'Nod1', u'Adam17', u'Hck', u'Gbp9', u'P2rx7']), geneids='ENSMUSG00000040264 ENSMUSG00000052593 ENSMUSG00000029468 ENSMUSG00000060802 ENSMUSG00000072115 ENSMUSG00000026928 ENSMUSG00000027995 ENSMUSG00000035279 ENSMUSG00000067212 ENSMUSG00000062210 ENSMUSG00000003283 ENSMUSG00000015950 ENSMUSG00000028270 ENSMUSG00000028268 ENSMUSG00000038058 ENSMUSG00000029298', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=0, D1='F', GO_name='defense response to Gram-positive bacterium'),
Nt(p_uncorrected=1.3518814314355716e-10, study_count=61, study_n=794, namespace='biological_process', p_fdr_bh=5.120588891920086e-07, study_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000067212', u'ENSMUSG00000035279', u'ENSMUSG00000030793', u'ENSMUSG00000070390', u'ENSMUSG00000055172', u'ENSMUSG00000031639', u'ENSMUSG00000009585', u'ENSMUSG00000031805', u'ENSMUSG00000047798', u'ENSMUSG00000024338', u'ENSMUSG00000024339', u'ENSMUSG00000043496', u'ENSMUSG00000029826', u'ENSMUSG00000024621', u'ENSMUSG00000073421', u'ENSMUSG00000047123', u'ENSMUSG00000024235', u'ENSMUSG00000027508', u'ENSMUSG00000071203', u'ENSMUSG00000079547', u'ENSMUSG00000025492', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000032312', u'ENSMUSG00000061232', u'ENSMUSG00000038213', u'ENSMUSG00000020707', u'ENSMUSG00000028191', u'ENSMUSG00000057058', u'ENSMUSG00000037731', u'ENSMUSG00000024349', u'ENSMUSG00000030263', u'ENSMUSG00000003283', u'ENSMUSG00000032691', u'ENSMUSG00000033538', u'ENSMUSG00000020437', u'ENSMUSG00000029217', u'ENSMUSG00000062593', u'ENSMUSG00000042228', u'ENSMUSG00000036896', u'ENSMUSG00000015947', u'ENSMUSG00000030748', u'ENSMUSG00000021583', u'ENSMUSG00000073411', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000014599', u'ENSMUSG00000027995', u'ENSMUSG00000027639', u'ENSMUSG00000000275', u'ENSMUSG00000036594', u'ENSMUSG00000032690', u'ENSMUSG00000040751', u'ENSMUSG00000038058', u'ENSMUSG00000021423', u'ENSMUSG00000031838', u'ENSMUSG00000043279', u'ENSMUSG00000060550']), NS='BP', pop_items=set([u'ENSMUSG00000035834', u'ENSMUSG00000045322', u'ENSMUSG00000028874', u'ENSMUSG00000004730', u'ENSMUSG00000021703', u'ENSMUSG00000024079', u'ENSMUSG00000074151', u'ENSMUSG00000060586', u'ENSMUSG00000016481', u'ENSMUSG00000024338', u'ENSMUSG00000055172', u'ENSMUSG00000036887', u'ENSMUSG00000009585', u'ENSMUSG00000031805', u'ENSMUSG00000022476', u'ENSMUSG00000024371', u'ENSMUSG00000042228', u'ENSMUSG00000017707', 
u'ENSMUSG00000032035', u'ENSMUSG00000071369', u'ENSMUSG00000026399', u'ENSMUSG00000037523', u'ENSMUSG00000038628', u'ENSMUSG00000020115', u'ENSMUSG00000000776', u'ENSMUSG00000019843', u'ENSMUSG00000015217', u'ENSMUSG00000035629', u'ENSMUSG00000039748', u'ENSMUSG00000039236', u'ENSMUSG00000049686', u'ENSMUSG00000024164', u'ENSMUSG00000035279', u'ENSMUSG00000024045', u'ENSMUSG00000038128', u'ENSMUSG00000075705', u'ENSMUSG00000041187', u'ENSMUSG00000038517', u'ENSMUSG00000046034', u'ENSMUSG00000062593', u'ENSMUSG00000029605', u'ENSMUSG00000026896', u'ENSMUSG00000037321', u'ENSMUSG00000037649', u'ENSMUSG00000014599', u'ENSMUSG00000000275', u'ENSMUSG00000024789', u'ENSMUSG00000051439', u'ENSMUSG00000038058', u'ENSMUSG00000025512', u'ENSMUSG00000050199', u'ENSMUSG00000034889', u'ENSMUSG00000024621', u'ENSMUSG00000027951', u'ENSMUSG00000020437', u'ENSMUSG00000070390', u'ENSMUSG00000036986', u'ENSMUSG00000030751', u'ENSMUSG00000025139', u'ENSMUSG00000000134', u'ENSMUSG00000026883', u'ENSMUSG00000015837', u'ENSMUSG00000032691', u'ENSMUSG00000018446', u'ENSMUSG00000018899', u'ENSMUSG00000028885', u'ENSMUSG00000029826', u'ENSMUSG00000028633', u'ENSMUSG00000027646', u'ENSMUSG00000003184', u'ENSMUSG00000047123', u'ENSMUSG00000024767', u'ENSMUSG00000016024', u'ENSMUSG00000054072', u'ENSMUSG00000036908', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000032312', u'ENSMUSG00000061232', u'ENSMUSG00000001123', u'ENSMUSG00000031392', u'ENSMUSG00000028099', u'ENSMUSG00000055204', u'ENSMUSG00000022901', u'ENSMUSG00000045932', u'ENSMUSG00000033777', u'ENSMUSG00000059883', u'ENSMUSG00000027598', u'ENSMUSG00000030263', u'ENSMUSG00000039936', u'ENSMUSG00000003283', u'ENSMUSG00000051212', u'ENSMUSG00000044583', u'ENSMUSG00000023915', u'ENSMUSG00000002688', u'ENSMUSG00000022636', u'ENSMUSG00000021583', u'ENSMUSG00000078942', u'ENSMUSG00000031838', u'ENSMUSG00000078945', u'ENSMUSG00000025702', u'ENSMUSG00000027639', u'ENSMUSG00000033307', u'ENSMUSG00000024610', 
u'ENSMUSG00000000787', u'ENSMUSG00000034453', u'ENSMUSG00000026288', u'ENSMUSG00000028793', u'ENSMUSG00000034218', u'ENSMUSG00000023990', u'ENSMUSG00000025532', u'ENSMUSG00000060550', u'ENSMUSG00000000386', u'ENSMUSG00000052384', u'ENSMUSG00000070034', u'ENSMUSG00000028064', u'ENSMUSG00000027427', u'ENSMUSG00000026928', u'ENSMUSG00000000732', u'ENSMUSG00000020399', u'ENSMUSG00000046718', u'ENSMUSG00000013707', u'ENSMUSG00000030793', u'ENSMUSG00000021457', u'ENSMUSG00000074896', u'ENSMUSG00000024948', u'ENSMUSG00000031639', u'ENSMUSG00000020823', u'ENSMUSG00000025498', u'ENSMUSG00000047798', u'ENSMUSG00000026285', u'ENSMUSG00000032508', u'ENSMUSG00000024339', u'ENSMUSG00000002602', u'ENSMUSG00000053647', u'ENSMUSG00000060591', u'ENSMUSG00000041439', u'ENSMUSG00000032076', u'ENSMUSG00000024235', u'ENSMUSG00000001150', u'ENSMUSG00000027508', u'ENSMUSG00000036896', u'ENSMUSG00000071203', u'ENSMUSG00000079547', u'ENSMUSG00000030880', u'ENSMUSG00000041135', u'ENSMUSG00000024392', u'ENSMUSG00000022575', u'ENSMUSG00000021624', u'ENSMUSG00000020707', u'ENSMUSG00000038213', u'ENSMUSG00000057058', u'ENSMUSG00000027514', u'ENSMUSG00000037731', u'ENSMUSG00000030122', u'ENSMUSG00000024349', u'ENSMUSG00000033454', u'ENSMUSG00000022514', u'ENSMUSG00000022887', u'ENSMUSG00000036469', u'ENSMUSG00000023973', u'ENSMUSG00000043496', u'ENSMUSG00000020573', u'ENSMUSG00000034652', u'ENSMUSG00000036712', u'ENSMUSG00000025492', u'ENSMUSG00000032109', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000032344', u'ENSMUSG00000059456', u'ENSMUSG00000026117', u'ENSMUSG00000040296', u'ENSMUSG00000032690', u'ENSMUSG00000040751', u'ENSMUSG00000032905', u'ENSMUSG00000020476', u'ENSMUSG00000004707', u'ENSMUSG00000038160', u'ENSMUSG00000038495', u'ENSMUSG00000001128', u'ENSMUSG00000029771', u'ENSMUSG00000058818', u'ENSMUSG00000040264', u'ENSMUSG00000056851', u'ENSMUSG00000032661', u'ENSMUSG00000067212', u'ENSMUSG00000038521', u'ENSMUSG00000036594', u'ENSMUSG00000031750', 
u'ENSMUSG00000022508', u'ENSMUSG00000073421', u'ENSMUSG00000041827', u'ENSMUSG00000050335', u'ENSMUSG00000044811', u'ENSMUSG00000026471', u'ENSMUSG00000017830', u'ENSMUSG00000019256', u'ENSMUSG00000023341', u'ENSMUSG00000023224', u'ENSMUSG00000015947', u'ENSMUSG00000020641', u'ENSMUSG00000026778', u'ENSMUSG00000039005', u'ENSMUSG00000026648', u'ENSMUSG00000028059', u'ENSMUSG00000028191', u'ENSMUSG00000054717', u'ENSMUSG00000038260', u'ENSMUSG00000046879', u'ENSMUSG00000021277', u'ENSMUSG00000030748', u'ENSMUSG00000033538', u'ENSMUSG00000028291', u'ENSMUSG00000029217', u'ENSMUSG00000051256', u'ENSMUSG00000066839', u'ENSMUSG00000073411', u'ENSMUSG00000005102', u'ENSMUSG00000029561', u'ENSMUSG00000029915', u'ENSMUSG00000027995', u'ENSMUSG00000044827', u'ENSMUSG00000043279', u'ENSMUSG00000027164', u'ENSMUSG00000026365', u'ENSMUSG00000032322', u'ENSMUSG00000032041', u'ENSMUSG00000034459', u'ENSMUSG00000045038', u'ENSMUSG00000021423', u'ENSMUSG00000038147', u'ENSMUSG00000052889', u'ENSMUSG00000050132', u'ENSMUSG00000032737']), pop_n=13836, p_sm_bonferroni=2.048235556768035e-06, is_obsolete=False, GO='GO:0002376', name='immune system process', pop_count=235, alt_ids=[], level=1, depth=1, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000040264', u'ENSMUSG00000040751', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000067212', u'ENSMUSG00000035279', u'ENSMUSG00000030793', u'ENSMUSG00000036594', u'ENSMUSG00000055172', u'ENSMUSG00000031639', u'ENSMUSG00000009585', u'ENSMUSG00000047123', u'ENSMUSG00000073411', u'ENSMUSG00000062593', u'ENSMUSG00000047798', u'ENSMUSG00000024338', u'ENSMUSG00000024339', u'ENSMUSG00000043496', u'ENSMUSG00000029826', u'ENSMUSG00000024621', u'ENSMUSG00000031805', u'ENSMUSG00000024235', u'ENSMUSG00000027508', u'ENSMUSG00000071203', u'ENSMUSG00000079547', u'ENSMUSG00000037860', u'ENSMUSG00000036905', u'ENSMUSG00000032312', u'ENSMUSG00000061232', u'ENSMUSG00000038213', u'ENSMUSG00000020707', u'ENSMUSG00000028191', 
u'ENSMUSG00000057058', u'ENSMUSG00000037731', u'ENSMUSG00000024349', u'ENSMUSG00000030263', u'ENSMUSG00000003283', u'ENSMUSG00000033538', u'ENSMUSG00000020437', u'ENSMUSG00000029217', u'ENSMUSG00000073421', u'ENSMUSG00000042228', u'ENSMUSG00000036896', u'ENSMUSG00000015947', u'ENSMUSG00000030748', u'ENSMUSG00000021583', u'ENSMUSG00000025492', u'ENSMUSG00000060802', u'ENSMUSG00000029798', u'ENSMUSG00000014599', u'ENSMUSG00000027995', u'ENSMUSG00000027639', u'ENSMUSG00000000275', u'ENSMUSG00000070390', u'ENSMUSG00000032690', u'ENSMUSG00000032691', u'ENSMUSG00000038058', u'ENSMUSG00000021423', u'ENSMUSG00000031838', u'ENSMUSG00000043279', u'ENSMUSG00000060550']), symbols=u'Aim2 Apobec3 B2m Bcl10 C1qb C1qc C1ra Card9 Casp4 Cd300lf Csf1 Csf1r Csk Erap1 Fcgr1 Gbp2b H2-Aa H2-Ab1 H2-D1 H2-DMb1 H2-K1 H2-Q7 H2-T23 Hck Herc6 Ifi30 Ifitm3 Il4ra Jak3 Lat2 Lilrb4a Lrmp Ly86 Lyn Map3k8 Myo1g Naip5 Nlrp1b Nlrp3 Nod1 Oas2 Pag1 Psmb8 Pycard Rnf135 Samhd1 Skap1 Ssc5d Tap2 Tapbpl Tec Themis2 Ticam1 Tlr2 Tlr3 Tlr9 Tmem173 Tril Trim25 Trim56 Zc3hav1', symbol_set=set(['Gbp2b', u'Ifitm3', u'Casp4', u'Tec', u'Csf1', u'Csk', u'Tapbpl', u'Naip5', u'Pag1', u'Ifi30', u'Aim2', u'Apobec3', u'Card9', 'H2-D1', u'Hck', u'Psmb8', u'Pycard', u'Bcl10', u'Ticam1', 'H2-K1', 'Ssc5d', 'H2-T23', u'Skap1', u'C1qc', u'C1qb', u'Cd300lf', u'B2m', u'Oas2', u'Herc6', u'Csf1r', u'Lrmp', u'Trim56', u'Tmem173', u'Zc3hav1', u'Map3k8', u'Tap2', u'Il4ra', u'Nlrp3', u'Lat2', 'Lilrb4a', 'H2-Q7', u'Ly86', 'H2-Aa', 'H2-DMb1', u'Tril', u'Erap1', u'Samhd1', u'Nlrp1b', u'Myo1g', 'H2-Ab1', u'C1ra', u'Tlr2', u'Tlr3', 'Themis2', u'Nod1', u'Jak3', u'Trim25', u'Lyn', u'Tlr9', u'Fcgr1', u'Rnf135']), geneids='ENSMUSG00000040264 ENSMUSG00000045322 ENSMUSG00000026928 ENSMUSG00000067212 ENSMUSG00000035279 ENSMUSG00000020437 ENSMUSG00000036594 ENSMUSG00000055172 ENSMUSG00000031639 ENSMUSG00000009585 ENSMUSG00000047123 ENSMUSG00000040751 ENSMUSG00000071203 ENSMUSG00000047798 ENSMUSG00000024338 ENSMUSG00000024339 ENSMUSG00000043496 
ENSMUSG00000029826 ENSMUSG00000024621 ENSMUSG00000073421 ENSMUSG00000031805 ENSMUSG00000015947 ENSMUSG00000027508 ENSMUSG00000037731 ENSMUSG00000079547 ENSMUSG00000037860 ENSMUSG00000036905 ENSMUSG00000032312 ENSMUSG00000061232 ENSMUSG00000038213 ENSMUSG00000020707 ENSMUSG00000028191 ENSMUSG00000057058 ENSMUSG00000043279 ENSMUSG00000025492 ENSMUSG00000030263 ENSMUSG00000003283 ENSMUSG00000033538 ENSMUSG00000030793 ENSMUSG00000073411 ENSMUSG00000029217 ENSMUSG00000062593 ENSMUSG00000042228 ENSMUSG00000036896 ENSMUSG00000021583 ENSMUSG00000024349 ENSMUSG00000031838 ENSMUSG00000060802 ENSMUSG00000029798 ENSMUSG00000014599 ENSMUSG00000027995 ENSMUSG00000027639 ENSMUSG00000000275 ENSMUSG00000030748 ENSMUSG00000032690 ENSMUSG00000032691 ENSMUSG00000038058 ENSMUSG00000070390 ENSMUSG00000021423 ENSMUSG00000024235 ENSMUSG00000060550', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=572, D1='L', GO_name='immune system process'),
Nt(p_uncorrected=2.9764833787115223e-05, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.011274174917714569, study_items=set([u'ENSMUSG00000036461', u'ENSMUSG00000022500', u'ENSMUSG00000042228', u'ENSMUSG00000032312', u'ENSMUSG00000038235']), NS='BP', pop_items=set([u'ENSMUSG00000036461', u'ENSMUSG00000021277', u'ENSMUSG00000022208', u'ENSMUSG00000030536', u'ENSMUSG00000022500', u'ENSMUSG00000042228', u'ENSMUSG00000032312', u'ENSMUSG00000038235']), pop_n=13836, p_sm_bonferroni=0.45096699670858276, is_obsolete=False, GO='GO:0001817', name='regulation of cytokine production', pop_count=8, alt_ids=[], level=4, depth=4, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000042228', u'ENSMUSG00000022500', u'ENSMUSG00000036461', u'ENSMUSG00000032312', u'ENSMUSG00000038235']), symbols=u'Csk Elf1 F11r Litaf Lyn', symbol_set=set([u'Elf1', u'Lyn', u'Litaf', u'F11r', u'Csk']), geneids='ENSMUSG00000042228 ENSMUSG00000022500 ENSMUSG00000036461 ENSMUSG00000032312 ENSMUSG00000038235', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=476, D1='A', GO_name='regulation of cytokine production'),
Nt(p_uncorrected=0.00018831346875026884, study_count=3, study_n=794, namespace='biological_process', p_fdr_bh=0.04322935401568671, study_items=set([u'ENSMUSG00000031639', u'ENSMUSG00000021408', u'ENSMUSG00000024778']), NS='BP', pop_items=set([u'ENSMUSG00000031639', u'ENSMUSG00000021408', u'ENSMUSG00000024778']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0097527', name='necroptotic signaling pathway', pop_count=3, alt_ids=[], level=3, depth=5, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000031639', u'ENSMUSG00000021408', u'ENSMUSG00000024778']), symbols=u'Fas Ripk1 Tlr3', symbol_set=set([u'Ripk1', u'Tlr3', u'Fas']), geneids='ENSMUSG00000031639 ENSMUSG00000021408 ENSMUSG00000024778', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=0, D1='AB', GO_name='necroptotic signaling pathway'),
Nt(p_uncorrected=1.1072215607900484e-06, study_count=14, study_n=794, namespace='biological_process', p_fdr_bh=0.0007625233576150012, study_items=set([u'ENSMUSG00000021583', u'ENSMUSG00000062593', u'ENSMUSG00000028191', u'ENSMUSG00000057058', u'ENSMUSG00000024339', u'ENSMUSG00000026321', u'ENSMUSG00000020437', u'ENSMUSG00000031805', u'ENSMUSG00000027508', u'ENSMUSG00000067212', u'ENSMUSG00000029217', u'ENSMUSG00000040751', u'ENSMUSG00000042228', u'ENSMUSG00000032312']), NS='BP', pop_items=set([u'ENSMUSG00000026648', u'ENSMUSG00000028064', u'ENSMUSG00000000732', u'ENSMUSG00000020437', u'ENSMUSG00000021457', u'ENSMUSG00000037321', u'ENSMUSG00000020476', u'ENSMUSG00000031990', u'ENSMUSG00000004707', u'ENSMUSG00000023990', u'ENSMUSG00000018446', u'ENSMUSG00000038260', u'ENSMUSG00000022636', u'ENSMUSG00000024339', u'ENSMUSG00000004730', u'ENSMUSG00000041439', u'ENSMUSG00000031805', u'ENSMUSG00000024767', u'ENSMUSG00000027508', u'ENSMUSG00000036908', u'ENSMUSG00000019843', u'ENSMUSG00000041135', u'ENSMUSG00000032312', u'ENSMUSG00000058818', u'ENSMUSG00000026812', u'ENSMUSG00000049686', u'ENSMUSG00000022901', u'ENSMUSG00000059456', u'ENSMUSG00000028191', u'ENSMUSG00000024789', u'ENSMUSG00000028793', u'ENSMUSG00000039936', u'ENSMUSG00000038128', u'ENSMUSG00000023915', u'ENSMUSG00000029217', u'ENSMUSG00000062593', u'ENSMUSG00000042228', u'ENSMUSG00000021583', u'ENSMUSG00000041187', u'ENSMUSG00000032216', u'ENSMUSG00000005102', u'ENSMUSG00000025702', u'ENSMUSG00000024610', u'ENSMUSG00000026321', u'ENSMUSG00000026117', u'ENSMUSG00000020399', u'ENSMUSG00000000134', u'ENSMUSG00000040751', u'ENSMUSG00000051212', u'ENSMUSG00000057058', u'ENSMUSG00000067212', u'ENSMUSG00000038147', u'ENSMUSG00000052889']), pop_n=13836, p_sm_bonferroni=0.016775513867530022, is_obsolete=False, GO='GO:0002250', name='adaptive immune response', pop_count=53, alt_ids=[], level=3, depth=3, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000021583', u'ENSMUSG00000028191', 
u'ENSMUSG00000057058', u'ENSMUSG00000024339', u'ENSMUSG00000026321', u'ENSMUSG00000020437', u'ENSMUSG00000031805', u'ENSMUSG00000040751', u'ENSMUSG00000032312', u'ENSMUSG00000027508', u'ENSMUSG00000029217', u'ENSMUSG00000062593', u'ENSMUSG00000042228', u'ENSMUSG00000067212']), symbols=u'Bcl10 Csk Erap1 H2-T23 Jak3 Lat2 Lilrb4a Lyn Myo1g Pag1 Skap1 Tap2 Tec Tnfrsf11a', symbol_set=set([u'Erap1', u'Tap2', u'Csk', u'Lat2', u'Myo1g', u'Pag1', 'H2-T23', u'Skap1', u'Tnfrsf11a', 'Lilrb4a', u'Tec', u'Lyn', u'Bcl10', u'Jak3']), geneids='ENSMUSG00000021583 ENSMUSG00000062593 ENSMUSG00000028191 ENSMUSG00000057058 ENSMUSG00000024339 ENSMUSG00000026321 ENSMUSG00000020437 ENSMUSG00000031805 ENSMUSG00000027508 ENSMUSG00000067212 ENSMUSG00000029217 ENSMUSG00000040751 ENSMUSG00000042228 ENSMUSG00000032312', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=52, D1='FL', GO_name='adaptive immune response'),
Nt(p_uncorrected=0.0001215684959084734, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.03175662554326346, study_items=set([u'ENSMUSG00000014599', u'ENSMUSG00000022105', u'ENSMUSG00000021408', u'ENSMUSG00000026029', u'ENSMUSG00000022831']), NS='BP', pop_items=set([u'ENSMUSG00000034394', u'ENSMUSG00000026029', u'ENSMUSG00000022831', u'ENSMUSG00000014599', u'ENSMUSG00000021408', u'ENSMUSG00000032501', u'ENSMUSG00000050965', u'ENSMUSG00000031750', u'ENSMUSG00000020644', u'ENSMUSG00000022105']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0045651', name='positive regulation of macrophage differentiation', pop_count=10, alt_ids=[], level=8, depth=9, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000014599', u'ENSMUSG00000022105', u'ENSMUSG00000021408', u'ENSMUSG00000026029', u'ENSMUSG00000022831']), symbols=u'Casp8 Csf1 Hcls1 Rb1 Ripk1', symbol_set=set([u'Ripk1', u'Hcls1', u'Casp8', u'Csf1', u'Rb1']), geneids='ENSMUSG00000014599 ENSMUSG00000022105 ENSMUSG00000021408 ENSMUSG00000026029 ENSMUSG00000022831', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=1, D1='A', GO_name='positive regulation of macrophage differentiation'),
Nt(p_uncorrected=2.1406708605252513e-09, study_count=10, study_n=794, namespace='biological_process', p_fdr_bh=4.05416302597726e-06, study_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000027852', u'ENSMUSG00000053175', u'ENSMUSG00000069874', u'ENSMUSG00000030748', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000047810', u'ENSMUSG00000041515', u'ENSMUSG00000029298']), NS='BP', pop_items=set([u'ENSMUSG00000040264', u'ENSMUSG00000027852', u'ENSMUSG00000030798', u'ENSMUSG00000053175', u'ENSMUSG00000025499', u'ENSMUSG00000069874', u'ENSMUSG00000028270', u'ENSMUSG00000040253', u'ENSMUSG00000026177', u'ENSMUSG00000030748', u'ENSMUSG00000054072', u'ENSMUSG00000019726', u'ENSMUSG00000028268', u'ENSMUSG00000047810', u'ENSMUSG00000041515', u'ENSMUSG00000029298']), pop_n=13836, p_sm_bonferroni=3.243330420781808e-05, is_obsolete=False, GO='GO:0042832', name='defense response to protozoan', pop_count=16, alt_ids=[], level=5, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000040264', u'ENSMUSG00000027852', u'ENSMUSG00000053175', u'ENSMUSG00000069874', u'ENSMUSG00000030748', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000047810', u'ENSMUSG00000041515', u'ENSMUSG00000029298']), symbols=u'Bcl3 Ccdc88b Gbp2 Gbp2b Gbp3 Gbp9 Il4ra Irf8 Irgm2 Nras', symbol_set=set(['Gbp2b', u'Gbp9', u'Il4ra', u'Gbp3', u'Gbp2', u'Ccdc88b', u'Irf8', u'Nras', u'Irgm2', u'Bcl3']), geneids='ENSMUSG00000040264 ENSMUSG00000027852 ENSMUSG00000053175 ENSMUSG00000069874 ENSMUSG00000030748 ENSMUSG00000028270 ENSMUSG00000028268 ENSMUSG00000047810 ENSMUSG00000041515 ENSMUSG00000029298', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=1, D1='F', GO_name='defense response to protozoan'),
Nt(p_uncorrected=2.9764833787115223e-05, study_count=5, study_n=794, namespace='biological_process', p_fdr_bh=0.011274174917714569, study_items=set([u'ENSMUSG00000028268', u'ENSMUSG00000062300', u'ENSMUSG00000040264', u'ENSMUSG00000029298', u'ENSMUSG00000028270']), NS='BP', pop_items=set([u'ENSMUSG00000037405', u'ENSMUSG00000040264', u'ENSMUSG00000040253', u'ENSMUSG00000062300', u'ENSMUSG00000037936', u'ENSMUSG00000028270', u'ENSMUSG00000028268', u'ENSMUSG00000029298']), pop_n=13836, p_sm_bonferroni=0.45096699670858276, is_obsolete=False, GO='GO:0044406', name='adhesion of symbiont to host', pop_count=8, alt_ids=['GO:0051825', 'GO:0051856'], level=2, depth=2, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000028268', u'ENSMUSG00000062300', u'ENSMUSG00000040264', u'ENSMUSG00000029298', u'ENSMUSG00000028270']), symbols=u'Gbp2 Gbp2b Gbp3 Gbp9 Nectin2', symbol_set=set([u'Gbp3', 'Nectin2', u'Gbp9', 'Gbp2b', u'Gbp2']), geneids='ENSMUSG00000028268 ENSMUSG00000062300 ENSMUSG00000040264 ENSMUSG00000029298 ENSMUSG00000028270', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=22, D1='IP', GO_name='adhesion of symbiont to host'),
Nt(p_uncorrected=6.627253573428696e-05, study_count=11, study_n=794, namespace='biological_process', p_fdr_bh=0.019309522863657343, study_items=set([u'ENSMUSG00000000555', u'ENSMUSG00000034116', u'ENSMUSG00000000290', u'ENSMUSG00000004508', u'ENSMUSG00000027087', u'ENSMUSG00000029217', u'ENSMUSG00000058715', u'ENSMUSG00000020120', u'ENSMUSG00000007815', u'ENSMUSG00000024965', u'ENSMUSG00000020689']), NS='BP', pop_items=set([u'ENSMUSG00000033721', u'ENSMUSG00000028041', u'ENSMUSG00000021457', u'ENSMUSG00000004508', u'ENSMUSG00000019997', u'ENSMUSG00000058715', u'ENSMUSG00000000555', u'ENSMUSG00000040945', u'ENSMUSG00000027009', u'ENSMUSG00000020857', u'ENSMUSG00000027646', u'ENSMUSG00000042284', u'ENSMUSG00000004364', u'ENSMUSG00000021065', u'ENSMUSG00000026043', u'ENSMUSG00000028874', u'ENSMUSG00000022817', u'ENSMUSG00000001507', u'ENSMUSG00000020758', u'ENSMUSG00000027087', u'ENSMUSG00000015647', u'ENSMUSG00000038264', u'ENSMUSG00000034116', u'ENSMUSG00000030789', u'ENSMUSG00000030786', u'ENSMUSG00000025809', u'ENSMUSG00000026768', u'ENSMUSG00000029528', u'ENSMUSG00000030579', u'ENSMUSG00000029217', u'ENSMUSG00000025348', u'ENSMUSG00000037712', u'ENSMUSG00000043733', u'ENSMUSG00000020689', u'ENSMUSG00000029860', u'ENSMUSG00000000290', u'ENSMUSG00000059456', u'ENSMUSG00000022607', u'ENSMUSG00000024965', u'ENSMUSG00000031425', u'ENSMUSG00000007815', u'ENSMUSG00000058230', u'ENSMUSG00000027111', u'ENSMUSG00000039115', u'ENSMUSG00000030890', u'ENSMUSG00000020120', u'ENSMUSG00000062352', u'ENSMUSG00000031955']), pop_n=13836, p_sm_bonferroni=1.0, is_obsolete=False, GO='GO:0007229', name='integrin-mediated signaling pathway', pop_count=48, alt_ids=[], level=4, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000000555', u'ENSMUSG00000034116', u'ENSMUSG00000000290', u'ENSMUSG00000004508', u'ENSMUSG00000024965', u'ENSMUSG00000029217', u'ENSMUSG00000058715', u'ENSMUSG00000020120', u'ENSMUSG00000007815', u'ENSMUSG00000027087', 
u'ENSMUSG00000020689']), symbols=u'Fcer1g Fermt3 Gab2 Itga5 Itgav Itgb2 Itgb3 Plek Rhoa Tec Vav1', symbol_set=set([u'Fcer1g', u'Itgb2', u'Vav1', u'Tec', u'Gab2', u'Itga5', u'Itgav', u'Fermt3', u'Itgb3', u'Rhoa', u'Plek']), geneids='ENSMUSG00000000555 ENSMUSG00000034116 ENSMUSG00000000290 ENSMUSG00000004508 ENSMUSG00000027087 ENSMUSG00000029217 ENSMUSG00000058715 ENSMUSG00000020120 ENSMUSG00000007815 ENSMUSG00000024965 ENSMUSG00000020689', format_txt=1, hdr_idx=1, is_hdrgo=True, is_usrgo=True, hdr1usr01='**', dcnt=0, D1='AB', GO_name='integrin-mediated signaling pathway'),
Nt(p_uncorrected=1.36080987204659e-07, study_count=12, study_n=794, namespace='biological_process', p_fdr_bh=0.00012128017865516403, study_items=set([u'ENSMUSG00000027947', u'ENSMUSG00000029468', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000027995', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000058715', u'ENSMUSG00000079227', u'ENSMUSG00000006519']), NS='BP', pop_items=set([u'ENSMUSG00000031380', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000030793', u'ENSMUSG00000028291', u'ENSMUSG00000052688', u'ENSMUSG00000031639', u'ENSMUSG00000021994', u'ENSMUSG00000058715', u'ENSMUSG00000024810', u'ENSMUSG00000018500', u'ENSMUSG00000006519', u'ENSMUSG00000032508', u'ENSMUSG00000024256', u'ENSMUSG00000034610', u'ENSMUSG00000047123', u'ENSMUSG00000016024', u'ENSMUSG00000041135', u'ENSMUSG00000039005', u'ENSMUSG00000028059', u'ENSMUSG00000027947', u'ENSMUSG00000029468', u'ENSMUSG00000027551', u'ENSMUSG00000044583', u'ENSMUSG00000027399', u'ENSMUSG00000022708', u'ENSMUSG00000025980', u'ENSMUSG00000027995', u'ENSMUSG00000040296', u'ENSMUSG00000066551', u'ENSMUSG00000038058', u'ENSMUSG00000022749', u'ENSMUSG00000079227']), pop_n=13836, p_sm_bonferroni=0.002061763037137789, is_obsolete=False, GO='GO:0032755', name='positive regulation of interleukin-6 production', pop_count=33, alt_ids=[], level=6, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000027947', u'ENSMUSG00000029468', u'ENSMUSG00000045322', u'ENSMUSG00000026928', u'ENSMUSG00000027995', u'ENSMUSG00000030793', u'ENSMUSG00000047123', u'ENSMUSG00000038058', u'ENSMUSG00000031639', u'ENSMUSG00000058715', u'ENSMUSG00000079227', u'ENSMUSG00000006519']), symbols=u'Card9 Ccr5 Cyba Fcer1g Il6ra Nod1 P2rx7 Pycard Ticam1 Tlr2 Tlr3 Tlr9', symbol_set=set([u'Ticam1', u'Fcer1g', u'Il6ra', u'Tlr2', u'Tlr3', u'Card9', u'Cyba', u'Nod1', u'Ccr5', u'Pycard', u'Tlr9', u'P2rx7']), geneids='ENSMUSG00000027947 
ENSMUSG00000029468 ENSMUSG00000045322 ENSMUSG00000026928 ENSMUSG00000027995 ENSMUSG00000030793 ENSMUSG00000047123 ENSMUSG00000038058 ENSMUSG00000031639 ENSMUSG00000058715 ENSMUSG00000079227 ENSMUSG00000006519', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=2, D1='A', GO_name='positive regulation of interleukin-6 production'),
Nt(p_uncorrected=7.956004359417217e-07, study_count=12, study_n=794, namespace='biological_process', p_fdr_bh=0.0006027071102476514, study_items=set([u'ENSMUSG00000079362', u'ENSMUSG00000038058', u'ENSMUSG00000053175', u'ENSMUSG00000069874', u'ENSMUSG00000042286', u'ENSMUSG00000030341', u'ENSMUSG00000015947', u'ENSMUSG00000015950', u'ENSMUSG00000071203', u'ENSMUSG00000058715', u'ENSMUSG00000041515', u'ENSMUSG00000061232']), NS='BP', pop_items=set([u'ENSMUSG00000039191', u'ENSMUSG00000042286', u'ENSMUSG00000021457', u'ENSMUSG00000022894', u'ENSMUSG00000056501', u'ENSMUSG00000058715', u'ENSMUSG00000030059', u'ENSMUSG00000021948', u'ENSMUSG00000053175', u'ENSMUSG00000069516', u'ENSMUSG00000023078', u'ENSMUSG00000015947', u'ENSMUSG00000016024', u'ENSMUSG00000037523', u'ENSMUSG00000020953', u'ENSMUSG00000071203', u'ENSMUSG00000061232', u'ENSMUSG00000079362', u'ENSMUSG00000015950', u'ENSMUSG00000069874', u'ENSMUSG00000055204', u'ENSMUSG00000030341', u'ENSMUSG00000006403', u'ENSMUSG00000067847', u'ENSMUSG00000026878', u'ENSMUSG00000027878', u'ENSMUSG00000078942', u'ENSMUSG00000078945', u'ENSMUSG00000035692', u'ENSMUSG00000029484', u'ENSMUSG00000045827', u'ENSMUSG00000044827', u'ENSMUSG00000026177', u'ENSMUSG00000020149', u'ENSMUSG00000038058', u'ENSMUSG00000019726', u'ENSMUSG00000016206', u'ENSMUSG00000041515']), pop_n=13836, p_sm_bonferroni=0.012054142204953025, is_obsolete=False, GO='GO:0042742', name='defense response to bacterium', pop_count=38, alt_ids=['GO:0042830'], level=5, depth=6, enrichment='e', Cluster='Consistent Increase', geneid_set=set([u'ENSMUSG00000079362', u'ENSMUSG00000015950', u'ENSMUSG00000053175', u'ENSMUSG00000069874', u'ENSMUSG00000042286', u'ENSMUSG00000030341', u'ENSMUSG00000015947', u'ENSMUSG00000038058', u'ENSMUSG00000071203', u'ENSMUSG00000058715', u'ENSMUSG00000041515', u'ENSMUSG00000061232']), symbols=u'Bcl3 Fcer1g Fcgr1 Gm43302 H2-K1 Irf8 Irgm2 Naip5 Ncf1 Nod1 Stab1 Tnfrsf1a', symbol_set=set([u'Ncf1', u'Stab1', u'Tnfrsf1a', u'Naip5', 
u'Irf8', u'Fcer1g', 'Gm43302', u'Nod1', u'Fcgr1', u'Irgm2', u'Bcl3', 'H2-K1']), geneids='ENSMUSG00000079362 ENSMUSG00000015950 ENSMUSG00000053175 ENSMUSG00000069874 ENSMUSG00000042286 ENSMUSG00000030341 ENSMUSG00000015947 ENSMUSG00000038058 ENSMUSG00000071203 ENSMUSG00000041515 ENSMUSG00000058715 ENSMUSG00000061232', format_txt=0, hdr_idx=0, is_hdrgo=False, is_usrgo=True, hdr1usr01='', dcnt=8, D1='F', GO_name='defense response to bacterium'),
]
| 1,304.238095 | 10,983 | 0.802687 |
81c2f0fbd5f3786cebea402de87eba7639135ebb | 2,594 | py | Python | tests/envs/test_luby.py | goktug97/DACBench | 953bc8efacdb993889b223110e25f7e453c86b2d | [
"Apache-2.0"
] | 1 | 2021-02-05T16:18:56.000Z | 2021-02-05T16:18:56.000Z | tests/envs/test_luby.py | goktug97/DACBench | 953bc8efacdb993889b223110e25f7e453c86b2d | [
"Apache-2.0"
] | null | null | null | tests/envs/test_luby.py | goktug97/DACBench | 953bc8efacdb993889b223110e25f7e453c86b2d | [
"Apache-2.0"
] | null | null | null | import pytest
import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.envs import LubyEnv
from dacbench.benchmarks.luby_benchmark import LUBY_DEFAULTS
class TestLubyEnv(unittest.TestCase):
    """Unit tests for the Luby-sequence benchmark environment."""

    def make_env(self):
        """Build a LubyEnv with a fixed single-instance set.

        A copy of LUBY_DEFAULTS is used so the shared module-level default
        config is not mutated by the tests (the original code aliased the
        dict and modified it in place).
        """
        config = dict(LUBY_DEFAULTS)
        config["instance_set"] = {0: [1, 1]}
        return LubyEnv(config)

    def test_setup(self):
        """Constructor should wire up RNG, generator and config values."""
        env = self.make_env()
        self.assertTrue(issubclass(type(env), AbstractEnv))
        self.assertIsNotNone(env.rng)
        self.assertIsNotNone(env._genny)
        self.assertIsNotNone(env._next_goal)
        self.assertIsNotNone(env._seq)
        self.assertEqual(env._ms, LUBY_DEFAULTS["cutoff"])
        self.assertEqual(env._mi, LUBY_DEFAULTS["min_steps"])
        self.assertEqual(env._hist_len, LUBY_DEFAULTS["hist_length"])
        self.assertEqual(env._start_shift, 0)
        self.assertEqual(env._sticky_shif, 0)

    def test_reset(self):
        """reset() should apply the instance shifts and return an empty history."""
        env = self.make_env()
        state = env.reset()
        # The original used assertTrue(x, 1): the 1 was silently treated as the
        # failure message, so nothing was actually compared. assertEqual is the
        # intended check.
        self.assertEqual(env._start_shift, 1)
        self.assertEqual(env._sticky_shif, 1)
        self.assertTrue(
            np.array_equal(-1 * np.ones(LUBY_DEFAULTS["hist_length"] + 1), state)
        )

    def _assert_first_step(self, env):
        """Step once with action 1 and check reward, state layout and flags."""
        state, reward, done, meta = env.step(1)
        self.assertGreaterEqual(reward, env.reward_range[0])
        self.assertLessEqual(reward, env.reward_range[1])
        self.assertEqual(state[-1], 0)
        self.assertEqual(state[0], 1)
        self.assertTrue(np.array_equal(state[1:-1], -1 * np.ones(4)))
        self.assertEqual(len(state), env._hist_len + 1)
        self.assertFalse(done)
        self.assertEqual(len(meta.keys()), 0)

    def test_step(self):
        """Stepping behaves the same for positive and negative instance shifts."""
        env = self.make_env()
        env.reset()
        self._assert_first_step(env)
        config = dict(LUBY_DEFAULTS)
        config["instance_set"] = {1: [-4, -4]}
        env = LubyEnv(config)
        env.reset()
        self._assert_first_step(env)

    def test_close(self):
        """close() should report success."""
        env = self.make_env()
        self.assertTrue(env.close())

    def test_render(self):
        """Only the "human" render mode is supported."""
        env = self.make_env()
        env.render("human")
        with pytest.raises(NotImplementedError):
            env.render("random")
| 34.586667 | 81 | 0.616423 |
d25eaa0f9a4913052690cce737dc218b24b3fb80 | 374 | py | Python | hapi/networks.py | telegnom/pyhapi | 2124fbbf8d67313040bb96a2e2df0ec3cf1a3ab4 | [
"MIT"
] | null | null | null | hapi/networks.py | telegnom/pyhapi | 2124fbbf8d67313040bb96a2e2df0ec3cf1a3ab4 | [
"MIT"
] | null | null | null | hapi/networks.py | telegnom/pyhapi | 2124fbbf8d67313040bb96a2e2df0ec3cf1a3ab4 | [
"MIT"
] | null | null | null | networks = {
    # HAFAS "HAPI" REST endpoints, keyed by the network's short name.
    # accessId=True means the endpoint requires an API access id (API key).
    'RMV': {'accessId': True,
            'longname': 'Rhein-Main-Verkehrsverbund',
            'url': 'https://www.rmv.de/hapi/',
            'name': 'RMV',
            },
    'VBB': {'accessId': True,
            'longname': 'Verkehrsverbund Berlin-Brandenburg',
            # NOTE(review): plain http here, https for RMV above — confirm
            # whether the VBB proxy supports https.
            'url':'http://fahrinfo.vbb.de/restproxy/',
            'name': 'VBB'
            }
    } | 31.166667 | 61 | 0.462567 |
67fc4d3c2fe265db9d14057dbc35cdf3fb417578 | 21,594 | py | Python | synapse/handlers/device.py | Sorunome/synapse | 260b66c01c35d7a5a3c33e73f5b8c5a207cbb903 | [
"Apache-2.0"
] | null | null | null | synapse/handlers/device.py | Sorunome/synapse | 260b66c01c35d7a5a3c33e73f5b8c5a207cbb903 | [
"Apache-2.0"
] | null | null | null | synapse/handlers/device.py | Sorunome/synapse | 260b66c01c35d7a5a3c33e73f5b8c5a207cbb903 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.api.errors import FederationDeniedError
from synapse.util import stringutils
from synapse.util.async import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.metrics import measure_func
from synapse.types import get_domain_from_id, RoomStreamToken
from twisted.internet import defer
from ._base import BaseHandler
import logging
from synapse.util.urlbuilder import urlbuilder
from six import itervalues, iteritems
logger = logging.getLogger(__name__)
class DeviceHandler(BaseHandler):
    """Manages user devices: registration, lookup, deletion, and
    propagation of device-list changes locally and over federation.
    """
    def __init__(self, hs):
        super(DeviceHandler, self).__init__(hs)
        self.hs = hs
        self.state = hs.get_state_handler()
        self._auth_handler = hs.get_auth_handler()
        self.federation_sender = hs.get_federation_sender()
        # Applies incoming remote device-list EDUs to the local cache.
        self._edu_updater = DeviceListEduUpdater(hs, self)
        federation_registry = hs.get_federation_registry()
        # Remote servers notify us of device-list changes via this EDU.
        federation_registry.register_edu_handler(
            "m.device_list_update", self._edu_updater.incoming_device_list_update,
        )
        # Remote servers can query the full device list of a local user.
        federation_registry.register_query_handler(
            "user_devices", self.on_federation_query_user_devices,
        )
        hs.get_distributor().observe("user_left_room", self.user_left_room)
@defer.inlineCallbacks
def check_device_registered(self, user_id, device_id,
initial_device_display_name=None):
"""
If the given device has not been registered, register it with the
supplied display name.
If no device_id is supplied, we make one up.
Args:
user_id (str): @user:id
device_id (str | None): device id supplied by client
initial_device_display_name (str | None): device display name from
client
Returns:
str: device id (generated if none was supplied)
"""
if device_id is not None:
new_device = yield self.store.store_device(
user_id=user_id,
device_id=device_id,
initial_device_display_name=initial_device_display_name,
)
if new_device:
yield self.notify_device_update(user_id, [device_id])
defer.returnValue(device_id)
# if the device id is not specified, we'll autogen one, but loop a few
# times in case of a clash.
attempts = 0
while attempts < 5:
device_id = urlbuilder("^P's ^A")
new_device = yield self.store.store_device(
user_id=user_id,
device_id=device_id,
initial_device_display_name=initial_device_display_name,
)
if new_device:
yield self.notify_device_update(user_id, [device_id])
defer.returnValue(device_id)
attempts += 1
raise errors.StoreError(500, "Couldn't generate a device ID.")
    @defer.inlineCallbacks
    def get_devices_by_user(self, user_id):
        """
        Retrieve the given user's devices
        Args:
            user_id (str):
        Returns:
            defer.Deferred: list[dict[str, X]]: info on each device
        """
        device_map = yield self.store.get_devices_by_user(user_id)
        # device_id=None fetches last-seen client info for all of the user's devices.
        ips = yield self.store.get_last_client_ip_by_device(
            user_id, device_id=None
        )
        devices = list(device_map.values())
        for device in devices:
            # Annotate each device dict in place with last_seen_ts/last_seen_ip.
            _update_device_from_client_ips(device, ips)
        defer.returnValue(devices)
    @defer.inlineCallbacks
    def get_device(self, user_id, device_id):
        """ Retrieve the given device
        Args:
            user_id (str):
            device_id (str):
        Returns:
            defer.Deferred: dict[str, X]: info on the device
        Raises:
            errors.NotFoundError: if the device was not found
        """
        try:
            device = yield self.store.get_device(user_id, device_id)
        except errors.StoreError:
            # Translate the storage-layer miss into the API-level NotFoundError.
            raise errors.NotFoundError
        ips = yield self.store.get_last_client_ip_by_device(
            user_id, device_id,
        )
        _update_device_from_client_ips(device, ips)
        defer.returnValue(device)
    @defer.inlineCallbacks
    def delete_device(self, user_id, device_id):
        """ Delete the given device
        Args:
            user_id (str):
            device_id (str):
        Returns:
            defer.Deferred:
        """
        try:
            yield self.store.delete_device(user_id, device_id)
        except errors.StoreError as e:
            if e.code == 404:
                # no match
                # Deleting an already-absent device is treated as success;
                # tokens and keys are still cleaned up below.
                pass
            else:
                raise
        yield self._auth_handler.delete_access_tokens_for_user(
            user_id, device_id=device_id,
        )
        yield self.store.delete_e2e_keys_by_device(
            user_id=user_id, device_id=device_id
        )
        yield self.notify_device_update(user_id, [device_id])
    @defer.inlineCallbacks
    def delete_all_devices_for_user(self, user_id, except_device_id=None):
        """Delete all of the user's devices
        Args:
            user_id (str):
            except_device_id (str|None): optional device id which should not
                be deleted
        Returns:
            defer.Deferred:
        """
        device_map = yield self.store.get_devices_by_user(user_id)
        device_ids = list(device_map)
        if except_device_id is not None:
            device_ids = [d for d in device_ids if d != except_device_id]
        yield self.delete_devices(user_id, device_ids)
    @defer.inlineCallbacks
    def delete_devices(self, user_id, device_ids):
        """ Delete several devices
        Args:
            user_id (str):
            device_ids (List[str]): The list of device IDs to delete
        Returns:
            defer.Deferred:
        """
        try:
            yield self.store.delete_devices(user_id, device_ids)
        except errors.StoreError as e:
            if e.code == 404:
                # no match
                pass
            else:
                raise
        # Delete access tokens and e2e keys for each device. Not optimised as it is not
        # considered as part of a critical path.
        for device_id in device_ids:
            yield self._auth_handler.delete_access_tokens_for_user(
                user_id, device_id=device_id,
            )
            yield self.store.delete_e2e_keys_by_device(
                user_id=user_id, device_id=device_id
            )
        yield self.notify_device_update(user_id, device_ids)
    @defer.inlineCallbacks
    def update_device(self, user_id, device_id, content):
        """ Update the given device
        Args:
            user_id (str):
            device_id (str):
            content (dict): body of update request
        Returns:
            defer.Deferred:
        """
        try:
            # Only the display name is updatable from the request body.
            yield self.store.update_device(
                user_id,
                device_id,
                new_display_name=content.get("display_name")
            )
            yield self.notify_device_update(user_id, [device_id])
        except errors.StoreError as e:
            if e.code == 404:
                raise errors.NotFoundError()
            else:
                raise
    @measure_func("notify_device_update")
    @defer.inlineCallbacks
    def notify_device_update(self, user_id, device_ids):
        """Notify that a user's device(s) has changed. Pokes the notifier, and
        remote servers if the user is local.
        """
        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )
        hosts = set()
        if self.hs.is_mine_id(user_id):
            # Only notify remote servers about changes to *local* users'
            # devices, and never notify ourselves.
            hosts.update(get_domain_from_id(u) for u in users_who_share_room)
            hosts.discard(self.server_name)
        position = yield self.store.add_device_change_to_streams(
            user_id, device_ids, list(hosts)
        )
        room_ids = yield self.store.get_rooms_for_user(user_id)
        yield self.notifier.on_new_event(
            "device_list_key", position, rooms=room_ids,
        )
        if hosts:
            logger.info("Sending device list update notif to: %r", hosts)
            for host in hosts:
                self.federation_sender.send_device_messages(host)
    @measure_func("device.get_user_ids_changed")
    @defer.inlineCallbacks
    def get_user_ids_changed(self, user_id, from_token):
        """Get list of users that have had the devices updated, or have newly
        joined a room, that `user_id` may be interested in.
        Args:
            user_id (str)
            from_token (StreamToken)
        """
        now_token = yield self.hs.get_event_sources().get_current_token()
        room_ids = yield self.store.get_rooms_for_user(user_id)
        # First we check if any devices have changed
        changed = yield self.store.get_user_whose_devices_changed(
            from_token.device_list_key
        )
        # Then work out if any users have since joined
        rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
        member_events = yield self.store.get_membership_changes_for_user(
            user_id, from_token.room_key, now_token.room_key
        )
        rooms_changed.update(event.room_id for event in member_events)
        stream_ordering = RoomStreamToken.parse_stream_token(
            from_token.room_key
        ).stream
        # Users whose devices may have changed / who we may have stopped
        # sharing a room with; refined against shared-room membership below.
        possibly_changed = set(changed)
        possibly_left = set()
        for room_id in rooms_changed:
            current_state_ids = yield self.store.get_current_state_ids(room_id)
            # The user may have left the room
            # TODO: Check if they actually did or if we were just invited.
            if room_id not in room_ids:
                for key, event_id in iteritems(current_state_ids):
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue
                    possibly_left.add(state_key)
                continue
            # Fetch the current state at the time.
            try:
                event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_ordering=stream_ordering
                )
            except errors.StoreError:
                # we have purged the stream_ordering index since the stream
                # ordering: treat it the same as a new room
                event_ids = []
            # special-case for an empty prev state: include all members
            # in the changed list
            if not event_ids:
                for key, event_id in iteritems(current_state_ids):
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue
                    possibly_changed.add(state_key)
                continue
            current_member_id = current_state_ids.get((EventTypes.Member, user_id))
            if not current_member_id:
                continue
            # mapping from event_id -> state_dict
            prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
            # Check if we've joined the room? If so we just blindly add all the users to
            # the "possibly changed" users.
            for state_dict in itervalues(prev_state_ids):
                member_event = state_dict.get((EventTypes.Member, user_id), None)
                if not member_event or member_event != current_member_id:
                    for key, event_id in iteritems(current_state_ids):
                        etype, state_key = key
                        if etype != EventTypes.Member:
                            continue
                        possibly_changed.add(state_key)
                    break
            # If there has been any change in membership, include them in the
            # possibly changed list. We'll check if they are joined below,
            # and we're not toooo worried about spuriously adding users.
            for key, event_id in iteritems(current_state_ids):
                etype, state_key = key
                if etype != EventTypes.Member:
                    continue
                # check if this member has changed since any of the extremities
                # at the stream_ordering, and add them to the list if so.
                for state_dict in itervalues(prev_state_ids):
                    prev_event_id = state_dict.get(key, None)
                    if not prev_event_id or prev_event_id != event_id:
                        if state_key != user_id:
                            possibly_changed.add(state_key)
                        break
        if possibly_changed or possibly_left:
            users_who_share_room = yield self.store.get_users_who_share_room_with_user(
                user_id
            )
            # Take the intersection of the users whose devices may have changed
            # and those that actually still share a room with the user
            possibly_joined = possibly_changed & users_who_share_room
            possibly_left = (possibly_changed | possibly_left) - users_who_share_room
        else:
            possibly_joined = []
            possibly_left = []
        defer.returnValue({
            "changed": list(possibly_joined),
            "left": list(possibly_left),
        })
    @defer.inlineCallbacks
    def on_federation_query_user_devices(self, user_id):
        # Federation query handler: return all devices (with keys) for a
        # local user, plus the stream id the snapshot corresponds to.
        stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
        defer.returnValue({
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
        })
    @defer.inlineCallbacks
    def user_left_room(self, user, room_id):
        # Distributor callback fired whenever a user leaves a room.
        user_id = user.to_string()
        room_ids = yield self.store.get_rooms_for_user(user_id)
        if not room_ids:
            # We no longer share rooms with this user, so we'll no longer
            # receive device updates. Mark this in DB.
            yield self.store.mark_remote_user_device_list_as_unsubscribed(user_id)
def _update_device_from_client_ips(device, client_ips):
ip = client_ips.get((device["user_id"], device["device_id"]), {})
device.update({
"last_seen_ts": ip.get("last_seen"),
"last_seen_ip": ip.get("ip"),
})
class DeviceListEduUpdater(object):
    "Handles incoming device list updates from federation and updates the DB"
    def __init__(self, hs, device_handler):
        self.store = hs.get_datastore()
        self.federation = hs.get_federation_client()
        self.clock = hs.get_clock()
        self.device_handler = device_handler
        # Serialises update handling per remote user so batched updates are
        # applied in order.
        self._remote_edu_linearizer = Linearizer(name="remote_device_list")
        # user_id -> list of updates waiting to be handled.
        self._pending_updates = {}
        # Recently seen stream ids. We don't bother keeping these in the DB,
        # but they're useful to have them about to reduce the number of spurious
        # resyncs.
        self._seen_updates = ExpiringCache(
            cache_name="device_update_edu",
            clock=self.clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
            iterable=True,
        )
    @defer.inlineCallbacks
    def incoming_device_list_update(self, origin, edu_content):
        """Called on incoming device list update from federation. Responsible
        for parsing the EDU and adding to pending updates list.
        """
        user_id = edu_content.pop("user_id")
        device_id = edu_content.pop("device_id")
        stream_id = str(edu_content.pop("stream_id"))  # They may come as ints
        prev_ids = edu_content.pop("prev_id", [])
        prev_ids = [str(p) for p in prev_ids]  # They may come as ints
        if get_domain_from_id(user_id) != origin:
            # A server may only send updates about its own users.
            # TODO: Raise?
            logger.warning("Got device list update edu for %r from %r", user_id, origin)
            return
        room_ids = yield self.store.get_rooms_for_user(user_id)
        if not room_ids:
            # We don't share any rooms with this user. Ignore update, as we
            # probably won't get any further updates.
            return
        # Queue the update, then drain this user's queue (linearised per user
        # inside _handle_device_updates).
        self._pending_updates.setdefault(user_id, []).append(
            (device_id, stream_id, prev_ids, edu_content)
        )
        yield self._handle_device_updates(user_id)
    @measure_func("_incoming_device_list_update")
    @defer.inlineCallbacks
    def _handle_device_updates(self, user_id):
        "Actually handle pending updates."
        with (yield self._remote_edu_linearizer.queue(user_id)):
            pending_updates = self._pending_updates.pop(user_id, [])
            if not pending_updates:
                # This can happen since we batch updates
                return
            # Given a list of updates we check if we need to resync. This
            # happens if we've missed updates.
            resync = yield self._need_to_do_resync(user_id, pending_updates)
            if resync:
                # Fetch all devices for the user.
                origin = get_domain_from_id(user_id)
                try:
                    result = yield self.federation.query_user_devices(origin, user_id)
                except NotRetryingDestination:
                    # TODO: Remember that we are now out of sync and try again
                    # later
                    logger.warn(
                        "Failed to handle device list update for %s,"
                        " we're not retrying the remote",
                        user_id,
                    )
                    # We abort on exceptions rather than accepting the update
                    # as otherwise synapse will 'forget' that its device list
                    # is out of date. If we bail then we will retry the resync
                    # next time we get a device list update for this user_id.
                    # This makes it more likely that the device lists will
                    # eventually become consistent.
                    return
                except FederationDeniedError as e:
                    logger.info(e)
                    return
                except Exception:
                    # TODO: Remember that we are now out of sync and try again
                    # later
                    logger.exception(
                        "Failed to handle device list update for %s", user_id
                    )
                    return
                stream_id = result["stream_id"]
                devices = result["devices"]
                # Replace the whole cached list with the remote's snapshot.
                yield self.store.update_remote_device_list_cache(
                    user_id, devices, stream_id,
                )
                device_ids = [device["device_id"] for device in devices]
                yield self.device_handler.notify_device_update(user_id, device_ids)
            else:
                # Simply update the single device, since we know that is the only
                # change (becuase of the single prev_id matching the current cache)
                for device_id, stream_id, prev_ids, content in pending_updates:
                    yield self.store.update_remote_device_list_cache_entry(
                        user_id, device_id, content, stream_id,
                    )
                yield self.device_handler.notify_device_update(
                    user_id, [device_id for device_id, _, _, _ in pending_updates]
                )
            # Remember the stream ids we just applied so future updates that
            # reference them don't force a resync.
            self._seen_updates.setdefault(user_id, set()).update(
                stream_id for _, stream_id, _, _ in pending_updates
            )
    @defer.inlineCallbacks
    def _need_to_do_resync(self, user_id, updates):
        """Given a list of updates for a user figure out if we need to do a full
        resync, or whether we have enough data that we can just apply the delta.
        """
        seen_updates = self._seen_updates.get(user_id, set())
        extremity = yield self.store.get_device_list_last_stream_id_for_remote(
            user_id
        )
        stream_id_in_updates = set()  # stream_ids in updates list
        for _, stream_id, prev_ids, _ in updates:
            if not prev_ids:
                # We always do a resync if there are no previous IDs
                defer.returnValue(True)
            for prev_id in prev_ids:
                # A prev_id is "known" if it is our cached extremity, a stream id
                # we recently applied, or another update in this same batch;
                # anything else means we missed an update and must resync.
                if prev_id == extremity:
                    continue
                elif prev_id in seen_updates:
                    continue
                elif prev_id in stream_id_in_updates:
                    continue
                else:
                    defer.returnValue(True)
            stream_id_in_updates.add(stream_id)
        defer.returnValue(False)
| 36.912821 | 88 | 0.599611 |
d319a3b28ec19ecd47d96ec842edd958fbbbea96 | 3,487 | py | Python | accounts/serializers.py | devmohamedwahba/Custom-Api-Login-register | 51ed3914cb16efb52dcb08a9918d5549ddb1afb4 | [
"MIT"
] | null | null | null | accounts/serializers.py | devmohamedwahba/Custom-Api-Login-register | 51ed3914cb16efb52dcb08a9918d5549ddb1afb4 | [
"MIT"
] | null | null | null | accounts/serializers.py | devmohamedwahba/Custom-Api-Login-register | 51ed3914cb16efb52dcb08a9918d5549ddb1afb4 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from rest_framework import serializers
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_simplejwt.tokens import RefreshToken, TokenError
from django.utils.text import gettext_lazy as _
from .models import PhoneOtp
from django.core.validators import RegexValidator
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the custom user model.

    The password is write-only (never echoed back in responses) and must be
    at least 5 characters long.
    """

    class Meta:
        model = get_user_model()
        fields = ('mobile', 'password', 'name', 'is_staff')
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create a new user with encrypted password and return it"""
        # create_user (rather than objects.create) hashes the password.
        return get_user_model().objects.create_user(**validated_data)
class TokenResetSerializer(serializers.Serializer):
    # 11-digit mobile number; must start with 010/011/012/015 (see regex).
    mobile = serializers.CharField(max_length=11, validators=[RegexValidator(regex='^01[0|1|2|5][0-9]{8}$',
                                                                             message="Phone number must be : 010 or 011 or 012 or 015.",
                                                                             code="Invalid Phone Number")])
class VerifyTokenSerializer(serializers.Serializer):
    # Same mobile-number validation as TokenResetSerializer.
    mobile = serializers.CharField(max_length=11, validators=[RegexValidator(regex='^01[0|1|2|5][0-9]{8}$',
                                                                             message="Phone number must be : 010 or 011 or 012 or 015.",
                                                                             code="Invalid Phone Number")])
    # 4-character one-time password.
    token = serializers.CharField(max_length=4)
    def validate(self, attrs):
        # The (mobile, otp) pair must match a stored PhoneOtp record.
        mobile = attrs.get("mobile", '')
        token = attrs.get("token", '')
        otp = PhoneOtp.objects.filter(user__mobile=mobile, otp=token).first()
        if not otp:
            raise AuthenticationFailed('Otp Validation Error')
        return attrs
class CreateTokenSerializer(serializers.Serializer):
    """Login serializer: exchanges mobile/password credentials for JWT tokens.

    Raises AuthenticationFailed when credentials are wrong, the account is
    disabled, or the account has not been verified.
    """

    mobile = serializers.CharField(write_only=True)
    password = serializers.CharField(
        style={"input_type": "password"},
        trim_whitespace=False,
        write_only=True
    )
    tokens = serializers.CharField(read_only=True, max_length=255)

    def validate(self, attrs):
        mobile = attrs.get("mobile")
        password = attrs.get("password")
        user = authenticate(mobile=mobile, password=password)
        if not user:
            raise AuthenticationFailed('Invalid Credentials')
        # (The original repeated this is_active check twice; once suffices.)
        if not user.is_active:
            raise AuthenticationFailed('Account is disabled please contact admin')
        if not user.is_verified:
            raise AuthenticationFailed('Account is Not Verified')
        return {
            "tokens": user.tokens()
        }
class LogoutSerializer(serializers.Serializer):
    """Accepts a refresh token and blacklists it on save() to log out."""
    refresh = serializers.CharField()
    default_error_messages = {
        'bad_token': _('Token is invalid or expired')
    }
    def validate(self, attrs):
        # Stash the raw token for save(); no further validation is done here.
        self.token = attrs['refresh']
        return attrs
    def save(self, **kwargs):
        try:
            RefreshToken(self.token).blacklist()
        except TokenError:
            # Map simplejwt's TokenError onto this serializer's error message.
            self.fail('bad_token')
| 37.494624 | 136 | 0.611701 |
2a9b728e07280996bcf90e8237201d8389a11315 | 368 | py | Python | mpl_style_gallery/plot_scripts/streamplot.py | tonysyu/matplotlib-style-gallery | c5aa1164428a822a8898f01a7b6910d86dcddf73 | [
"BSD-3-Clause"
] | 68 | 2015-01-06T08:58:58.000Z | 2021-08-16T22:49:57.000Z | mpl_style_gallery/plot_scripts/streamplot.py | tonysyu/matplotlib-style-gallery | c5aa1164428a822a8898f01a7b6910d86dcddf73 | [
"BSD-3-Clause"
] | 3 | 2015-01-09T01:50:48.000Z | 2016-03-29T08:57:52.000Z | mpl_style_gallery/plot_scripts/streamplot.py | tonysyu/matplotlib-style-gallery | c5aa1164428a822a8898f01a7b6910d86dcddf73 | [
"BSD-3-Clause"
] | 14 | 2015-01-08T23:09:30.000Z | 2021-05-13T08:17:30.000Z | import numpy as np
import matplotlib.pyplot as plt
# Plot extent: the vector field is sampled on [-L, L] x [-L, L].
L = 3
# 100x100 grid (np.mgrid's complex step 100j means "number of samples").
Y, X = np.mgrid[-L:L:100j, -L:L:100j]
# Components of a simple polynomial vector field.
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
# Translucent background image of the local speed, with a colorbar.
plt.imshow(speed, extent=[-L, L, -L, L], alpha=0.5)
plt.colorbar(label='speed')
# Streamlines whose line width scales with the local speed.
plt.streamplot(X, Y, U, V, linewidth=0.2*speed)
plt.title('Streamlines')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
| 18.4 | 51 | 0.622283 |
9ebfb1d202a8e12d5bc256f65e530274887cabd1 | 736 | py | Python | blog/migrations/0005_auto_20210629_2245.py | open-apprentice/ellieplatform-website | 3018feb05a2a44b916afba3e8e2eb71c18147117 | [
"MIT"
] | 1 | 2021-06-26T22:18:31.000Z | 2021-06-26T22:18:31.000Z | blog/migrations/0005_auto_20210629_2245.py | open-apprentice/ellieplatform-website | 3018feb05a2a44b916afba3e8e2eb71c18147117 | [
"MIT"
] | 12 | 2021-06-26T22:38:45.000Z | 2021-07-07T15:49:43.000Z | blog/migrations/0005_auto_20210629_2245.py | open-apprentice/ellieplatform-website | 3018feb05a2a44b916afba3e8e2eb71c18147117 | [
"MIT"
] | 1 | 2021-07-07T15:33:43.000Z | 2021-07-07T15:33:43.000Z | # Generated by Django 3.2.4 on 2021-06-29 20:45
from django.db import migrations, models
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated Django schema migration; avoid editing by hand beyond
    # comments, since Django tracks applied migrations by name and order.
    dependencies = [
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('blog', '0004_auto_20210629_2241'),
    ]
    operations = [
        # Category names become unique (max 30 characters).
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=30, unique=True),
        ),
        # Post tags switch to django-taggit's TaggableManager.
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| 28.307692 | 162 | 0.619565 |
5242693e2b635502c04919aa7771f5a4788e0e16 | 985 | py | Python | src/test.py | PavelSurgai/Convolutional_cifar100 | bfaef779cc2a78f0d61407a5762b48e2bb565979 | [
"Unlicense"
] | null | null | null | src/test.py | PavelSurgai/Convolutional_cifar100 | bfaef779cc2a78f0d61407a5762b48e2bb565979 | [
"Unlicense"
] | null | null | null | src/test.py | PavelSurgai/Convolutional_cifar100 | bfaef779cc2a78f0d61407a5762b48e2bb565979 | [
"Unlicense"
] | null | null | null | import numpy
from keras.datasets import cifar100
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D
from keras.utils import np_utils
from keras.optimizers import SGD
import sys
from keras.models import load_model
# Evaluate the quality of a previously trained model on the test data.
# Usage: python test.py <saved_model_file>
# Load the data. (Fix: the original called cifar10.load_data(), but only
# cifar100 is imported above, so the script crashed with a NameError.)
(X_train, y_train), (X_test, y_test) = cifar100.load_data()
# Number of image classes
nb_classes = 100
batch_size = 32
# One-hot encode the labels: categorical_crossentropy expects targets of
# shape (n_samples, nb_classes), not raw integer class ids.
y_test = np_utils.to_categorical(y_test, nb_classes)
model = load_model(sys.argv[1], custom_objects=None, compile=True)
# Set the optimisation parameters
sgd = SGD(lr=0.01)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# NOTE(review): fitting on the test set before evaluating it leaks test data
# into the model and inflates the reported accuracy — confirm this extra
# training epoch is intentional.
model.fit(X_test, y_test,
          batch_size=batch_size,
          epochs=1,
          validation_split=0.1)
scores = model.evaluate(X_test, y_test)
print("Точность работы на тестовых данных: %.2f%%" % (scores[1]*100))
| 26.621622 | 69 | 0.737056 |
5874d1888597fc09b745d71222e2f7ad90ab51d3 | 1,851 | py | Python | src/tests/unit/security/session/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/tests/unit/security/session/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/tests/unit/security/session/manager.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
session manager module.
"""
from pyrin.security.session.manager import SessionManager as BaseSessionManager
from tests.unit.application.structs import CoreRequestMock
from tests.unit.security.session import SessionPackage
class SessionManager(BaseSessionManager):
    """
    session manager class.

    test double that keeps one mock request object in memory so the
    security tests can run outside of a real request context.
    """

    package_class = SessionPackage

    def __init__(self):
        """
        initializes an instance of SessionManager.
        """
        super().__init__()
        # the mock request currently in flight; None when no request exists.
        self.__request_mock = None

    def get_current_request(self):
        """
        gets current request object.

        :rtype: CoreRequestMock
        """
        return self.__request_mock

    def get_safe_current_request(self):
        """
        gets current request object in a safe manner.

        meaning that if the request does not exist in current context, it will
        return a None object instead of raising an error.

        :rtype: CoreRequestMock
        """
        # the mock is stored directly on this instance, so the "safe"
        # variant behaves exactly like the plain getter.
        return self.__request_mock

    def inject_new_request(self):
        """
        injects a new request into current request object.
        """
        self.__request_mock = CoreRequestMock()

    def clear_current_request(self):
        """
        clears current request object.
        """
        self.__request_mock = None

    def set_access_token(self, token):
        """
        sets the given access token in current request.

        :param str token: access token.
        """
        headers = self.__request_mock.headers
        headers['Authorization'] = token

    def set_refresh_token(self, token):
        """
        sets the given refresh token in current request.

        :param str token: refresh token.
        """
        headers = self.__request_mock.headers
        headers['Cookie'] = f'Refresh-Auth={token}'
| 23.1375 | 79 | 0.638574 |
71786b2751fdc765f7483b98faf9c26897c6fbc7 | 3,230 | py | Python | app.py | Ho-Lab-Colostate/zhunt | d6277817d332e87f2b24a0a27f9f414d9a853365 | [
"MIT"
] | 4 | 2020-04-16T15:36:31.000Z | 2021-08-23T06:40:39.000Z | app.py | Ho-Lab-Colostate/zhunt | d6277817d332e87f2b24a0a27f9f414d9a853365 | [
"MIT"
] | 6 | 2020-04-16T16:42:54.000Z | 2020-04-28T19:03:38.000Z | app.py | Ho-Lab-Colostate/zhunt | d6277817d332e87f2b24a0a27f9f414d9a853365 | [
"MIT"
] | 1 | 2020-12-02T02:30:34.000Z | 2020-12-02T02:30:34.000Z | from flask import Flask, render_template, request, redirect, url_for, send_file, send_from_directory
import os #Needed for the GUI portion
import argparse
import subprocess #Needed to call on the C program
from werkzeug.utils import secure_filename
import smtplib
from string import Template
from datetime import date, datetime
import time
from plotly import graph_objs as go
import numpy as np
# Module-level placeholder for the last result path; the view functions
# shadow this with locals, so the global itself is effectively unused.
output_file=""
# Make sure the upload directory and the audit log exist before the first
# request is served.
if not os.path.exists('uploads'):
    os.makedirs('uploads')
user_info_file=open(os.getcwd()+'/uploads/users.txt','w+')  # 'w+' truncates any previous log
user_info_file.close()
UPLOAD_FOLDER = os.getcwd() + '/uploads'
ALLOWED_EXTENSIONS = {'txt', 'fasta'}  # accepted sequence-file extensions
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Main page: show the upload form on GET; on POST run zhunt on the
    uploaded sequence file, log the request and render the download page.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            # BUG FIX: `flash` was called here but never imported, so this
            # branch raised NameError. Redirect back to the form instead.
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            # Same NameError fix as above: no file selected, show the form again.
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Missing user_email previously produced None and crashed the
            # audit write below; default to an empty string.
            email = request.form.get("user_email") or ""
            # Run zhunt on the uploaded file. An argument list (no shell)
            # means the filename can never be interpreted by a shell; the
            # call blocks until zhunt has written <filename>.Z-SCORE.
            subprocess.run(["zhunt", "12", "6", "12", "./uploads/" + filename])
            output_file = "/uploads/" + filename + ".Z-SCORE"
            # Append an audit line: timestamp, uploaded file, user e-mail.
            # `with` guarantees the handle is closed even on write errors.
            with open(os.getcwd() + '/uploads/users.txt', 'a') as user_info:
                user_info.write(str(datetime.now()) + ": ")
                user_info.write(filename + " ")
                user_info.write(email + "\n")
            # Drop per-run plot templates left over from previous requests.
            for document in os.listdir('./templates/'):
                if document.endswith('figure.html'):
                    os.remove('./templates/' + document)
            return render_template("downloads.html", output_file=output_file)
    return render_template("index.html")
@app.route('/return-file/', methods=['get','post'])
def downloadFile():
    """Serve the requested result file back to the client as a download."""
    requested = request.form['download_output_file']
    local_path = "." + requested
    return send_file(local_path, as_attachment=True)
@app.route('/see_graph/', methods=['get','post'])
def see_data():
    """Render an interactive bar chart of the Z-SCORE column of a result file."""
    output_name = request.form['output_file']
    # Column 2 of the zhunt output holds the Z-SCORE values (kept as strings).
    scores = np.loadtxt("." + output_name, skiprows=1, usecols=[2], dtype=str)
    figure = go.Figure(data=go.Bar(y=scores, marker_color="#1359c2"))
    figure.update_layout(xaxis=dict(title="Sequence"),
                         yaxis=dict(title="Z-SCORE"))
    # output_name starts with "/uploads/", so slicing off 8 chars keeps a
    # leading slash that joins onto './templates'.
    template_name = output_name[8:] + "_figure.html"
    figure.write_html('./templates' + template_name)
    return render_template(template_name)
@app.route('/research/',methods=['get','post'])
def research():
    """Render the static research page."""
    page = "research.html"
    return render_template(page)
@app.route('/contact/',methods=['get','post'])
def contact():
    """Render the static contact page."""
    page = "contact.html"
    return render_template(page)
| 35.108696 | 100 | 0.632198 |
24b1c75768a7f24fa25b7a3dbf332e302e284ec5 | 518 | py | Python | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/babypwn/baby-pwn-for-download/docker/main_old.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/babypwn/baby-pwn-for-download/docker/main_old.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ctf-7867/2020/cyber_security_rumble/babypwn/baby-pwn-for-download/docker/main_old.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | def jump_to_flag():
# This would work if PIE was disabled
total_padding = 120
flag_text_padding = 112
payload = b'12345678' + b'\0'
padding = flag_text_padding - len(payload)
payload += b'A' * padding
flag_text_addr = 0x7fffffffce6d # +1 byte because it may have been overwritten?
payload += pwn.p64(flag_text_addr)
padding = total_padding - len(payload)
payload += b'B' * padding
desired_addr = 0x555555555193
payload += pwn.p64(desired_addr)
return payload
| 23.545455 | 83 | 0.673745 |
59d3860032cfff509eae642aaba92608503d829a | 14,054 | py | Python | pandas/computation/ops.py | nipunreddevil/pandas | 08b1b3edf9d470e804226927701954a39a73ab98 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2021-07-06T23:36:28.000Z | 2021-07-06T23:36:28.000Z | pandas/computation/ops.py | zouguangxian/pandas | 034c99491eddaed6964be6aabb727a4bbc24a71c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/computation/ops.py | zouguangxian/pandas | 034c99491eddaed6964be6aabb727a4bbc24a71c | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """Operator classes for eval.
"""
import re
import operator as op
from functools import partial
from itertools import product, islice, chain
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
_reductions = 'sum', 'prod'
_mathops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', 'pow', 'div', 'sqrt',
'inv', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan',
'arccosh', 'arcsinh', 'arctanh', 'arctan2', 'abs')
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
self.is_local = text_type(name).startswith(_LOCAL_TAG)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return com.pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
key = self.name
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return np.isscalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return com.pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
    """A literal value in an expression tree; name resolution is a no-op."""

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)

    def _resolve_name(self):
        # Nothing to look up: a constant's "name" is its value.
        return self._name

    @property
    def name(self):
        # Display a constant by its value rather than by a variable name.
        return self._value
# Map Python boolean keywords to the bitwise operators eval uses elementwise.
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
    """Hold an operator of arbitrary arity
    """
    def __init__(self, op, operands, *args, **kwargs):
        # Boolean keyword operators are normalized to their bitwise form.
        self.op = _bool_op_map.get(op, op)
        self.operands = operands
        self.encoding = kwargs.get('encoding', None)
    def __iter__(self):
        return iter(self.operands)
    def __unicode__(self):
        """Print a generic n-ary operator and its operands using infix
        notation"""
        # recurse over the operands
        parened = ('({0})'.format(com.pprint_thing(opr))
                   for opr in self.operands)
        return com.pprint_thing(' {0} '.format(self.op).join(parened))
    @property
    def return_type(self):
        # clobber types to bool if the op is a boolean operator
        if self.op in (_cmp_ops_syms + _bool_ops_syms):
            return np.bool_
        return _result_type_many(*(term.type for term in com.flatten(self)))
    @property
    def has_invalid_return_type(self):
        # Truthy when the result is object dtype but some operand is not;
        # note the second conjunct is a (possibly empty) frozenset, so the
        # property is used for its truthiness rather than as a strict bool.
        types = self.operand_types
        obj_dtype_set = frozenset([np.dtype('object')])
        return self.return_type == object and types - obj_dtype_set
    @property
    def operand_types(self):
        return frozenset(term.type for term in com.flatten(self))
    @property
    def isscalar(self):
        return all(operand.isscalar for operand in self.operands)
    @property
    def is_datetime(self):
        try:
            t = self.return_type.type
        except AttributeError:
            t = self.return_type
        return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
# Comparison operators and their implementations ('in'/'not in' use the
# vectorized helpers defined above).
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
# Boolean operators; keyword forms map to the same bitwise implementations.
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
# Arithmetic operators; '/' is true division on Python 3.
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
                    op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
# Operators that need special-case handling in some engines.
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
                                        _special_case_arith_ops_funcs))
# Combined lookup table for all binary operators.
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
    _binary_ops_dict.update(d)
def _cast_inplace(terms, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
    # Predicate: True when *obj* is a Term leaf (includes the Constant subclass).
    return isinstance(obj, Term)
class BinOp(Op):
    """Hold a binary operator and its operands

    Parameters
    ----------
    op : str
    left : Term or Op
    right : Term or Op
    """
    def __init__(self, op, lhs, rhs, **kwargs):
        super(BinOp, self).__init__(op, (lhs, rhs))
        self.lhs = lhs
        self.rhs = rhs
        # reject scalar-only boolean ops (e.g. ``True & 1``) up front
        self._disallow_scalar_only_bool_ops()
        # normalize datetime-like scalar operands to comparable Timestamps
        self.convert_values()
        try:
            self.func = _binary_ops_dict[op]
        except KeyError:
            # has to be made a list for python3
            keys = list(_binary_ops_dict.keys())
            raise ValueError('Invalid binary operator {0!r}, valid'
                             ' operators are {1}'.format(op, keys))
    def __call__(self, env):
        """Recursively evaluate an expression in Python space.

        Parameters
        ----------
        env : Scope

        Returns
        -------
        object
            The result of an evaluated expression.
        """
        # handle truediv
        if self.op == '/' and env.scope['truediv']:
            self.func = op.truediv
        # recurse over the left/right nodes
        left = self.lhs(env)
        right = self.rhs(env)
        return self.func(left, right)
    def evaluate(self, env, engine, parser, term_type, eval_in_python):
        """Evaluate a binary operation *before* being passed to the engine.

        Parameters
        ----------
        env : Scope
        engine : str
        parser : str
        term_type : type
        eval_in_python : list

        Returns
        -------
        term_type
            The "pre-evaluated" expression as an instance of ``term_type``
        """
        if engine == 'python':
            res = self(env)
        else:
            # recurse over the left/right nodes
            left = self.lhs.evaluate(env, engine=engine, parser=parser,
                                     term_type=term_type,
                                     eval_in_python=eval_in_python)
            right = self.rhs.evaluate(env, engine=engine, parser=parser,
                                      term_type=term_type,
                                      eval_in_python=eval_in_python)
            # base cases
            if self.op in eval_in_python:
                # operators the engine cannot handle natively (e.g. 'in')
                res = self.func(left.value, right.value)
            else:
                res = pd.eval(self, local_dict=env, engine=engine,
                              parser=parser)
        # stash the intermediate result in the scope under a temporary name
        name = env.add_tmp(res)
        return term_type(name, env=env)
    def convert_values(self):
        """Convert datetimes to a comparable value in an expression.
        """
        def stringify(value):
            # render *value* as text, honoring an explicit encoding if set
            if self.encoding is not None:
                encoder = partial(com.pprint_thing_encoded,
                                  encoding=self.encoding)
            else:
                encoder = com.pprint_thing
            return encoder(value)
        lhs, rhs = self.lhs, self.rhs
        # datetime term vs scalar: coerce the scalar side to a UTC Timestamp
        # so the comparison is well defined (both operand orders handled)
        if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
            v = rhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.rhs.update(v)
        if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
            v = lhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.lhs.update(v)
    def _disallow_scalar_only_bool_ops(self):
        # boolean ops need at least one boolean-typed, non-scalar operand
        if ((self.lhs.isscalar or self.rhs.isscalar) and
            self.op in _bool_ops_dict and
                (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
                      issubclass(self.lhs.return_type, (bool, np.bool_))))):
            raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
    """Return True when *dtype* resolves to a numpy numeric type."""
    resolved = np.dtype(dtype)
    return issubclass(resolved.type, np.number)
class Div(BinOp):
    """Division operator with special-case operand casting.

    Parameters
    ----------
    lhs, rhs : Term or Op
        The Terms or Ops in the ``/`` expression.
    truediv : bool
        Whether or not to use true division. With Python 3 this happens
        regardless of the value of ``truediv``.
    """

    def __init__(self, lhs, rhs, truediv, *args, **kwargs):
        super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
        # Division only makes sense between numeric operands.
        both_numeric = (isnumeric(lhs.return_type) and
                        isnumeric(rhs.return_type))
        if not both_numeric:
            raise TypeError("unsupported operand type(s) for {0}:"
                            " '{1}' and '{2}'".format(self.op,
                                                      lhs.return_type,
                                                      rhs.return_type))
        # True division forces a float result, so cast the operands now.
        if truediv or PY3:
            _cast_inplace(com.flatten(self), np.float_)
# Unary operator tokens and implementations; 'not' maps to bitwise invert so
# it also works elementwise on boolean arrays.
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
    """A unary operator applied to a single operand.

    Parameters
    ----------
    op : str
        The token used to represent the operator.
    operand : Term or Op
        The Term or Op operand to the operator.

    Raises
    ------
    ValueError
        * If no function associated with the passed operator token is found.
    """

    def __init__(self, op, operand):
        super(UnaryOp, self).__init__(op, (operand,))
        self.operand = operand
        implementation = _unary_ops_dict.get(op)
        if implementation is None:
            raise ValueError('Invalid unary operator {0!r}, valid operators '
                             'are {1}'.format(op, _unary_ops_syms))
        self.func = implementation

    def __call__(self, env):
        # Evaluate the operand first, then apply the operator function.
        evaluated_operand = self.operand(env)
        return self.func(evaluated_operand)

    def __unicode__(self):
        rendered = '{0}({1})'.format(self.op, self.operand)
        return com.pprint_thing(rendered)
| 28.623218 | 79 | 0.569944 |
66e01fc5601d631a0208749750d68aa4fb98894e | 2,099 | py | Python | Python/examples/AdvancedParallelPressureControl.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 20 | 2019-05-21T17:43:07.000Z | 2022-03-22T16:38:59.000Z | Python/examples/AdvancedParallelPressureControl.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 28 | 2019-05-21T17:36:24.000Z | 2022-03-21T07:21:51.000Z | Python/examples/AdvancedParallelPressureControl.py | william-richards-idexx/fgt-SDK | 674b572c714302be561b08ba63ff3358dfa13cea | [
"Apache-2.0"
] | 7 | 2020-09-18T23:47:25.000Z | 2022-03-03T09:36:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advanced Parallel Pressure Control example
This example shows how to send concurrent pressure orders using threads.
The Dll handles parallel calls, functions can be called simultaneously.
This demonstrates thread handling, same result is obtained using
successive calls as all function calls are executed instantly (within a few µs)
Hardware setup: At least two Fluigent pressure controllers (MFCS, MFCS-EZ or Flow EZ)
Copyright (c) Fluigent 2019. All Rights Reserved.
"""
# Print function for Python 2 compatibility
from __future__ import print_function
import time
import random
from threading import Thread
from Fluigent.SDK import fgt_init, fgt_close
from Fluigent.SDK import fgt_set_pressure, fgt_get_pressureRange
from Fluigent.SDK import fgt_get_pressureChannelCount
cancellationToken = False
def pressureTask(pressureIndex, period):
"""Set the selected pressure channel to a random value every [period] seconds"""
minPressure, maxPressure = fgt_get_pressureRange(pressureIndex)
while not cancellationToken:
pressureOrder = random.random()*maxPressure
fgt_set_pressure(pressureIndex, pressureOrder);
print("task {}: New pressure order: {:.2f} mbar".format(pressureIndex, pressureOrder))
time.sleep(period)
## Initialize the session
# This step is mandatory before starting threads at the same time
fgt_init()
## Create the threads
# Thread 1: drives the first pressure channel (index 0) every 2 seconds
thread1 = Thread(target = pressureTask, args = (0, 2))
# Thread 2: drives the second pressure channel (index 1) every 5 seconds
thread2 = Thread(target = pressureTask, args = (1, 5))
try:
# Start the threads
thread1.start()
thread2.start()
# Wait 10 seconds
time.sleep(10);
finally:
# Stop the threads
cancellationToken = True;
thread1.join();
thread2.join();
# Reset pressure on all channels
for pressure_index in range(fgt_get_pressureChannelCount()):
fgt_set_pressure(pressure_index, 0)
## Close the session
fgt_close()
| 33.854839 | 94 | 0.748928 |
bad184fde54486ffd714535f63c6d96a3f4a4732 | 14,674 | py | Python | util/topgen/merge.py | ladmangesh805/opentitan | f4126a4a2eab29f6621aced2bcfd912149ebdf2d | [
"Apache-2.0"
] | 1 | 2019-12-24T02:10:12.000Z | 2019-12-24T02:10:12.000Z | util/topgen/merge.py | BharathS11/opentitan | bfaf88058776f23428b317aad90ae93b822d6747 | [
"Apache-2.0"
] | null | null | null | util/topgen/merge.py | BharathS11/opentitan | bfaf88058776f23428b317aad90ae93b822d6747 | [
"Apache-2.0"
] | null | null | null | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import logging as log
from copy import deepcopy
from functools import partial
from .lib import *
import hjson
def amend_ip(top, ip):
    """ Amend additional information into top module
    Amended fields:
        - size: register space
        - clock: converted into ip_clock
        - bus_device
        - bus_host: none if doesn't exist
        - available_input_list: empty list if doesn't exist
        - available_output_list: empty list if doesn't exist
        - available_inout_list: empty list if doesn't exist
        - interrupt_list: empty list if doesn't exist
        - (TBD) alert_list: empty list if doesn't exist
    """
    ip_list_in_top = [x["name"].lower() for x in top["module"]]
    ipname = ip["name"].lower()
    if ipname not in ip_list_in_top:
        log.info("TOP doesn't use the IP %s. Skip" % ip["name"])
        return
    # Find index of the IP
    ip_idx = ip_list_in_top.index(ipname)
    ip_module = top["module"][ip_idx]
    # Size: default to at least 4 kB of register space.
    if "size" not in ip_module:
        ip_module["size"] = "0x%x" % max(ip["gensize"], 0x1000)
    else:
        # BUG FIX: "size" is a hex string (e.g. "0x1000") when it comes from
        # hjson or from the branch above, but it was compared directly with
        # the integer gensize, which raises TypeError on Python 3. Parse it
        # first (base 0 accepts 0x/0o/plain decimal literals).
        size_val = ip_module["size"]
        size_int = int(size_val, 0) if isinstance(size_val, str) else size_val
        if size_int < ip["gensize"]:
            log.error(
                "given 'size' field in IP %s is smaller than the required space"
                % ip_module["name"])
    # bus_device
    ip_module["bus_device"] = ip["bus_device"]
    # bus_host: "none" when absent or empty
    if "bus_host" in ip and ip["bus_host"] != "":
        ip_module["bus_host"] = ip["bus_host"]
    else:
        ip_module["bus_host"] = "none"
    # available_input_list, available_output_list, available_inout_list:
    # drop descriptions, tag the direction and normalize widths to int.
    if "available_input_list" in ip:
        ip_module["available_input_list"] = ip["available_input_list"]
        for i in ip_module["available_input_list"]:
            i.pop('desc', None)
            i["type"] = "input"
            i["width"] = int(i["width"])
    else:
        ip_module["available_input_list"] = []
    if "available_output_list" in ip:
        ip_module["available_output_list"] = ip["available_output_list"]
        for i in ip_module["available_output_list"]:
            i.pop('desc', None)
            i["type"] = "output"
            i["width"] = int(i["width"])
    else:
        ip_module["available_output_list"] = []
    if "available_inout_list" in ip:
        ip_module["available_inout_list"] = ip["available_inout_list"]
        for i in ip_module["available_inout_list"]:
            i.pop('desc', None)
            i["type"] = "inout"
            i["width"] = int(i["width"])
    else:
        ip_module["available_inout_list"] = []
    # interrupt_list: same normalization as the IO lists.
    if "interrupt_list" in ip:
        ip_module["interrupt_list"] = ip["interrupt_list"]
        for i in ip_module["interrupt_list"]:
            i.pop('desc', None)
            i["type"] = "interrupt"
            i["width"] = int(i["width"])
    else:
        ip_module["interrupt_list"] = []
    # (TBD) alert_list
    # scan support defaults to "false" when the IP doesn't declare it.
    if "scan" in ip:
        ip_module["scan"] = ip["scan"]
    else:
        ip_module["scan"] = "false"
# TODO: Replace this part to be configurable from hjson or template
# Hard-coded node-name -> RTL instance-type mapping used by the xbar helpers.
predefined_modules = {
    "corei": "rv_core_ibex",
    "cored": "rv_core_ibex",
    "dm_sba": "rv_dm",
    "debug_mem": "rv_dm"
}
def xbar_addhost(xbar, host):
    """Add or annotate a host node on *xbar*.

    If *host* is missing from xbar["nodes"], append a default host node;
    otherwise fill in clock / inst_type / pipeline defaults on the
    existing entry.
    """
    # TODO: check if host is another crossbar
    # Check and fetch host if exists in nodes
    obj = list(filter(lambda node: node["name"] == host, xbar["nodes"]))
    if len(obj) == 0:
        log.warning(
            "host %s doesn't exist in the node list. Using default values" %
            host)
        obj = {
            "name": host,
            "clock": xbar['clock'],
            "type": "host",
            "inst_type": "",
            # The default matches RTL default
            # pipeline_byp is don't care if pipeline is false
            "pipeline": "true",
            "pipeline_byp": "true"
        }
        # BUG FIX: this previously appended to the global `topxbar`, which is
        # undefined in this scope (NameError at runtime); append to the xbar
        # that was passed in instead.
        xbar["nodes"].append(obj)
    else:
        obj[0]["clock"] = xbar['clock']
        obj[0]["inst_type"] = predefined_modules[
            host] if host in predefined_modules else ""
        obj[0]["pipeline"] = obj[0]["pipeline"] if "pipeline" in obj[
            0] else "true"
        obj[0]["pipeline_byp"] = obj[0]["pipeline_byp"] if obj[0][
            "pipeline"] == "true" and "pipeline_byp" in obj[0] else "true"
def process_pipeline_var(node):
    """Add device nodes pipeline / pipeline_byp information
    - Supply a default of true / true if not defined by xbar
    """
    node.setdefault("pipeline", "true")
    node.setdefault("pipeline_byp", "true")
def xbar_adddevice(top, xbar, device):
    """Add device nodes information
    - clock: comes from module if exist. use main top clock for memory as of now
    - inst_type: comes from module or memory if exist.
    - base_addr: comes from module or memory, or assume rv_plic?
    - size_byte: comes from module or memory
    """
    # Matching entries in the top-level module/memory lists and in this
    # xbar's own node list, plus the names of all other crossbars.
    deviceobj = list(
        filter(lambda node: node["name"] == device,
               top["module"] + top["memory"]))
    nodeobj = list(filter(lambda node: node["name"] == device, xbar["nodes"]))
    xbar_list = [x["name"] for x in top["xbar"] if x["name"] != xbar["name"]]
    if len(deviceobj) == 0:
        # doesn't exist,
        # case 1: another xbar --> check in xbar list
        if device in xbar_list and len(nodeobj) == 0:
            log.error(
                "Another crossbar %s needs to be specified in the 'nodes' list"
                % device)
            return
        # case 2: predefined_modules (debug_mem, rv_plic)
        # TODO: Find configurable solution not from predefined but from object?
        elif device in predefined_modules:
            if device == "debug_mem":
                if len(nodeobj) == 0:
                    # Add new debug_mem
                    xbar["nodes"].append({
                        "name": "debug_mem",
                        "type": "device",
                        "clock": xbar['clock'],
                        "inst_type": predefined_modules["debug_mem"],
                        "base_addr": top["debug_mem_base_addr"],
                        "size_byte": "0x1000",
                        "pipeline" : "true",
                        "pipeline_byp" : "true"
                    }) # yapf: disable
                else:
                    # Update if exists
                    node = nodeobj[0]
                    node["inst_type"] = predefined_modules["debug_mem"]
                    node["base_addr"] = top["debug_mem_base_addr"]
                    node["size_byte"] = "0x1000"
                    process_pipeline_var(node)
            else:
                # only debug_mem is supported among the predefined devices
                log.error("device %s shouldn't be host type" % device)
                return
        # case 3: not defined
        else:
            log.error(
                "device %s doesn't exist in 'module', 'memory', or predefined"
                % device)
            return
    # Search object from module or memory
    elif len(nodeobj) == 0:
        # found in module or memory but node object doesn't exist.
        xbar["nodes"].append({
            "name" : device,
            "type" : "device",
            "clock" : deviceobj[0]["clock"],
            "inst_type" : deviceobj[0]["type"],
            "base_addr" : deviceobj[0]["base_addr"],
            "size_byte": deviceobj[0]["size"],
            "pipeline" : "true",
            "pipeline_byp" : "true"
        }) # yapf: disable
    else:
        # found and exist in the nodes too
        node = nodeobj[0]
        node["inst_type"] = deviceobj[0]["type"]
        node["base_addr"] = deviceobj[0]["base_addr"]
        node["size_byte"] = deviceobj[0]["size"]
        process_pipeline_var(node)
def amend_xbar(top, xbar):
    """Amend crossbar informations to the top list
    Amended fields
    - clock: Adopt from module clock if exists
    - inst_type: Module instance some module will be hard-coded
      the tool searches module list and memory list then put here
    - base_addr: from top["module"]
    - size: from top["module"]
    """
    xbar_list = [x["name"] for x in top["xbar"]]
    if not xbar["name"] in xbar_list:
        log.info(
            "Xbar %s doesn't belong to the top %s. Check if the xbar doesn't need"
            % (xbar["name"], top["name"]))
        return
    # The top-level entry for this crossbar; connections/nodes/clock are
    # copied from the standalone xbar description.
    topxbar = list(
        filter(lambda node: node["name"] == xbar["name"], top["xbar"]))[0]
    topxbar["connections"] = deepcopy(xbar["connections"])
    if "nodes" in xbar:
        topxbar["nodes"] = deepcopy(xbar["nodes"])
    else:
        topxbar["nodes"] = []
    topxbar["clock"] = xbar["clock"]
    # Build nodes from 'connections'
    device_nodes = set()
    for host, devices in xbar["connections"].items():
        # add host first
        xbar_addhost(topxbar, host)
        # add device if doesn't exist
        device_nodes.update(devices)
    log.info(device_nodes)
    for device in device_nodes:
        xbar_adddevice(top, topxbar, device)
def amend_interrupt(top):
    """Check interrupt_module if exists, or just use all modules
    """
    if "interrupt_module" not in top:
        # Default: every module in the top contributes interrupts.
        top["interrupt_module"] = [m["name"] for m in top["module"]]
    if "interrupt" not in top or top["interrupt"] == "":
        top["interrupt"] = []
    for module_name in top["interrupt_module"]:
        matches = [m for m in top["module"] if m["name"] == module_name]
        if not matches:
            log.warning(
                "Cannot find IP %s which is used in the interrupt_module" % module_name)
            continue
        ip = matches[0]
        log.info("Adding interrupts from module %s" % ip["name"])
        # Prefix every interrupt signal with its module name before merging.
        prefixer = partial(add_prefix_to_signal, prefix=module_name.lower())
        top["interrupt"] += [prefixer(sig) for sig in ip["interrupt_list"]]
def amend_pinmux_io(top):
    """ Check dio_modules/ mio_modules. If not exists, add all modules to mio

    Builds pinmux["dio"] from the dedicated-IO module list and fills the
    multiplexed "inputs"/"outputs"/"inouts" lists from every remaining
    module that has available IO.
    """
    pinmux = top["pinmux"]
    if "dio_modules" not in pinmux:
        pinmux['dio_modules'] = []
    # list out dedicated IO
    pinmux['dio'] = []
    for e in pinmux["dio_modules"]:
        # Check name if it is module or signal
        mname, sname = get_ms_name(e["name"])
        # Parse how many signals
        m = get_module_by_name(top, mname)
        if sname is not None:
            signals = deepcopy([get_signal_by_name(m, sname)])
        else:
            # Get all module signals
            signals = deepcopy(m["available_input_list"] +
                               m["available_output_list"] +
                               m["available_inout_list"])
        sig_width = sum([s["width"] for s in signals])
        # convert signal with module name
        signals = list(
            map(partial(add_prefix_to_signal, prefix=mname), signals))
        # Parse how many pads are assigned
        if "pad" not in e:
            raise SystemExit("Should catch pad field in validate.py!")
        # pads are the list of individual pin, each entry is 1 bit width
        pads = []
        for p in e["pad"]:
            pads += get_pad_list(p)
        # check if #sig and #pads are matched
        if len(pads) != sig_width:
            raise SystemExit("# Pads and # Sig (%s) aren't same: %d" %
                             (mname, sig_width))
        # Slice the pad list across the signals in order and record each
        # signal (with its pads) as a dedicated IO.
        for s in signals:
            p = pads[:s["width"]]
            pads = pads[s["width"]:]
            s["pad"] = p
            pinmux["dio"].append(s)
    dio_names = [p["name"] for p in pinmux["dio"]]
    ## Multiplexer IO
    if "mio_modules" not in pinmux:
        # Add all modules having available io to Multiplexer IO
        pinmux["mio_modules"] = []
        for m in top["module"]:
            num_io = len(m["available_input_list"] +
                         m["available_output_list"] +
                         m["available_inout_list"])
            if num_io != 0:
                # Add if not in dio_modules
                pinmux["mio_modules"].append(m["name"])
    # List up the dedicated IO to exclude from inputs/outputs
    # Add port list to `inputs` and `outputs` fields
    if "inputs" not in pinmux:
        pinmux["inputs"] = []
    if "outputs" not in pinmux:
        pinmux["outputs"] = []
    if "inouts" not in pinmux:
        pinmux["inouts"] = []
    for e in pinmux["mio_modules"]:
        tokens = e.split('.')
        if len(tokens) not in [1, 2]:
            raise SystemExit(
                "Cannot parse signal/module in mio_modules {}".format(e))
        # Add all ports from the module to input/outputs
        m = get_module_by_name(top, tokens[0])
        if m is None:
            raise SystemExit("Module {} doesn't exist".format(tokens[0]))
        if len(tokens) == 1:
            # Same transformation for each direction: prefix the signal with
            # the module name, then drop any signal already claimed as DIO.
            prefixer = partial(add_prefix_to_signal, prefix=m["name"].lower())
            for dest, sig_list in (("inputs", m["available_input_list"]),
                                   ("outputs", m["available_output_list"]),
                                   ("inouts", m["available_inout_list"])):
                pinmux[dest] += [s for s in map(prefixer, sig_list)
                                 if s["name"] not in dio_names]
        elif len(tokens) == 2:
            # Current version doesn't consider signal in mio_modules
            # only in dio_modules
            # (typo "Curren" in the original message fixed below)
            raise SystemExit(
                "Current version doesn't support signal in mio_modules {}".
                format(e))
def merge_top(topcfg, ipobjs, xbarobjs):
    """Return a new top configuration with IP, interrupt, pinmux and
    crossbar information merged in; *topcfg* itself is left untouched.
    """
    gencfg = deepcopy(topcfg)
    # Fold every IP's details into the matching module entries.
    for ipobj in ipobjs:
        amend_ip(gencfg, ipobj)
    # Interrupts must be collected before the crossbars are processed.
    amend_interrupt(gencfg)
    # Build the pinmux input/output lists.
    log.info("Processing PINMUX")
    amend_pinmux_io(gencfg)
    # Merge each crossbar definition.
    for xbarobj in xbarobjs:
        amend_xbar(gencfg, xbarobj)
    # remove unwanted fields 'debug_mem_base_addr'
    gencfg.pop('debug_mem_base_addr', None)
    return gencfg
| 33.578947 | 82 | 0.54893 |
3a3a0ea1ef3c2497ce3d433d0c7a93e58357790b | 30,913 | py | Python | alembic/versions/1c7c31b1c31d_cbs_modify_tables_and_add_dictionaries.py | idosavion/anyway | ad88df478946722a95379eb85ba4b913de38111a | [
"BSD-3-Clause"
] | 1 | 2020-07-16T16:51:17.000Z | 2020-07-16T16:51:17.000Z | alembic/versions/1c7c31b1c31d_cbs_modify_tables_and_add_dictionaries.py | idosavion/anyway | ad88df478946722a95379eb85ba4b913de38111a | [
"BSD-3-Clause"
] | 5 | 2020-07-30T08:30:04.000Z | 2021-06-25T15:39:48.000Z | alembic/versions/1c7c31b1c31d_cbs_modify_tables_and_add_dictionaries.py | idosavion/anyway | ad88df478946722a95379eb85ba4b913de38111a | [
"BSD-3-Clause"
] | null | null | null | """cbs_modify_tables_and_add_dictionaries
Revision ID: 1c7c31b1c31d
Revises: 56b95d5826c
Create Date: 2018-09-30 01:10:18.193297
"""
# revision identifiers, used by Alembic.
revision = '1c7c31b1c31d'  # this migration's id
down_revision = '56b95d5826c'  # migration this one applies on top of
branch_labels = None
depends_on = None
import sqlalchemy as sa
from alembic import op
# --- migration data --------------------------------------------------------
#
# Names of the CBS dictionary (lookup) tables created by this migration, in
# creation order.  Each table gets an Integer primary-key column ``id``, a
# single nullable Text column named ``<table>_hebrew`` (see
# _TEXT_COLUMN_OVERRIDES for the exceptions) and a non-unique index
# ``ix_<table>_id`` on ``id``.
_LOOKUP_TABLES = (
    'accident_hour_raw',
    'accident_month',
    'accident_severity',
    'accident_type',
    'age_group',
    'columns_description',
    'cross_direction',
    'cross_location',
    'cross_mode',
    'day_in_week',
    'day_night',
    'day_type',
    'didnt_cross',
    'district',
    'driving_directions',
    'engine_volume',
    'geo_area',
    'hospital_time',
    'injured_position',
    'injured_type',
    'injury_severity',
    'involved_type',
    'late_deceased',
    'location_accuracy',
    'medical_type',
    'minizipali_status',
    'multi_lane',
    'natural_area',
    'object_distance',
    'one_lane',
    'police_unit',
    'population_type',
    'provider_code',
    'region',
    'release_dest',
    'road_control',
    'road_intactness',
    'road_light',
    'road_object',
    'road_shape',
    'road_sign',
    'road_surface',
    'road_type',
    'road_width',
    'safety_measures',
    'safety_measures_use',
    'sex',
    'speed_limit',
    'total_weight',
    'traffic_light',
    'vehicle_attribution',
    'vehicle_status',
    'vehicle_type',
    'weather',
    'yishuv_shape',
)

# Lookup tables whose Text column does not follow the ``<table>_hebrew``
# naming convention.
_TEXT_COLUMN_OVERRIDES = {
    'columns_description': 'column_description',
}

# Columns added to the marker tables on upgrade (dropped again on downgrade).
# The order matches the original auto-generated migration so that the
# physical column order in the database is preserved.  The second element of
# each pair is the column kind: 'integer' or 'text'.
_NEW_MARKER_COLUMNS = {
    'markers': (
        ('accident_severity', 'integer'),
        ('accident_type', 'integer'),
        ('day_type', 'integer'),
        ('location_accuracy', 'integer'),
        ('non_urban_intersection_hebrew', 'text'),
        ('police_unit', 'integer'),
        ('road_intactness', 'integer'),
        ('road_shape', 'integer'),
        ('road_type', 'integer'),
        ('street1_hebrew', 'text'),
        ('street2_hebrew', 'text'),
        ('yishuv_name', 'text'),
    ),
    'markers_no_location': (
        ('accident_severity', 'integer'),
        ('accident_type', 'integer'),
        ('day_type', 'integer'),
        ('location_accuracy', 'integer'),
        ('police_unit', 'integer'),
        ('road_intactness', 'integer'),
        ('road_shape', 'integer'),
        ('road_type', 'integer'),
    ),
}

# Legacy camelCase columns dropped from both marker tables on upgrade and
# re-created on downgrade, in the original drop order.
_OLD_MARKER_COLUMNS = (
    'roadShape',
    'severity',
    'intactness',
    'subtype',
    'roadType',
    'dayType',
    'locationAccuracy',
    'unit',
)


def _text_column_name(table):
    """Return the name of the Text column of lookup table *table*."""
    return _TEXT_COLUMN_OVERRIDES.get(table, '{0}_hebrew'.format(table))


def _create_lookup_table(table):
    """Create lookup table *table* (id PK + Text column) and its id index."""
    op.create_table(
        table,
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column(_text_column_name(table), sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(op.f('ix_{0}_id'.format(table)), table, ['id'],
                    unique=False)


def _drop_lookup_table(table):
    """Drop lookup table *table* and its id index."""
    op.drop_index(op.f('ix_{0}_id'.format(table)), table_name=table)
    op.drop_table(table)


def upgrade():
    # Create one dictionary table per CBS lookup category.
    for table in _LOOKUP_TABLES:
        _create_lookup_table(table)

    # Replace the legacy camelCase integer columns on both marker tables
    # with their snake_case equivalents and add the new Hebrew text columns.
    for table in ('markers', 'markers_no_location'):
        for name, kind in _NEW_MARKER_COLUMNS[table]:
            column_type = sa.Text() if kind == 'text' else sa.Integer()
            op.add_column(table, sa.Column(name, column_type, nullable=True))
        for name in _OLD_MARKER_COLUMNS:
            op.drop_column(table, name)

    op.drop_index('idx_schools_geom', table_name='schools')


def downgrade():
    op.create_index('idx_schools_geom', 'schools', ['geom'], unique=False)

    # Restore the legacy camelCase columns and drop the snake_case ones, in
    # the reverse order of the upgrade.
    for table in ('markers_no_location', 'markers'):
        for name in reversed(_OLD_MARKER_COLUMNS):
            op.add_column(
                table,
                sa.Column(name, sa.INTEGER(), autoincrement=False,
                          nullable=True),
            )
        for name, _kind in reversed(_NEW_MARKER_COLUMNS[table]):
            op.drop_column(table, name)

    # Drop the dictionary tables in reverse creation order.
    for table in reversed(_LOOKUP_TABLES):
        _drop_lookup_table(table)
| 56.930018 | 116 | 0.622392 |
3628ecfddbb23bb365f08b2666afe97661500eb4 | 9,351 | py | Python | rads/utility.py | ccarocean/python-rads | fec1cfccbdafb7e372b66a76132c59f3d2a6beb3 | [
"MIT"
] | null | null | null | rads/utility.py | ccarocean/python-rads | fec1cfccbdafb7e372b66a76132c59f3d2a6beb3 | [
"MIT"
] | 3 | 2019-06-27T19:00:35.000Z | 2020-03-07T09:43:24.000Z | rads/utility.py | ccarocean/python-rads | fec1cfccbdafb7e372b66a76132c59f3d2a6beb3 | [
"MIT"
] | 1 | 2019-05-31T01:04:26.000Z | 2019-05-31T01:04:26.000Z | """Utility functions."""
import datetime
import io
import os
from typing import IO, Any, List, Optional, Union, cast
from wrapt import ObjectProxy # type: ignore
from .constants import EPOCH
from .typing import PathLike, PathLikeOrFile
# Public names exported by ``from rads.utility import *``.
__all__ = [
    "ensure_open",
    "filestring",
    "isio",
    "xor",
    "contains_sublist",
    "merge_sublist",
    "delete_sublist",
    "fortran_float",
    "datetime_to_timestamp",
    "timestamp_to_datetime",
]
class _NoCloseIOWrapper(ObjectProxy):  # type: ignore
    """Transparent proxy whose ``close``/``__exit__`` are no-ops.

    Used by :func:`ensure_open` so a caller-supplied file object survives
    closing of, or exiting a ``with`` block on, the returned I/O object.
    """

    def close(self) -> None:
        """Do nothing; the wrapped file stays open."""

    def __exit__(self, *args: object, **kwargs: object) -> None:
        """Do nothing; the wrapped file stays open."""
def ensure_open(
    file: PathLikeOrFile,
    mode: str = "r",
    buffering: int = -1,
    encoding: Optional[str] = None,
    errors: Optional[str] = None,
    newline: Optional[str] = None,
    closefd: bool = True,
    closeio: bool = False,
) -> IO[Any]:
    """Open a path/descriptor, or pass a file-like object through.

    Behaves like :func:`open`, except that `file` may also be an already
    open file-like object, in which case it is returned as-is.

    :param file:
        A path-like object, an integer file descriptor, or a file-like
        object.

        .. note::

            A file descriptor is closed when the returned I/O object is
            closed, unless `closefd` is set to False.

        .. note::

            A file-like object is *not* closed by closing the returned I/O
            object unless `closeio` is set to True.

    :param mode:
        See :func:`open`
    :param buffering:
        See :func:`open`
    :param encoding:
        See :func:`open`
    :param errors:
        See :func:`open`
    :param newline:
        See :func:`open`
    :param closefd:
        See :func:`open`
    :param closeio:
        If True, closing (or exiting a ``with`` block on) the returned I/O
        object also closes a file-like `file`.  If False (the default), the
        close and __exit__ methods are no-ops for file-like input.
    :return:
        An I/O object, or the given file-like object (possibly wrapped so
        its close/__exit__ are no-ops, per `closeio`).

    .. seealso:: :func:`open`
    """
    # Anything without a ``read`` method is treated as a path or descriptor
    # and handed to the builtin ``open``.
    if not hasattr(file, "read"):
        return open(
            cast(Union[PathLike, int], file),
            mode,
            buffering,
            encoding,
            errors,
            newline,
            closefd,
        )
    # File-like object: honor the requested close semantics.
    if closeio:
        return cast(IO[Any], file)
    return cast(IO[Any], _NoCloseIOWrapper(file))
def filestring(file: PathLikeOrFile) -> Optional[str]:
    """Best-effort conversion of a file or file-like object to a name string.

    :param file:
        A path, integer file descriptor, or file-like object.
    :return:
        The filename or path as a :class:`str`, or None when it cannot be
        determined (integer descriptors, nameless file objects, or byte
        paths that are not valid UTF-8).
    :raises TypeError:
        If `file` is not a path, descriptor, or file-like object.
    """
    # Bare file descriptors carry no name information.
    if isinstance(file, int):
        return None
    # File-like objects: report their ``name`` attribute when present.
    if hasattr(file, "read"):
        return getattr(file, "name", None)
    # Reduce path-like objects to str/bytes via the fspath protocol.
    path = file if isinstance(file, (str, bytes)) else os.fspath(cast(PathLike, file))
    if isinstance(path, str):
        return path
    if isinstance(path, bytes):
        try:
            return path.decode("utf-8")
        except UnicodeDecodeError:
            return None
    raise TypeError(f"'{type(file)}' is not a file like object")
def isio(obj: Any, *, read: bool = False, write: bool = False) -> bool:
    """Determine if an object is IO like, optionally requiring capabilities.

    .. note::

        When neither `read` nor `write` is True this falls back to
        :code:`isinstance(obj, io.IOBase)`.

    :param obj:
        Object to check.
    :param read:
        Require `obj` to have a ``read`` method if True.
    :param write:
        Require `obj` to have a ``write`` method if True.
    :return:
        True if `obj` satisfies every requested capability (or is an
        :class:`io.IOBase` instance when none is requested).
    """
    if not (read or write):
        # No capability requested: plain instance check.
        return isinstance(obj, io.IOBase)
    if read and not hasattr(obj, "read"):
        return False
    if write and not hasattr(obj, "write"):
        return False
    return True
def xor(a: bool, b: bool) -> bool:
    """Boolean XOR operator.

    Truth table:

    ===== ===== =======
    a     b     a XOR b
    ===== ===== =======
    True  True  False
    True  False True
    False True  True
    False False False
    ===== ===== =======

    :param a:
        First boolean value.
    :param b:
        Second boolean value.
    :return:
        True if exactly one of `a` and `b` is True.
    """
    # For booleans, exclusive-or is simply inequality.
    return a != b
def contains_sublist(list_: List[Any], sublist: List[Any]) -> bool:
    """Determine if `list_` contains `sublist` as a contiguous run.

    :param list_:
        List to search for the `sublist` in.
    :param sublist:
        Sub list to search for.
    :return:
        True if `sublist` occurs contiguously inside `list_`.  An empty
        `sublist` is never considered contained.
    """
    if not sublist:
        return False
    width = len(sublist)
    # Compare every window of matching width against the sublist.
    return any(
        list_[start:start + width] == sublist
        for start in range(len(list_) - width + 1)
    )
def merge_sublist(list_: List[Any], sublist: List[Any]) -> List[Any]:
    """Merge a `sublist` into a given `list_`.

    :param list_:
        List to merge `sublist` into.
    :param sublist:
        Sublist to merge into `list_`.
    :return:
        A new list: a copy of `list_` with `sublist` appended at the end,
        unless `sublist` is already contained in `list_`, in which case the
        copy is returned unchanged.
    """
    merged = list(list_)
    if not contains_sublist(list_, sublist):
        merged.extend(sublist)
    return merged
def delete_sublist(list_: List[Any], sublist: List[Any]) -> List[Any]:
    """Remove the first occurrence of `sublist` from the given `list_`.

    :param list_:
        List to remove the `sublist` from.
    :param sublist:
        Sublist to remove from `list_`.
    :return:
        A new list: a copy of `list_` with the first contiguous occurrence
        of `sublist` removed (unchanged copy if there is none or `sublist`
        is empty).
    """
    width = len(sublist)
    if width == 0:
        return list(list_)
    for start in range(len(list_)):
        # A window equal to the sublist marks the span to splice out.
        if list_[start:start + width] == sublist:
            return list_[:start] + list_[start + width:]
    return list(list_)
def fortran_float(string: str) -> float:
    """Construct :class:`float` from Fortran style float strings.

    This function can convert strings to floats in all of the formats below:

        * ``3.14e10`` / ``3.14E10`` (also parsable with :class:`float`)
        * ``3.14e+10`` / ``3.14E+10`` / ``3.14e-10`` / ``3.14E-10`` (ditto)
        * ``3.14d10`` / ``3.14D10``
        * ``3.14d+10`` / ``3.14D+10`` / ``3.14d-10`` / ``3.14D-10``
        * ``3.14+100`` / ``3.14-100`` (exponent letter omitted entirely)
        * ``-3.14-100`` / ``+3.14+100`` (ditto, with a leading sign)

    .. note::

        Because RADS was written in Fortran, exponent characters in
        configuration and passindex files sometimes use 'D' or 'd' as
        the exponent separator instead of 'E' or 'e', and Fortran 'Ew.d'
        output can drop the exponent letter altogether.

    :param string:
        String to attempt to convert to a float.
    :return:
        The float parsed from the given `string`.
    :raises ValueError:
        If `string` does not represent a valid float.
    """
    try:
        return float(string)
    except ValueError as err:
        try:
            # Fortran 'D' exponents: 3.14d10 / 3.14D+10.
            return float(string.replace("d", "e").replace("D", "E"))
        except ValueError:
            # Exponent letter omitted entirely: 3.14+100 / -3.14-100.
            # Insert the missing 'e' before the exponent sign, skipping over
            # a leading sign so negative mantissas are handled as well.
            text = string.strip()
            sign = ""
            if text and text[0] in "+-":
                sign, text = text[0], text[1:]
            try:
                return float(sign + text.replace("+", "e+").replace("-", "e-"))
            except ValueError:
                # Re-raise the error from the plain float() attempt so the
                # message refers to the original string.
                raise err
def datetime_to_timestamp(
    time: datetime.datetime, *, epoch: datetime.datetime = EPOCH
) -> float:
    """Convert a datetime object to a timestamp relative to an epoch.
    :param time:
        Date and time.
    :param epoch:
        Date and time of epoch. Defaults to the RADS epoch.
    :return:
        The number of seconds between the `epoch` and the given `time`.
    """
    elapsed = time - epoch
    return elapsed.total_seconds()
def timestamp_to_datetime(
    seconds: float, *, epoch: datetime.datetime = EPOCH
) -> datetime.datetime:
    """Convert a timestamp relative to an epoch to a datetime.
    :param seconds:
        Seconds since the given `epoch`.
    :param epoch:
        Date and time of epoch. Defaults to the RADS epoch.
    :return:
        Date and time corresponding to the given `seconds` since the `epoch`.
    """
    offset = datetime.timedelta(seconds=seconds)
    return epoch + offset
| 28.509146 | 79 | 0.595551 |
9d7d4763ff5bcc621d4fd625be1835d42698db6e | 6,866 | py | Python | antistasi_logbook/gui/widgets/data_view_widget/type_fields/standard_type_fields.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | 2 | 2022-01-12T22:45:56.000Z | 2022-03-10T14:23:36.000Z | antistasi_logbook/gui/widgets/data_view_widget/type_fields/standard_type_fields.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | null | null | null | antistasi_logbook/gui/widgets/data_view_widget/type_fields/standard_type_fields.py | Giddius/Antistasi_Logbook | b2b520db1a54df484984876c7dfdb724703fed77 | [
"MIT"
] | null | null | null | """
WiP.
Soon.
"""
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
from typing import Mapping, Iterable
from pathlib import Path
from functools import cached_property
# * Qt Imports --------------------------------------------------------------------------------------->
from PySide6.QtGui import QFont, QColor
from PySide6.QtCore import Qt, QSize
from PySide6.QtWidgets import QLabel, QListWidget, QTreeWidget, QTreeWidgetItem
# * Gid Imports ----------------------------------------------------------------------------------------->
from gidapptools import get_logger
from gidapptools.general_helper.typing_helper import implements_protocol
# * Local Imports --------------------------------------------------------------------------------------->
from antistasi_logbook.gui.resources.antistasi_logbook_resources_accessor import AllResourceItems
from antistasi_logbook.gui.widgets.data_view_widget.type_fields.base_type_field import TypeFieldProtocol
# endregion[Imports]
# region [TODO]
# endregion [TODO]
# region [Logging]
# endregion[Logging]
# region [Constants]
THIS_FILE_DIR = Path(__file__).parent.absolute()
log = get_logger(__name__)
# endregion[Constants]
@implements_protocol(TypeFieldProtocol)
class BoolTypeField(QLabel):
    """Label-based type field that renders a boolean as a check-mark/cross pixmap."""
    ___typus___: type = bool
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # self.setLayoutDirection(Qt.RightToLeft)
        self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        # Target display size for the pixmap.
        self._image_size: QSize = QSize(50, 50)
        # Pixmaps keyed by the boolean value they visualize.
        self.image_table = {True: AllResourceItems.check_mark_green_image.get_as_pixmap(),
                            False: AllResourceItems.close_cancel_image.get_as_pixmap()}
    def set_size(self, w: int, h: int) -> None:
        """Set the pixmap size and rescale the currently shown pixmap, if any."""
        self._image_size = QSize(w, h)
        if self.pixmap() is not None:
            self.setPixmap(self.pixmap().scaled(self._image_size, Qt.KeepAspectRatioByExpanding))
    def set_value(self, value: bool) -> None:
        """Show the pixmap for `value`, or '-' when `value` is not a known boolean."""
        self.clear()
        pixmap = self.image_table.get(value, None)
        if pixmap is None:
            self.setText('-')
        else:
            if self._image_size is not None:
                pixmap = pixmap.scaled(self._image_size, Qt.KeepAspectRatioByExpanding)
            self.setPixmap(pixmap)
    @classmethod
    def add_to_type_field_table(cls, table: dict):
        """Register this field class in `table` under its handled type and return `table`."""
        table[cls.___typus___] = cls
        return table
@implements_protocol(TypeFieldProtocol)
class StringTypeField(QLabel):
    """Label-based type field that renders a string as Markdown inline code."""
    ___typus___: type = str
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.setLayoutDirection(Qt.RightToLeft)
        self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        self.setTextFormat(Qt.MarkdownText)
    def set_size(self, h, w):
        # Sizing is handled by the label itself; nothing to do here.
        pass
    def set_value(self, value: str):
        """Display `value`; backticks make Qt render it as Markdown inline code."""
        self.setText(f"`{value}`")
    @classmethod
    def add_to_type_field_table(cls, table: dict):
        """Register this field class in `table` under its handled type and return `table`."""
        table[cls.___typus___] = cls
        return table
@implements_protocol(TypeFieldProtocol)
class IntTypeField(StringTypeField):
    """String type field specialized for integers."""
    ___typus___: type = int
    def set_value(self, value: int):
        """Render the integer via the string field's Markdown formatting."""
        return super().set_value(str(value))
@implements_protocol(TypeFieldProtocol)
class FloatTypeField(StringTypeField):
    """String type field specialized for floats."""
    ___typus___: type = float
    def set_value(self, value: float):
        """Render the float via the string field's Markdown formatting."""
        return super().set_value(str(value))
@implements_protocol(TypeFieldProtocol)
class ListTypeField(QListWidget):
    """List-widget based type field that shows each list element as one row."""
    ___typus___: type = list
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # Last list passed to `set_value`.
        self.values = None
        self.setup()
    def setup(self):
        """One-time widget configuration."""
        self.setItemAlignment(Qt.AlignCenter)
    def set_size(self, h, w):
        # Sizing is handled by the list widget itself; nothing to do here.
        pass
    def set_value(self, value: list):
        """Store `value` and append one stringified row per element."""
        self.values = value
        self.addItems(str(i) for i in value)
    @classmethod
    def add_to_type_field_table(cls, table: dict):
        """Register this field class in `table` under its handled type and return `table`."""
        table[cls.___typus___] = cls
        return table
@implements_protocol(TypeFieldProtocol)
class DictTypeField(QTreeWidget):
    """Tree-widget based type field that renders a (possibly nested) dict.
    Mappings become labeled parent items, non-string iterables become child
    lists, booleans are rendered bold with a colored icon and everything else
    is stringified.
    """
    ___typus___: type = dict
    # Icons used to visualize boolean leaf values.
    bool_images = {True: AllResourceItems.check_mark_green_image.get_as_icon(),
                   False: AllResourceItems.close_black_image.get_as_icon()}
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # Last dict passed to `set_value`.
        self.values = None
        self.setup()
    @cached_property
    def bool_font(self) -> QFont:
        """Bold variant of the widget font, used for boolean leaves."""
        font: QFont = self.font()
        font.setBold(True)
        return font
    @cached_property
    def key_font(self) -> QFont:
        """Bold variant of the widget font, used for mapping keys."""
        font: QFont = self.font()
        font.setBold(True)
        return font
    def setup(self):
        """One-time widget configuration."""
        self.setRootIsDecorated(True)
        self.header().setHidden(True)
    def set_size(self, h, w):
        # Sizing is handled by the tree widget itself; nothing to do here.
        pass
    def _fill_item(self, item: QTreeWidgetItem, value):
        """Recursively add `value` to the tree under `item`."""
        item.setExpanded(True)
        if isinstance(value, Mapping):
            for key, val in value.items():
                child = QTreeWidgetItem()
                child.setText(0, str(key))
                child.setFont(0, self.key_font)
                item.addChild(child)
                self._fill_item(child, val)
        elif isinstance(value, Iterable) and not isinstance(value, str):
            for val in value:
                child = QTreeWidgetItem()
                item.addChild(child)
                if isinstance(val, Mapping):
                    child.setText(0, '[dict]')
                    self._fill_item(child, val)
                # BUG FIX: this branch previously tested the parent `value`
                # (always an iterable here), so every non-mapping element was
                # labeled '[list]' and the bool/scalar handling below was
                # unreachable.  Test the element `val` instead.
                elif isinstance(val, Iterable) and not isinstance(val, str):
                    child.setText(0, '[list]')
                    self._fill_item(child, val)
                elif isinstance(val, bool):
                    child.setFont(0, self.bool_font)
                    if val is True:
                        child.setForeground(0, QColor(0, 225, 0, 200))
                        child.setIcon(0, self.bool_images[True])
                    elif val is False:
                        child.setForeground(0, QColor(225, 0, 0, 200))
                        child.setIcon(0, self.bool_images[False])
                    child.setText(0, str(val))
                else:
                    child.setText(0, str(val))
                child.setExpanded(True)
        else:
            child = QTreeWidgetItem()
            child.setText(0, str(value))
            item.addChild(child)
    def fill_widget(self, value: dict):
        """Clear the tree and rebuild it from `value`."""
        self.clear()
        self._fill_item(self.invisibleRootItem(), value)
    def set_value(self, value: dict):
        """Store `value` and render it in the tree."""
        self.values = value
        self.fill_widget(value)
    @classmethod
    def add_to_type_field_table(cls, table: dict):
        """Register this field class in `table` under its handled type and return `table`."""
        table[cls.___typus___] = cls
        return table
# region[Main_Exec]
if __name__ == '__main__':
pass
# endregion[Main_Exec]
| 30.114035 | 106 | 0.601078 |
b25cf9e04d7613b3566d3f034e53f3a9e08950eb | 280 | py | Python | tools/telemetry/unittest_data/discoverable_classes/discover_dummyclass.py | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | tools/telemetry/unittest_data/discoverable_classes/discover_dummyclass.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | tools/telemetry/unittest_data/discoverable_classes/discover_dummyclass.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A dummy exception subclass used by core/discover.py's unit tests."""
class DummyException(Exception):
    """Dummy exception subclass used by core/discover.py's unit tests."""
| 31.111111 | 72 | 0.753571 |
0378f19db9f04c5185639e004ae917f3d9e2c2c0 | 1,056 | py | Python | scripts/merge_gps.py | hyaguchi947d/gopro_gps_ui | 3e9184ec0f47193eb3a48485784033ec33bef33e | [
"MIT"
] | 1 | 2019-12-23T10:11:56.000Z | 2019-12-23T10:11:56.000Z | scripts/merge_gps.py | hyaguchi947d/gopro_gps_ui | 3e9184ec0f47193eb3a48485784033ec33bef33e | [
"MIT"
] | null | null | null | scripts/merge_gps.py | hyaguchi947d/gopro_gps_ui | 3e9184ec0f47193eb3a48485784033ec33bef33e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import json
if __name__ == "__main__":
    # Merge sequentially numbered GoPro GPS dumps (<PREFIX>01<POSTFIX>.json,
    # <PREFIX>02<POSTFIX>.json, ...) into one <PREFIX>XX<POSTFIX>.json file.
    if len(sys.argv) < 4:
        print("usage: merge_gps.py <DIR> <PREFIX> <POSTFIX>")
        sys.exit(1)
    dirname, prefix, postfix = sys.argv[1:4]
    merged = {"1":{"streams":{"GPS5":{"samples":[]}}}}
    samples = merged["1"]["streams"]["GPS5"]["samples"]
    idx = 1
    while True:
        filename = "%s/%s%02d%s.json" % (dirname, prefix, idx, postfix)
        if not os.path.isfile(filename):
            break
        print(filename)
        with open(filename) as f:
            data = json.load(f)
            # Chapters without a GPS stream are skipped.
            if "GPS5" in data["1"]["streams"]:
                samples.extend(data["1"]["streams"]["GPS5"]["samples"])
        idx += 1
    outfile = "%s/%sXX%s.json" % (dirname, prefix, postfix)
    with open(outfile, "w") as f:
        json.dump(merged, f)
| 27.789474 | 107 | 0.549242 |
87b717d73ebc4d7fe995ff0dee159c0c7ee3a8a8 | 37 | py | Python | avalon/tools/html_server/__init__.py | Yowza-Animation/avalon-core | 0a423bfb6c92e8c06dcf5cc61ba04494fb0bac85 | [
"MIT"
] | null | null | null | avalon/tools/html_server/__init__.py | Yowza-Animation/avalon-core | 0a423bfb6c92e8c06dcf5cc61ba04494fb0bac85 | [
"MIT"
] | null | null | null | avalon/tools/html_server/__init__.py | Yowza-Animation/avalon-core | 0a423bfb6c92e8c06dcf5cc61ba04494fb0bac85 | [
"MIT"
] | null | null | null | from . import app
__all__ = ["app"]
| 9.25 | 17 | 0.621622 |
41f17858a786f548f26fb87802ac3e2fb2d4d2a3 | 547 | py | Python | manage.py | Madjura/text-entailment | efaf5bd369971f5742eb3bad9ca998af402d0bcb | [
"MIT"
] | null | null | null | manage.py | Madjura/text-entailment | efaf5bd369971f5742eb3bad9ca998af402d0bcb | [
"MIT"
] | null | null | null | manage.py | Madjura/text-entailment | efaf5bd369971f5742eb3bad9ca998af402d0bcb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point (`python manage.py <command>`).
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "text_entailment.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (Django not installed,
        # or the virtual environment not activated).
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 34.1875 | 79 | 0.691042 |
0ef0896df8d4460a8c3426fcb91a208a7c6850d6 | 76,989 | py | Python | pandas/core/window/rolling.py | faisal-deepsource/pandas | df91d4a9c9fee61591ebeecc851b50f7df79c83b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-01T15:28:21.000Z | 2021-06-01T15:28:21.000Z | pandas/core/window/rolling.py | faisal-deepsource/pandas | df91d4a9c9fee61591ebeecc851b50f7df79c83b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/window/rolling.py | faisal-deepsource/pandas | df91d4a9c9fee61591ebeecc851b50f7df79c83b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from __future__ import annotations
import copy
from datetime import timedelta
from functools import partial
import inspect
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
)
import warnings
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
to_offset,
)
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
ArrayLike,
Axis,
FrameOrSeries,
FrameOrSeriesUnion,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_integer,
is_list_like,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.missing import notna
from pandas.core.algorithms import factorize
from pandas.core.apply import ResamplerWindowApply
from pandas.core.base import (
DataError,
SelectionMixin,
)
import pandas.core.common as com
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
TimedeltaIndex,
)
from pandas.core.internals import ArrayManager
from pandas.core.reshape.concat import concat
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
maybe_use_numba,
)
from pandas.core.window.common import (
flex_binary_moment,
zsqrt,
)
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
kwargs_scipy,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
window_apply_parameters,
)
from pandas.core.window.indexers import (
BaseIndexer,
FixedWindowIndexer,
GroupbyIndexer,
VariableWindowIndexer,
)
from pandas.core.window.numba_ import (
generate_manual_numpy_nan_agg_with_axis,
generate_numba_apply_func,
generate_numba_table_func,
)
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
from pandas.core.groupby.ops import BaseGrouper
from pandas.core.internals import Block # noqa:F401
class BaseWindow(SelectionMixin):
    """Provides utilities for performing windowing operations."""
    _attributes: list[str] = []
    exclusions: frozenset[Hashable] = frozenset()
    _on: Index
    def __init__(
        self,
        obj: FrameOrSeries,
        window=None,
        min_periods: int | None = None,
        center: bool = False,
        win_type: str | None = None,
        axis: Axis = 0,
        on: str | Index | None = None,
        closed: str | None = None,
        method: str = "single",
        *,
        selection=None,
    ):
        self.obj = obj
        self.on = on
        self.closed = closed
        self.window = window
        self.min_periods = min_periods
        self.center = center
        # TODO: Change this back to self.win_type once deprecation is enforced
        self._win_type = win_type
        self.axis = obj._get_axis_number(axis) if axis is not None else None
        self.method = method
        # Window size in i8 (nanosecond) units when the window is
        # frequency-based; populated by subclasses, stays None otherwise.
        self._win_freq_i8 = None
        if self.on is None:
            if self.axis == 0:
                self._on = self.obj.index
            else:
                # i.e. self.axis == 1
                self._on = self.obj.columns
        elif isinstance(self.on, Index):
            self._on = self.on
        elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
            self._on = Index(self.obj[self.on])
        else:
            raise ValueError(
                f"invalid on specified as {self.on}, "
                "must be a column (of DataFrame), an Index or None"
            )
        self._selection = selection
        self.validate()
    @property
    def win_type(self):
        # Deprecated accessor: historically returned the string "freq" for
        # frequency-based windows; warns and preserves that behavior for now.
        if self._win_freq_i8 is not None:
            warnings.warn(
                "win_type will no longer return 'freq' in a future version. "
                "Check the type of self.window instead.",
                FutureWarning,
                stacklevel=2,
            )
            return "freq"
        return self._win_type
    @property
    def is_datetimelike(self) -> bool:
        # Deprecated: True when the window is frequency (time) based.
        warnings.warn(
            "is_datetimelike is deprecated and will be removed in a future version.",
            FutureWarning,
            stacklevel=2,
        )
        return self._win_freq_i8 is not None
    def validate(self) -> None:
        # Validate constructor arguments; raises ValueError/TypeError on
        # invalid values or combinations.
        if self.center is not None and not is_bool(self.center):
            raise ValueError("center must be a boolean")
        if self.min_periods is not None:
            if not is_integer(self.min_periods):
                raise ValueError("min_periods must be an integer")
            elif self.min_periods < 0:
                raise ValueError("min_periods must be >= 0")
            elif is_integer(self.window) and self.min_periods > self.window:
                raise ValueError(
                    f"min_periods {self.min_periods} must be <= window {self.window}"
                )
        if self.closed is not None and self.closed not in [
            "right",
            "both",
            "left",
            "neither",
        ]:
            raise ValueError("closed must be 'right', 'left', 'both' or 'neither'")
        if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):
            raise TypeError(f"invalid type: {type(self)}")
        if isinstance(self.window, BaseIndexer):
            # Validate that the passed BaseIndexer subclass has
            # a get_window_bounds with the correct signature.
            get_window_bounds_signature = inspect.signature(
                self.window.get_window_bounds
            ).parameters.keys()
            expected_signature = inspect.signature(
                BaseIndexer().get_window_bounds
            ).parameters.keys()
            if get_window_bounds_signature != expected_signature:
                raise ValueError(
                    f"{type(self.window).__name__} does not implement "
                    f"the correct signature for get_window_bounds"
                )
        if self.method not in ["table", "single"]:
            # NOTE(review): the message is missing the closing quote on
            # 'single' — left byte-identical here; fix upstream.
            raise ValueError("method must be 'table' or 'single")
    def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries:
        """
        Split data into blocks & return conformed data.
        """
        # filter out the on from the object
        if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2:
            obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
        if self.axis == 1:
            # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything
            # to float to calculate the complete row at once. We exclude all non-numeric
            # dtypes.
            obj = obj.select_dtypes(include=["integer", "float"], exclude=["timedelta"])
            obj = obj.astype("float64", copy=False)
            obj._mgr = obj._mgr.consolidate()
        return obj
    def _gotitem(self, key, ndim, subset=None):
        """
        Sub-classes to define. Return a sliced object.
        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        # we need to make a shallow copy of ourselves
        # with the same groupby
        with warnings.catch_warnings():
            # TODO: Remove once win_type deprecation is enforced
            warnings.filterwarnings("ignore", "win_type", FutureWarning)
            kwargs = {attr: getattr(self, attr) for attr in self._attributes}
        selection = None
        if subset.ndim == 2 and (
            (is_scalar(key) and key in subset) or is_list_like(key)
        ):
            selection = key
        new_win = type(self)(subset, selection=selection, **kwargs)
        return new_win
    def __getattr__(self, attr: str):
        # Delegate unknown attribute access to column selection on the
        # wrapped object (e.g. `roll.colname` -> `roll["colname"]`).
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{attr}'"
        )
    def _dir_additions(self):
        # Extra names for tab-completion: the wrapped object's columns.
        return self.obj._dir_additions()
    def __repr__(self) -> str:
        """
        Provide a nice str repr of our rolling object.
        """
        attrs_list = (
            f"{attr_name}={getattr(self, attr_name)}"
            for attr_name in self._attributes
            if getattr(self, attr_name, None) is not None and attr_name[0] != "_"
        )
        attrs = ",".join(attrs_list)
        return f"{type(self).__name__} [{attrs}]"
    def __iter__(self):
        # Yield each window as a positional slice of the (conformed) object.
        obj = self._create_data(self._selected_obj)
        indexer = self._get_window_indexer()
        start, end = indexer.get_window_bounds(
            num_values=len(obj),
            min_periods=self.min_periods,
            center=self.center,
            closed=self.closed,
        )
        # From get_window_bounds, those two should be equal in length of array
        assert len(start) == len(end)
        for s, e in zip(start, end):
            result = obj.iloc[slice(s, e)]
            yield result
    def _prep_values(self, values: ArrayLike) -> np.ndarray:
        """Convert input to numpy arrays for Cython routines"""
        if needs_i8_conversion(values.dtype):
            raise NotImplementedError(
                f"ops for {type(self).__name__} for this "
                f"dtype {values.dtype} are not implemented"
            )
        else:
            # GH #12373 : rolling functions error on float32 data
            # make sure the data is coerced to float64
            try:
                values = ensure_float64(values)
            except (ValueError, TypeError) as err:
                raise TypeError(f"cannot handle this type -> {values.dtype}") from err
        # Convert inf to nan for C funcs
        inf = np.isinf(values)
        if inf.any():
            values = np.where(inf, np.nan, values)
        # error: Incompatible return value type (got "Optional[ndarray]",
        # expected "ndarray")
        return values  # type: ignore[return-value]
    def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:
        # if we have an 'on' column we want to put it back into
        # the results in the same location
        from pandas import Series
        if self.on is not None and not self._on.equals(obj.index):
            name = self._on.name
            extra_col = Series(self._on, index=self.obj.index, name=name)
            if name in result.columns:
                # TODO: sure we want to overwrite results?
                result[name] = extra_col
            elif name in result.index.names:
                pass
            elif name in self._selected_obj.columns:
                # insert in the same location as we had in _selected_obj
                old_cols = self._selected_obj.columns
                new_cols = result.columns
                old_loc = old_cols.get_loc(name)
                overlap = new_cols.intersection(old_cols[:old_loc])
                new_loc = len(overlap)
                result.insert(new_loc, name, extra_col)
            else:
                # insert at the end
                result[name] = extra_col
    @property
    def _index_array(self):
        # i8 representation of the "on" index for variable-window bounds;
        # None when the index is not datetimelike.
        # TODO: why do we get here with e.g. MultiIndex?
        if needs_i8_conversion(self._on.dtype):
            return self._on.asi8
        return None
    def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame:
        """Validate and finalize result."""
        if out.shape[1] == 0 and obj.shape[1] > 0:
            raise DataError("No numeric types to aggregate")
        elif out.shape[1] == 0:
            return obj.astype("float64")
        self._insert_on_column(out, obj)
        return out
    def _get_window_indexer(self) -> BaseIndexer:
        """
        Return an indexer class that will compute the window start and end bounds
        """
        if isinstance(self.window, BaseIndexer):
            return self.window
        if self._win_freq_i8 is not None:
            return VariableWindowIndexer(
                index_array=self._index_array,
                window_size=self._win_freq_i8,
                center=self.center,
            )
        return FixedWindowIndexer(window_size=self.window)
    def _apply_series(
        self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
    ) -> Series:
        """
        Series version of _apply_blockwise
        """
        obj = self._create_data(self._selected_obj)
        if name == "count":
            # GH 12541: Special case for count where we support date-like types
            obj = notna(obj).astype(int)
        try:
            values = self._prep_values(obj._values)
        except (TypeError, NotImplementedError) as err:
            raise DataError("No numeric types to aggregate") from err
        result = homogeneous_func(values)
        return obj._constructor(result, index=obj.index, name=obj.name)
    def _apply_blockwise(
        self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
    ) -> FrameOrSeriesUnion:
        """
        Apply the given function to the DataFrame broken down into homogeneous
        sub-frames.
        """
        if self._selected_obj.ndim == 1:
            return self._apply_series(homogeneous_func, name)
        obj = self._create_data(self._selected_obj)
        if name == "count":
            # GH 12541: Special case for count where we support date-like types
            obj = notna(obj).astype(int)
            obj._mgr = obj._mgr.consolidate()
        mgr = obj._mgr
        def hfunc(bvalues: ArrayLike) -> ArrayLike:
            # Per-block kernel: blocks are (columns, rows), so transpose in
            # and out around the row-wise window function.
            # TODO(EA2D): getattr unnecessary with 2D EAs
            values = self._prep_values(getattr(bvalues, "T", bvalues))
            res_values = homogeneous_func(values)
            return getattr(res_values, "T", res_values)
        def hfunc2d(values: ArrayLike) -> ArrayLike:
            # Per-array kernel for the ArrayManager axis=1 path.
            values = self._prep_values(values)
            return homogeneous_func(values)
        if isinstance(mgr, ArrayManager) and self.axis == 1:
            new_mgr = mgr.apply_2d(hfunc2d, ignore_failures=True)
        else:
            new_mgr = mgr.apply(hfunc, ignore_failures=True)
        out = obj._constructor(new_mgr)
        return self._resolve_output(out, obj)
    def _apply_tablewise(
        self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None
    ) -> FrameOrSeriesUnion:
        """
        Apply the given function to the DataFrame across the entire object
        """
        if self._selected_obj.ndim == 1:
            raise ValueError("method='table' not applicable for Series objects.")
        obj = self._create_data(self._selected_obj)
        values = self._prep_values(obj.to_numpy())
        values = values.T if self.axis == 1 else values
        result = homogeneous_func(values)
        result = result.T if self.axis == 1 else result
        out = obj._constructor(result, index=obj.index, columns=obj.columns)
        return self._resolve_output(out, obj)
    def _apply_pairwise(
        self,
        target: FrameOrSeriesUnion,
        other: FrameOrSeriesUnion | None,
        pairwise: bool | None,
        func: Callable[[FrameOrSeriesUnion, FrameOrSeriesUnion], FrameOrSeriesUnion],
    ) -> FrameOrSeriesUnion:
        """
        Apply the given pairwise function given 2 pandas objects (DataFrame/Series)
        """
        if other is None:
            other = target
            # only default unset
            pairwise = True if pairwise is None else pairwise
        return flex_binary_moment(target, other, func, pairwise=bool(pairwise))
    def _apply(
        self,
        func: Callable[..., Any],
        name: str | None = None,
        numba_cache_key: tuple[Callable, str] | None = None,
        **kwargs,
    ):
        """
        Rolling statistical measure using supplied function.
        Designed to be used with passed-in Cython array-based functions.
        Parameters
        ----------
        func : callable function to apply
        name : str,
        numba_cache_key : tuple
            caching key to be used to store a compiled numba func
        **kwargs
            additional arguments for rolling function and window function
        Returns
        -------
        y : type of input
        """
        window_indexer = self._get_window_indexer()
        min_periods = (
            self.min_periods
            if self.min_periods is not None
            else window_indexer.window_size
        )
        def homogeneous_func(values: np.ndarray):
            # calculation function
            if values.size == 0:
                return values.copy()
            def calc(x):
                start, end = window_indexer.get_window_bounds(
                    num_values=len(x),
                    min_periods=min_periods,
                    center=self.center,
                    closed=self.closed,
                )
                return func(x, start, end, min_periods)
            with np.errstate(all="ignore"):
                if values.ndim > 1 and self.method == "single":
                    result = np.apply_along_axis(calc, self.axis, values)
                else:
                    result = calc(values)
            if numba_cache_key is not None:
                NUMBA_FUNC_CACHE[numba_cache_key] = func
            return result
        if self.method == "single":
            return self._apply_blockwise(homogeneous_func, name)
        else:
            return self._apply_tablewise(homogeneous_func, name)
    def aggregate(self, func, *args, **kwargs):
        # Try resampler-style aggregation first; fall back to `apply` for
        # callables it cannot handle.
        result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
        if result is None:
            return self.apply(func, raw=False, args=args, kwargs=kwargs)
        return result
    agg = aggregate
class BaseWindowGroupby(BaseWindow):
    """
    Provide the groupby windowing facilities.
    """
    _grouper: BaseGrouper
    _as_index: bool
    _attributes = ["_grouper"]
    def __init__(
        self,
        obj: FrameOrSeries,
        *args,
        _grouper: BaseGrouper,
        _as_index: bool = True,
        **kwargs,
    ):
        from pandas.core.groupby.ops import BaseGrouper
        if not isinstance(_grouper, BaseGrouper):
            raise ValueError("Must pass a BaseGrouper object.")
        self._grouper = _grouper
        self._as_index = _as_index
        # GH 32262: It's convention to keep the grouping column in
        # groupby.<agg_func>, but unexpected to users in
        # groupby.rolling.<agg_func>
        obj = obj.drop(columns=self._grouper.names, errors="ignore")
        super().__init__(obj, *args, **kwargs)
    def _apply(
        self,
        func: Callable[..., Any],
        name: str | None = None,
        numba_cache_key: tuple[Callable, str] | None = None,
        **kwargs,
    ) -> FrameOrSeries:
        """Apply func window-wise, then rebuild the group-keyed MultiIndex."""
        result = super()._apply(
            func,
            name,
            numba_cache_key,
            **kwargs,
        )
        # Reconstruct the resulting MultiIndex
        # 1st set of levels = group by labels
        # 2nd set of levels = original DataFrame/Series index
        grouped_object_index = self.obj.index
        grouped_index_name = [*grouped_object_index.names]
        groupby_keys = copy.copy(self._grouper.names)
        result_index_names = groupby_keys + grouped_index_name
        drop_columns = [
            key
            for key in self._grouper.names
            if key not in self.obj.index.names or key is None
        ]
        if len(drop_columns) != len(groupby_keys):
            # Our result will have still kept the column in the result
            result = result.drop(columns=drop_columns, errors="ignore")
        codes = self._grouper.codes
        levels = copy.copy(self._grouper.levels)
        group_indices = self._grouper.indices.values()
        if group_indices:
            indexer = np.concatenate(list(group_indices))
        else:
            indexer = np.array([], dtype=np.intp)
        codes = [c.take(indexer) for c in codes]
        # if the index of the original dataframe needs to be preserved, append
        # this index (but reordered) to the codes/levels from the groupby
        if grouped_object_index is not None:
            idx = grouped_object_index.take(indexer)
            if not isinstance(idx, MultiIndex):
                idx = MultiIndex.from_arrays([idx])
            codes.extend(list(idx.codes))
            levels.extend(list(idx.levels))
        result_index = MultiIndex(
            levels, codes, names=result_index_names, verify_integrity=False
        )
        result.index = result_index
        if not self._as_index:
            result = result.reset_index(level=list(range(len(groupby_keys))))
        return result
    def _apply_pairwise(
        self,
        target: FrameOrSeriesUnion,
        other: FrameOrSeriesUnion | None,
        pairwise: bool | None,
        func: Callable[[FrameOrSeriesUnion, FrameOrSeriesUnion], FrameOrSeriesUnion],
    ) -> FrameOrSeriesUnion:
        """
        Apply the given pairwise function given 2 pandas objects (DataFrame/Series)
        """
        # Manually drop the grouping column first
        target = target.drop(columns=self._grouper.names, errors="ignore")
        result = super()._apply_pairwise(target, other, pairwise, func)
        # 1) Determine the levels + codes of the groupby levels
        if other is not None:
            # When we have other, we must reindex (expand) the result
            # from flex_binary_moment to a "transform"-like result
            # per groupby combination
            old_result_len = len(result)
            result = concat(
                [
                    result.take(gb_indices).reindex(result.index)
                    for gb_indices in self._grouper.indices.values()
                ]
            )
            gb_pairs = (
                com.maybe_make_list(pair) for pair in self._grouper.indices.keys()
            )
            groupby_codes = []
            groupby_levels = []
            # e.g. [[1, 2], [4, 5]] as [[1, 4], [2, 5]]
            for gb_level_pair in map(list, zip(*gb_pairs)):
                labels = np.repeat(np.array(gb_level_pair), old_result_len)
                codes, levels = factorize(labels)
                groupby_codes.append(codes)
                groupby_levels.append(levels)
        else:
            # When we evaluate the pairwise=True result, repeat the groupby
            # labels by the number of columns in the original object
            groupby_codes = self._grouper.codes
            # error: Incompatible types in assignment (expression has type
            # "List[Index]", variable has type "List[Union[ndarray, Index]]")
            groupby_levels = self._grouper.levels  # type: ignore[assignment]
            group_indices = self._grouper.indices.values()
            if group_indices:
                indexer = np.concatenate(list(group_indices))
            else:
                indexer = np.array([], dtype=np.intp)
            if target.ndim == 1:
                repeat_by = 1
            else:
                repeat_by = len(target.columns)
            groupby_codes = [
                np.repeat(c.take(indexer), repeat_by) for c in groupby_codes
            ]
        # 2) Determine the levels + codes of the result from super()._apply_pairwise
        if isinstance(result.index, MultiIndex):
            result_codes = list(result.index.codes)
            result_levels = list(result.index.levels)
            result_names = list(result.index.names)
        else:
            idx_codes, idx_levels = factorize(result.index)
            result_codes = [idx_codes]
            result_levels = [idx_levels]
            result_names = [result.index.name]
        # 3) Create the resulting index by combining 1) + 2)
        result_codes = groupby_codes + result_codes
        result_levels = groupby_levels + result_levels
        result_names = self._grouper.names + result_names
        result_index = MultiIndex(
            result_levels, result_codes, names=result_names, verify_integrity=False
        )
        result.index = result_index
        return result
    def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries:
        """
        Split data into blocks & return conformed data.
        """
        # Ensure the object we're rolling over is monotonically sorted relative
        # to the groups
        # GH 36197
        if not obj.empty:
            groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(
                np.int64
            )
            obj = obj.take(groupby_order)
        return super()._create_data(obj)
    def _gotitem(self, key, ndim, subset=None):
        # we are setting the index on the actual object
        # here so our index is carried through to the selected obj
        # when we do the splitting for the groupby
        if self.on is not None:
            self.obj = self.obj.set_index(self._on)
        return super()._gotitem(key, ndim, subset=subset)
    def _validate_monotonic(self):
        """
        Validate that "on" is monotonic; already validated at a higher level.
        """
        pass
class Window(BaseWindow):
    """
    Provide rolling window calculations.
    Parameters
    ----------
    window : int, offset, or BaseIndexer subclass
        Size of the moving window. This is the number of observations used for
        calculating the statistic. Each window will be a fixed size.
        If its an offset then this will be the time period of each window. Each
        window will be a variable sized based on the observations included in
        the time-period. This is only valid for datetimelike indexes.
        If a BaseIndexer subclass is passed, calculates the window boundaries
        based on the defined ``get_window_bounds`` method. Additional rolling
        keyword arguments, namely `min_periods`, `center`, and
        `closed` will be passed to `get_window_bounds`.
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA). For a window that is specified by an offset,
        `min_periods` will default to 1. Otherwise, `min_periods` will default
        to the size of the window.
    center : bool, default False
        Set the labels at the center of the window.
    win_type : str, default None
        Provide a window type. If ``None``, all points are evenly weighted.
        See the notes below for further information.
    on : str, optional
        For a DataFrame, a datetime-like column or Index level on which
        to calculate the rolling window, rather than the DataFrame's index.
        Provided integer column is ignored and excluded from result since
        an integer index is not used to calculate the rolling window.
    axis : int or str, default 0
    closed : str, default None
        Make the interval closed on the 'right', 'left', 'both' or
        'neither' endpoints. Defaults to 'right'.
        .. versionchanged:: 1.2.0
        The closed parameter with fixed windows is now supported.
    method : str {'single', 'table'}, default 'single'
        Execute the rolling operation per single column or row (``'single'``)
        or over the entire object (``'table'``).
        This argument is only implemented when specifying ``engine='numba'``
        in the method call.
        .. versionadded:: 1.3.0
    Returns
    -------
    a Window or Rolling sub-classed for the particular operation
    See Also
    --------
    expanding : Provides expanding transformations.
    ewm : Provides exponential weighted functions.
    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.
    To learn more about the offsets & frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
    If ``win_type=None``, all points are evenly weighted; otherwise, ``win_type``
    can accept a string of any `scipy.signal window function
    <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
    Certain Scipy window types require additional parameters to be passed
    in the aggregation function. The additional parameters must match
    the keywords specified in the Scipy window type method signature.
    Please see the third example below on how to add the additional parameters.
    Examples
    --------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0
    Rolling sum with a window length of 2, using the 'triang'
    window type.
    >>> df.rolling(2, win_type='triang').sum()
         B
    0  NaN
    1  0.5
    2  1.5
    3  NaN
    4  NaN
    Rolling sum with a window length of 2, using the 'gaussian'
    window type (note how we need to specify std).
    >>> df.rolling(2, win_type='gaussian').sum(std=3)
              B
    0       NaN
    1  0.986207
    2  2.958621
    3       NaN
    4       NaN
    Rolling sum with a window length of 2, min_periods defaults
    to the window length.
    >>> df.rolling(2).sum()
         B
    0  NaN
    1  1.0
    2  3.0
    3  NaN
    4  NaN
    Same as above, but explicitly set the min_periods
    >>> df.rolling(2, min_periods=1).sum()
         B
    0  0.0
    1  1.0
    2  3.0
    3  2.0
    4  4.0
    Same as above, but with forward-looking windows
    >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
    >>> df.rolling(window=indexer, min_periods=1).sum()
         B
    0  1.0
    1  3.0
    2  2.0
    3  4.0
    4  4.0
    A ragged (meaning not-a-regular frequency), time-indexed DataFrame
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
    ...                   index = [pd.Timestamp('20130101 09:00:00'),
    ...                            pd.Timestamp('20130101 09:00:02'),
    ...                            pd.Timestamp('20130101 09:00:03'),
    ...                            pd.Timestamp('20130101 09:00:05'),
    ...                            pd.Timestamp('20130101 09:00:06')])
    >>> df
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  2.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0
    Contrasting to an integer rolling window, this will roll a variable
    length window corresponding to the time period.
    The default for min_periods is 1.
    >>> df.rolling('2s').sum()
                           B
    2013-01-01 09:00:00  0.0
    2013-01-01 09:00:02  1.0
    2013-01-01 09:00:03  3.0
    2013-01-01 09:00:05  NaN
    2013-01-01 09:00:06  4.0
    """
    # NOTE(review): presumably the constructor attributes copied when a new
    # window object is built from this one — confirm against BaseWindow.
    _attributes = [
        "window",
        "min_periods",
        "center",
        "win_type",
        "axis",
        "on",
        "closed",
        "method",
    ]
    def validate(self):
        """Validate weighted-window arguments on top of BaseWindow checks."""
        super().validate()
        if not isinstance(self.win_type, str):
            raise ValueError(f"Invalid win_type {self.win_type}")
        # scipy is an optional dependency; only required for weighted windows.
        signal = import_optional_dependency(
            "scipy.signal", extra="Scipy is required to generate window weight."
        )
        # win_type must name a scipy.signal window-generator function.
        self._scipy_weight_generator = getattr(signal, self.win_type, None)
        if self._scipy_weight_generator is None:
            raise ValueError(f"Invalid win_type {self.win_type}")
        if isinstance(self.window, BaseIndexer):
            raise NotImplementedError(
                "BaseIndexer subclasses not implemented with win_types."
            )
        elif not is_integer(self.window) or self.window < 0:
            raise ValueError("window must be an integer 0 or greater")
        if self.method != "single":
            raise NotImplementedError("'single' is the only supported method type.")
    def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:
        """
        Center the result in the window for weighted rolling aggregations.
        """
        if self.axis > result.ndim - 1:
            raise ValueError("Requested axis is larger then no. of argument dimensions")
        if offset > 0:
            # Drop the first `offset` entries along self.axis so the labels
            # line up with the window centers.
            lead_indexer = [slice(None)] * result.ndim
            lead_indexer[self.axis] = slice(offset, None)
            result = np.copy(result[tuple(lead_indexer)])
        return result
    def _apply(
        self,
        func: Callable[[np.ndarray, int, int], np.ndarray],
        name: str | None = None,
        numba_cache_key: tuple[Callable, str] | None = None,
        **kwargs,
    ):
        """
        Rolling with weights statistical measure using supplied function.
        Designed to be used with passed-in Cython array-based functions.
        Parameters
        ----------
        func : callable function to apply
        name : str,
        numba_cache_key : tuple
            unused here; weighted windows do not use the numba engine
        **kwargs
            additional arguments for scipy windows if necessary
        Returns
        -------
        y : type of input
        """
        # Build the weight vector from scipy (kwargs are scipy parameters,
        # e.g. std= for 'gaussian').
        window = self._scipy_weight_generator(self.window, **kwargs)
        offset = (len(window) - 1) // 2 if self.center else 0
        def homogeneous_func(values: np.ndarray):
            # calculation function
            if values.size == 0:
                return values.copy()
            def calc(x):
                # Pad the tail with NaNs so centered results keep their length
                # after _center_window trims the head.
                additional_nans = np.array([np.nan] * offset)
                x = np.concatenate((x, additional_nans))
                return func(x, window, self.min_periods or len(window))
            with np.errstate(all="ignore"):
                if values.ndim > 1:
                    result = np.apply_along_axis(calc, self.axis, values)
                else:
                    # Our weighted aggregations return memoryviews
                    result = np.asarray(calc(values))
            if self.center:
                result = self._center_window(result, offset)
            return result
        return self._apply_blockwise(homogeneous_func, name)
    @doc(
        _shared_docs["aggregate"],
        see_also=dedent(
            """
        See Also
        --------
        pandas.DataFrame.aggregate : Similar DataFrame method.
        pandas.Series.aggregate : Similar Series method.
        """
        ),
        examples=dedent(
            """
        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        >>> df
           A  B  C
        0  1  4  7
        1  2  5  8
        2  3  6  9
        >>> df.rolling(2, win_type="boxcar").agg("mean")
             A    B    C
        0  NaN  NaN  NaN
        1  1.5  4.5  7.5
        2  2.5  5.5  8.5
        """
        ),
        klass="Series/DataFrame",
        axis="",
    )
    def aggregate(self, func, *args, **kwargs):
        result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()
        if result is None:
            # these must apply directly
            result = func(self)
        return result
    agg = aggregate
    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_scipy,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="weighted window sum",
        agg_method="sum",
    )
    def sum(self, *args, **kwargs):
        nv.validate_window_func("sum", args, kwargs)
        window_func = window_aggregations.roll_weighted_sum
        # error: Argument 1 to "_apply" of "Window" has incompatible type
        # "Callable[[ndarray, ndarray, int], ndarray]"; expected
        # "Callable[[ndarray, int, int], ndarray]"
        return self._apply(window_func, name="sum", **kwargs)  # type: ignore[arg-type]
    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_scipy,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="weighted window mean",
        agg_method="mean",
    )
    def mean(self, *args, **kwargs):
        nv.validate_window_func("mean", args, kwargs)
        window_func = window_aggregations.roll_weighted_mean
        # error: Argument 1 to "_apply" of "Window" has incompatible type
        # "Callable[[ndarray, ndarray, int], ndarray]"; expected
        # "Callable[[ndarray, int, int], ndarray]"
        return self._apply(window_func, name="mean", **kwargs)  # type: ignore[arg-type]
    @doc(
        template_header,
        ".. versionadded:: 1.0.0 \n\n",
        create_section_header("Parameters"),
        kwargs_scipy,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="weighted window variance",
        agg_method="var",
    )
    def var(self, ddof: int = 1, *args, **kwargs):
        nv.validate_window_func("var", args, kwargs)
        window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof)
        # std() forwards name="std" through kwargs; discard it since _apply is
        # called with an explicit name below.
        kwargs.pop("name", None)
        return self._apply(window_func, name="var", **kwargs)
    @doc(
        template_header,
        ".. versionadded:: 1.0.0 \n\n",
        create_section_header("Parameters"),
        kwargs_scipy,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="weighted window standard deviation",
        agg_method="std",
    )
    def std(self, ddof: int = 1, *args, **kwargs):
        nv.validate_window_func("std", args, kwargs)
        # std = sqrt(var); zsqrt clips tiny negative rounding errors to 0.
        return zsqrt(self.var(ddof=ddof, name="std", **kwargs))
class RollingAndExpandingMixin(BaseWindow):
    # Shared implementations of the rolling/expanding aggregations; concrete
    # subclasses (Rolling, Expanding) wrap these with argument validation and
    # @doc templates.
    def count(self):
        # NOTE(review): count is implemented via roll_sum — presumably _apply
        # feeds a notna mask when name == "count"; verify in BaseWindow._apply.
        window_func = window_aggregations.roll_sum
        return self._apply(window_func, name="count")
    def apply(
        self,
        func: Callable[..., Any],
        raw: bool = False,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        args: tuple[Any, ...] | None = None,
        kwargs: dict[str, Any] | None = None,
    ):
        """Apply an arbitrary function per window via cython or numba."""
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        if not is_bool(raw):
            raise ValueError("raw parameter must be `True` or `False`")
        numba_cache_key = None
        if maybe_use_numba(engine):
            # numba operates on raw ndarrays only.
            if raw is False:
                raise ValueError("raw must be `True` when using the numba engine")
            caller_name = type(self).__name__
            if self.method == "single":
                apply_func = generate_numba_apply_func(
                    args, kwargs, func, engine_kwargs, caller_name
                )
                numba_cache_key = (func, f"{caller_name}_apply_single")
            else:
                apply_func = generate_numba_table_func(
                    args, kwargs, func, engine_kwargs, f"{caller_name}_apply"
                )
                numba_cache_key = (func, f"{caller_name}_apply_table")
        elif engine in ("cython", None):
            if engine_kwargs is not None:
                raise ValueError("cython engine does not accept engine_kwargs")
            apply_func = self._generate_cython_apply_func(args, kwargs, raw, func)
        else:
            raise ValueError("engine must be either 'numba' or 'cython'")
        return self._apply(
            apply_func,
            numba_cache_key=numba_cache_key,
        )
    def _generate_cython_apply_func(
        self,
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
        raw: bool,
        function: Callable[..., Any],
    ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]:
        """Bind user function + args into the cython roll_apply signature."""
        from pandas import Series
        window_func = partial(
            window_aggregations.roll_apply,
            args=args,
            kwargs=kwargs,
            raw=raw,
            function=function,
        )
        def apply_func(values, begin, end, min_periods, raw=raw):
            # With raw=False the user function receives a Series carrying the
            # original index rather than a bare ndarray.
            if not raw:
                values = Series(values, index=self.obj.index)
            return window_func(values, begin, end, min_periods)
        return apply_func
    def sum(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        nv.validate_window_func("sum", args, kwargs)
        if maybe_use_numba(engine):
            # method == "table" processes the whole 2D block at once.
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nansum)
            else:
                func = np.nansum
            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_sum
        return self._apply(window_func, name="sum", **kwargs)
    def max(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        nv.validate_window_func("max", args, kwargs)
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmax)
            else:
                func = np.nanmax
            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_max
        return self._apply(window_func, name="max", **kwargs)
    def min(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        nv.validate_window_func("min", args, kwargs)
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmin)
            else:
                func = np.nanmin
            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_min
        return self._apply(window_func, name="min", **kwargs)
    def mean(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        nv.validate_window_func("mean", args, kwargs)
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmean)
            else:
                func = np.nanmean
            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_mean
        return self._apply(window_func, name="mean", **kwargs)
    def median(
        self,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        if maybe_use_numba(engine):
            if self.method == "table":
                func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian)
            else:
                func = np.nanmedian
            return self.apply(
                func,
                raw=True,
                engine=engine,
                engine_kwargs=engine_kwargs,
            )
        window_func = window_aggregations.roll_median_c
        return self._apply(window_func, name="median", **kwargs)
    def std(self, ddof: int = 1, *args, **kwargs):
        nv.validate_window_func("std", args, kwargs)
        window_func = window_aggregations.roll_var
        def zsqrt_func(values, begin, end, min_periods):
            # std = sqrt(var); zsqrt clips tiny negative rounding errors to 0.
            return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))
        return self._apply(
            zsqrt_func,
            name="std",
            **kwargs,
        )
    def var(self, ddof: int = 1, *args, **kwargs):
        nv.validate_window_func("var", args, kwargs)
        window_func = partial(window_aggregations.roll_var, ddof=ddof)
        return self._apply(
            window_func,
            name="var",
            **kwargs,
        )
    def skew(self, **kwargs):
        window_func = window_aggregations.roll_skew
        return self._apply(
            window_func,
            name="skew",
            **kwargs,
        )
    def sem(self, ddof: int = 1, *args, **kwargs):
        # Standard error of the mean: std / sqrt(count - ddof).
        return self.std(*args, **kwargs) / (self.count() - ddof).pow(0.5)
    def kurt(self, **kwargs):
        window_func = window_aggregations.roll_kurt
        return self._apply(
            window_func,
            name="kurt",
            **kwargs,
        )
    def quantile(self, quantile: float, interpolation: str = "linear", **kwargs):
        # The extreme quantiles degenerate to max/min, which have cheaper
        # dedicated kernels.
        if quantile == 1.0:
            window_func = window_aggregations.roll_max
        elif quantile == 0.0:
            window_func = window_aggregations.roll_min
        else:
            window_func = partial(
                window_aggregations.roll_quantile,
                quantile=quantile,
                interpolation=interpolation,
            )
        return self._apply(window_func, name="quantile", **kwargs)
    def cov(
        self,
        other: FrameOrSeriesUnion | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        **kwargs,
    ):
        from pandas import Series
        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )
            with np.errstate(all="ignore"):
                # cov = (E[XY] - E[X]E[Y]) scaled by n/(n - ddof), computed
                # per-window with the cython rolling kernels.
                mean_x_y = window_aggregations.roll_mean(
                    x_array * y_array, start, end, min_periods
                )
                mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods)
                mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)
                # count over pairwise-complete observations (NaN in either
                # series propagates through x + y).
                count_x_y = window_aggregations.roll_sum(
                    notna(x_array + y_array).astype(np.float64), start, end, 0
                )
                result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof))
            return Series(result, index=x.index, name=x.name)
        return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
    def corr(
        self,
        other: FrameOrSeriesUnion | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        **kwargs,
    ):
        from pandas import Series
        def corr_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )
            with np.errstate(all="ignore"):
                mean_x_y = window_aggregations.roll_mean(
                    x_array * y_array, start, end, min_periods
                )
                mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods)
                mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)
                count_x_y = window_aggregations.roll_sum(
                    notna(x_array + y_array).astype(np.float64), start, end, 0
                )
                x_var = window_aggregations.roll_var(
                    x_array, start, end, min_periods, ddof
                )
                y_var = window_aggregations.roll_var(
                    y_array, start, end, min_periods, ddof
                )
                # corr = cov(X, Y) / sqrt(var(X) * var(Y)), all rolling.
                numerator = (mean_x_y - mean_x * mean_y) * (
                    count_x_y / (count_x_y - ddof)
                )
                denominator = (x_var * y_var) ** 0.5
                result = numerator / denominator
            return Series(result, index=x.index, name=x.name)
        return self._apply_pairwise(self._selected_obj, other, pairwise, corr_func)
class Rolling(RollingAndExpandingMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
"method",
]
    def validate(self):
        """Validate rolling-specific arguments on top of BaseWindow checks."""
        super().validate()
        # we allow rolling on a datetimelike index
        if (
            self.obj.empty
            or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex))
        ) and isinstance(self.window, (str, BaseOffset, timedelta)):
            self._validate_monotonic()
            # this will raise ValueError on non-fixed freqs
            try:
                freq = to_offset(self.window)
            except (TypeError, ValueError) as err:
                raise ValueError(
                    f"passed window {self.window} is not "
                    "compatible with a datetimelike index"
                ) from err
            if isinstance(self._on, PeriodIndex):
                # PeriodIndex i8 values count periods, not nanoseconds, so
                # normalize the window length by the period span.
                self._win_freq_i8 = freq.nanos / (self._on.freq.nanos / self._on.freq.n)
            else:
                self._win_freq_i8 = freq.nanos
            # min_periods must be an integer
            if self.min_periods is None:
                self.min_periods = 1
        elif isinstance(self.window, BaseIndexer):
            # Passed BaseIndexer subclass should handle all other rolling kwargs
            return
        elif not is_integer(self.window) or self.window < 0:
            raise ValueError("window must be an integer 0 or greater")
def _validate_monotonic(self):
"""
Validate monotonic (increasing or decreasing).
"""
if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):
self._raise_monotonic_error()
def _raise_monotonic_error(self):
formatted = self.on
if self.on is None:
formatted = "index"
raise ValueError(f"{formatted} must be monotonic")
    @doc(
        _shared_docs["aggregate"],
        see_also=dedent(
            """
        See Also
        --------
        pandas.Series.rolling : Calling object with Series data.
        pandas.DataFrame.rolling : Calling object with DataFrame data.
        """
        ),
        examples=dedent(
            """
        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        >>> df
           A  B  C
        0  1  4  7
        1  2  5  8
        2  3  6  9
        >>> df.rolling(2).sum()
             A     B     C
        0  NaN   NaN   NaN
        1  3.0   9.0  15.0
        2  5.0  11.0  17.0
        >>> df.rolling(2).agg({"A": "sum", "B": "min"})
             A    B
        0  NaN  NaN
        1  3.0  4.0
        2  5.0  5.0
        """
        ),
        klass="Series/Dataframe",
        axis="",
    )
    def aggregate(self, func, *args, **kwargs):
        # Pure delegation; the BaseWindow implementation handles dispatch.
        return super().aggregate(func, *args, **kwargs)
    agg = aggregate
@doc(
template_header,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Examples"),
dedent(
"""
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
).replace("\n", "", 1),
window_method="rolling",
aggregation_description="count of non NaN observations",
agg_method="count",
)
def count(self):
if self.min_periods is None:
warnings.warn(
(
"min_periods=None will default to the size of window "
"consistent with other methods in a future version. "
"Specify min_periods=0 instead."
),
FutureWarning,
)
self.min_periods = 0
result = super().count()
self.min_periods = None
else:
result = super().count()
return result
    @doc(
        template_header,
        create_section_header("Parameters"),
        window_apply_parameters,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="custom aggregation function",
        agg_method="apply",
    )
    def apply(
        self,
        func: Callable[..., Any],
        raw: bool = False,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        args: tuple[Any, ...] | None = None,
        kwargs: dict[str, Any] | None = None,
    ):
        # Pure delegation to the shared mixin implementation.
        return super().apply(
            func,
            raw=raw,
            engine=engine,
            engine_kwargs=engine_kwargs,
            args=args,
            kwargs=kwargs,
        )
    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """
        >>> s = pd.Series([1, 2, 3, 4, 5])
        >>> s
        0    1
        1    2
        2    3
        3    4
        4    5
        dtype: int64
        >>> s.rolling(3).sum()
        0     NaN
        1     NaN
        2     6.0
        3     9.0
        4    12.0
        dtype: float64
        >>> s.rolling(3, center=True).sum()
        0     NaN
        1     6.0
        2     9.0
        3    12.0
        4     NaN
        dtype: float64
        For DataFrame, each sum is computed column-wise.
        >>> df = pd.DataFrame({{"A": s, "B": s ** 2}})
        >>> df
           A   B
        0  1   1
        1  2   4
        2  3   9
        3  4  16
        4  5  25
        >>> df.rolling(3).sum()
              A     B
        0   NaN   NaN
        1   NaN   NaN
        2   6.0  14.0
        3   9.0  29.0
        4  12.0  50.0
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="sum",
        agg_method="sum",
    )
    def sum(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        # Reject unsupported numpy-compat args, then delegate to the mixin.
        nv.validate_rolling_func("sum", args, kwargs)
        return super().sum(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes[:-1],
        window_method="rolling",
        aggregation_description="maximum",
        agg_method="max",
    )
    def max(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        # Reject unsupported numpy-compat args, then delegate to the mixin.
        nv.validate_rolling_func("max", args, kwargs)
        return super().max(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """
        Performing a rolling minimum with a window size of 3.
        >>> s = pd.Series([4, 3, 5, 2, 6])
        >>> s.rolling(3).min()
        0    NaN
        1    NaN
        2    3.0
        3    2.0
        4    2.0
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="minimum",
        agg_method="min",
    )
    def min(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        # Reject unsupported numpy-compat args, then delegate to the mixin.
        nv.validate_rolling_func("min", args, kwargs)
        return super().min(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """
        The below examples will show rolling mean calculations with window sizes of
        two and three, respectively.
        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.rolling(2).mean()
        0    NaN
        1    1.5
        2    2.5
        3    3.5
        dtype: float64
        >>> s.rolling(3).mean()
        0    NaN
        1    NaN
        2    2.0
        3    3.0
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="mean",
        agg_method="mean",
    )
    def mean(
        self,
        *args,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        # Reject unsupported numpy-compat args, then delegate to the mixin.
        nv.validate_rolling_func("mean", args, kwargs)
        return super().mean(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """
        Compute the rolling median of a series with a window size of 3.
        >>> s = pd.Series([0, 1, 2, 3, 4])
        >>> s.rolling(3).median()
        0    NaN
        1    NaN
        2    1.0
        3    2.0
        4    3.0
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="median",
        agg_method="median",
    )
    def median(
        self,
        engine: str | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        **kwargs,
    ):
        # Pure delegation; median takes no numpy-compat positional args.
        return super().median(engine=engine, engine_kwargs=engine_kwargs, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "numpy.std : Equivalent method for NumPy array.\n",
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """
        The default ``ddof`` of 1 used in :meth:`Series.std` is different
        than the default ``ddof`` of 0 in :func:`numpy.std`.
        A minimum of one period is required for the rolling calculation.
        The implementation is susceptible to floating point imprecision as
        shown in the example below.\n
        """
        ).replace("\n", "", 1),
        create_section_header("Examples"),
        dedent(
            """
        >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
        >>> s.rolling(3).std()
        0             NaN
        1             NaN
        2    5.773503e-01
        3    1.000000e+00
        4    1.000000e+00
        5    1.154701e+00
        6    2.580957e-08
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="standard deviation",
        agg_method="std",
    )
    def std(self, ddof: int = 1, *args, **kwargs):
        nv.validate_rolling_func("std", args, kwargs)
        # *args is validated (must be empty) and intentionally not forwarded.
        return super().std(ddof=ddof, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "numpy.var : Equivalent method for NumPy array.\n",
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """
        The default ``ddof`` of 1 used in :meth:`Series.var` is different
        than the default ``ddof`` of 0 in :func:`numpy.var`.
        A minimum of one period is required for the rolling calculation.
        The implementation is susceptible to floating point imprecision as
        shown in the example below.\n
        """
        ).replace("\n", "", 1),
        create_section_header("Examples"),
        dedent(
            """
        >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
        >>> s.rolling(3).var()
        0             NaN
        1             NaN
        2    3.333333e-01
        3    1.000000e+00
        4    1.000000e+00
        5    1.333333e+00
        6    6.661338e-16
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="variance",
        agg_method="var",
    )
    def var(self, ddof: int = 1, *args, **kwargs):
        nv.validate_rolling_func("var", args, kwargs)
        # *args is validated (must be empty) and intentionally not forwarded.
        return super().var(ddof=ddof, **kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "scipy.stats.skew : Third moment of a probability density.\n",
        template_see_also,
        create_section_header("Notes"),
        "A minimum of three periods is required for the rolling calculation.\n",
        window_method="rolling",
        aggregation_description="unbiased skewness",
        agg_method="skew",
    )
    def skew(self, **kwargs):
        # Pure delegation to the shared mixin implementation.
        return super().skew(**kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        "A minimum of one period is required for the calculation.\n\n",
        create_section_header("Examples"),
        dedent(
            """
        >>> s = pd.Series([0, 1, 2, 3])
        >>> s.rolling(2, min_periods=1).sem()
        0         NaN
        1    0.707107
        2    0.707107
        3    0.707107
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="standard error of mean",
        agg_method="sem",
    )
    def sem(self, ddof: int = 1, *args, **kwargs):
        # Standard error of the mean: std / sqrt(count - ddof).
        return self.std(*args, **kwargs) / (self.count() - ddof).pow(0.5)
    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "scipy.stats.kurtosis : Reference SciPy method.\n",
        template_see_also,
        create_section_header("Notes"),
        "A minimum of four periods is required for the calculation.\n\n",
        create_section_header("Examples"),
        dedent(
            """
        The example below will show a rolling calculation with a window size of
        four matching the equivalent function call using `scipy.stats`.
        >>> arr = [1, 2, 3, 4, 999]
        >>> import scipy.stats
        >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")
        -1.200000
        >>> print(f"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}")
        3.999946
        >>> s = pd.Series(arr)
        >>> s.rolling(4).kurt()
        0         NaN
        1         NaN
        2         NaN
        3   -1.200000
        4    3.999946
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="Fisher's definition of kurtosis without bias",
        agg_method="kurt",
    )
    def kurt(self, **kwargs):
        # Pure delegation to the shared mixin implementation.
        return super().kurt(**kwargs)
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        quantile : float
            Quantile to compute. 0 <= quantile <= 1.
        interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}
            This optional parameter specifies the interpolation method to use,
            when the desired quantile lies between two data points `i` and `j`:
                * linear: `i + (j - i) * fraction`, where `fraction` is the
                  fractional part of the index surrounded by `i` and `j`.
                * lower: `i`.
                * higher: `j`.
                * nearest: `i` or `j` whichever is nearest.
                * midpoint: (`i` + `j`) / 2.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """
        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.rolling(2).quantile(.4, interpolation='lower')
        0    NaN
        1    1.0
        2    2.0
        3    3.0
        dtype: float64
        >>> s.rolling(2).quantile(.4, interpolation='midpoint')
        0    NaN
        1    1.5
        2    2.5
        3    3.5
        dtype: float64
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="quantile",
        agg_method="quantile",
    )
    def quantile(self, quantile: float, interpolation: str = "linear", **kwargs):
        # Pure delegation to the shared mixin implementation.
        return super().quantile(
            quantile=quantile,
            interpolation=interpolation,
            **kwargs,
        )
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndexed DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="rolling",
        aggregation_description="sample covariance",
        agg_method="cov",
    )
    def cov(
        self,
        other: FrameOrSeriesUnion | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        **kwargs,
    ):
        # Pure delegation to the shared mixin implementation.
        return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
    # Rolling Pearson correlation. Docstring assembled by @doc; the dedent()
    # string arguments below are runtime data and must not be edited casually.
    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndexed DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        ddof : int, default 1
            Delta Degrees of Freedom. The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        dedent(
            """
        cov : Similar method to calculate covariance.
        numpy.corrcoef : NumPy Pearson's correlation calculation.
        """
        ).replace("\n", "", 1),
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """
        This function uses Pearson's definition of correlation
        (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
        When `other` is not specified, the output will be self correlation (e.g.
        all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
        set to `True`.
        Function will return ``NaN`` for correlations of equal valued sequences;
        this is the result of a 0/0 division error.
        When `pairwise` is set to `False`, only matching columns between `self` and
        `other` will be used.
        When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
        with the original index on the first level, and the `other` DataFrame
        columns on the second level.
        In the case of missing elements, only complete pairwise observations
        will be used.\n
        """
        ).replace("\n", "", 1),
        create_section_header("Examples"),
        dedent(
            """
        The below example shows a rolling calculation with a window size of
        four matching the equivalent function call using :meth:`numpy.corrcoef`.
        >>> v1 = [3, 3, 3, 5, 8]
        >>> v2 = [3, 4, 4, 4, 8]
        >>> # numpy returns a 2X2 array, the correlation coefficient
        >>> # is the number at entry [0][1]
        >>> print(f"{{np.corrcoef(v1[:-1], v2[:-1])[0][1]:.6f}}")
        0.333333
        >>> print(f"{{np.corrcoef(v1[1:], v2[1:])[0][1]:.6f}}")
        0.916949
        >>> s1 = pd.Series(v1)
        >>> s2 = pd.Series(v2)
        >>> s1.rolling(4).corr(s2)
        0         NaN
        1         NaN
        2         NaN
        3    0.333333
        4    0.916949
        dtype: float64
        The below example shows a similar rolling calculation on a
        DataFrame using the pairwise option.
        >>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
        [46., 31.], [50., 36.]])
        >>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
        [[1.        0.6263001]
         [0.6263001 1.       ]]
        >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
        [[1.        0.5553681]
         [0.5553681 1.       ]]
        >>> df = pd.DataFrame(matrix, columns=['X','Y'])
        >>> df
              X     Y
        0  51.0  35.0
        1  49.0  30.0
        2  47.0  32.0
        3  46.0  31.0
        4  50.0  36.0
        >>> df.rolling(4).corr(pairwise=True)
                    X         Y
        0 X       NaN       NaN
          Y       NaN       NaN
        1 X       NaN       NaN
          Y       NaN       NaN
        2 X       NaN       NaN
          Y       NaN       NaN
        3 X  1.000000  0.626300
          Y  0.626300  1.000000
        4 X  1.000000  0.555368
          Y  0.555368  1.000000
        """
        ).replace("\n", "", 1),
        window_method="rolling",
        aggregation_description="correlation",
        agg_method="corr",
    )
    def corr(
        self,
        other: FrameOrSeriesUnion | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        **kwargs,
    ):
        # Thin wrapper: delegates to the shared implementation on the parent class.
        return super().corr(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
# Rolling shares its user-facing class docstring with Window.
Rolling.__doc__ = Window.__doc__
class RollingGroupby(BaseWindowGroupby, Rolling):
    """
    Provide a rolling groupby implementation.
    """
    _attributes = Rolling._attributes + BaseWindowGroupby._attributes
    def _get_window_indexer(self) -> GroupbyIndexer:
        """
        Return an indexer class that will compute the window start and end bounds
        Returns
        -------
        GroupbyIndexer
        """
        rolling_indexer: type[BaseIndexer]
        indexer_kwargs: dict[str, Any] | None = None
        index_array = self._index_array
        if isinstance(self.window, BaseIndexer):
            # Custom indexer supplied by the user: it will be re-instantiated per
            # group from its own constructor kwargs (taken from __dict__).
            rolling_indexer = type(self.window)
            indexer_kwargs = self.window.__dict__
            assert isinstance(indexer_kwargs, dict)  # for mypy
            # We'll be using the index of each group later
            indexer_kwargs.pop("index_array", None)
            window = 0
        elif self._win_freq_i8 is not None:
            # Frequency-based window (e.g. "2s"): variable-sized windows over the
            # group's index values; window size is the frequency in integer form.
            rolling_indexer = VariableWindowIndexer
            window = self._win_freq_i8
        else:
            # Plain integer window size.
            rolling_indexer = FixedWindowIndexer
            window = self.window
        window_indexer = GroupbyIndexer(
            index_array=index_array,
            window_size=window,
            # NOTE(review): "indicies" matches the GroupbyIndexer keyword spelling
            # in this codebase -- do not "correct" it here alone.
            groupby_indicies=self._grouper.indices,
            window_indexer=rolling_indexer,
            indexer_kwargs=indexer_kwargs,
        )
        return window_indexer
    def _validate_monotonic(self):
        """
        Validate that on is monotonic;
        in this case we have to check only for nans, because
        monotonicity was already validated at a higher level.
        """
        if self._on.hasnans:
            self._raise_monotonic_error()
| 32.859155 | 100 | 0.561834 |
ae5ff9ef0b8bd7c7c40d733b287bf4ed468292d5 | 5,459 | py | Python | python_modules/dagster/dagster/core/storage/runs/sqlite/sqlite_run_storage.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | python_modules/dagster/dagster/core/storage/runs/sqlite/sqlite_run_storage.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | python_modules/dagster/dagster/core/storage/runs/sqlite/sqlite_run_storage.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | import os
from contextlib import contextmanager
from urllib.parse import urljoin, urlparse
import sqlalchemy as db
from dagster import StringSource, check
from dagster.core.storage.sql import (
check_alembic_revision,
create_engine,
get_alembic_config,
handle_schema_errors,
run_alembic_downgrade,
run_alembic_upgrade,
stamp_alembic_rev,
)
from dagster.core.storage.sqlite import create_db_conn_string
from dagster.serdes import ConfigurableClass, ConfigurableClassData
from dagster.utils import mkdir_p
from sqlalchemy.pool import NullPool
from ..schema import RunStorageSqlMetadata, RunTagsTable, RunsTable
from ..sql_run_storage import SqlRunStorage
class SqliteRunStorage(SqlRunStorage, ConfigurableClass):
    """SQLite-backed run storage.
    Users should not directly instantiate this class; it is instantiated by internal machinery when
    ``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
    This is the default run storage when none is specified in the ``dagster.yaml``.
    To explicitly specify SQLite for run storage, you can add a block such as the following to your
    ``dagster.yaml``:
    .. code-block:: YAML
        run_storage:
          module: dagster.core.storage.runs
          class: SqliteRunStorage
          config:
            base_dir: /path/to/dir
    The ``base_dir`` param tells the run storage where on disk to store the database.
    """
    def __init__(self, conn_string, inst_data=None):
        # conn_string: sqlite SQLAlchemy URL pointing at the runs database file.
        check.str_param(conn_string, "conn_string")
        self._conn_string = conn_string
        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
        super().__init__()
    @property
    def inst_data(self):
        # ConfigurableClassData captured from dagster.yaml (may be None).
        return self._inst_data
    @classmethod
    def config_type(cls):
        # Config schema for the `config:` block in the class docstring above.
        return {"base_dir": StringSource}
    @staticmethod
    def from_config_value(inst_data, config_value):
        # ConfigurableClass hook: construct storage from validated yaml config.
        return SqliteRunStorage.from_local(inst_data=inst_data, **config_value)
    @staticmethod
    def from_local(base_dir, inst_data=None):
        """Create (and initialize, if absent) a runs database under ``base_dir``."""
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "runs")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)
        should_mark_indexes = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                # Fresh database: create the schema, enable WAL journaling (better
                # for concurrent readers), and stamp at the latest alembic revision.
                RunStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                should_mark_indexes = True
        run_storage = SqliteRunStorage(conn_string, inst_data)
        if should_mark_indexes:
            # mark all secondary indexes
            run_storage.build_missing_indexes()
        return run_storage
    @contextmanager
    def connect(self):
        """Yield a raw connection; schema-mismatch errors surface a migration hint."""
        engine = create_engine(self._conn_string, poolclass=NullPool)
        conn = engine.connect()
        try:
            with handle_schema_errors(
                conn,
                get_alembic_config(__file__),
                msg="Sqlite run storage requires migration",
            ):
                yield conn
        finally:
            conn.close()
    def _alembic_upgrade(self, rev="head"):
        # Apply alembic migrations up to `rev`.
        alembic_config = get_alembic_config(__file__)
        with self.connect() as conn:
            run_alembic_upgrade(alembic_config, conn, rev=rev)
    def _alembic_downgrade(self, rev="head"):
        # Revert alembic migrations down to `rev`.
        alembic_config = get_alembic_config(__file__)
        with self.connect() as conn:
            run_alembic_downgrade(alembic_config, conn, rev=rev)
    def upgrade(self):
        """Bring the database fully up to date (legacy relocation, then alembic)."""
        self._check_for_version_066_migration_and_perform()
        self._alembic_upgrade()
    # In version 0.6.6, we changed the layout of the of the sqllite dbs on disk
    # to move from the root of DAGSTER_HOME/runs.db to DAGSTER_HOME/history/runs.bd
    # This function checks for that condition and does the move
    def _check_for_version_066_migration_and_perform(self):
        old_conn_string = "sqlite://" + urljoin(urlparse(self._conn_string).path, "../runs.db")
        path_to_old_db = urlparse(old_conn_string).path
        # sqlite URLs look like `sqlite:///foo/bar/baz on Unix/Mac` but on Windows they look like
        # `sqlite:///D:/foo/bar/baz` (or `sqlite:///D:\foo\bar\baz`)
        if os.name == "nt":
            path_to_old_db = path_to_old_db.lstrip("/")
        if os.path.exists(path_to_old_db):
            # Copy every legacy run into the new database, then delete the old file.
            old_storage = SqliteRunStorage(old_conn_string)
            old_runs = old_storage.get_runs()
            for run in old_runs:
                self.add_run(run)
            os.unlink(path_to_old_db)
    def delete_run(self, run_id):
        """Override the default sql delete run implementation until we can get full
        support on cascading deletes"""
        check.str_param(run_id, "run_id")
        # Manual cascade: delete the tag rows first, then the run row itself.
        remove_tags = db.delete(RunTagsTable).where(RunTagsTable.c.run_id == run_id)
        remove_run = db.delete(RunsTable).where(RunsTable.c.run_id == run_id)
        with self.connect() as conn:
            conn.execute(remove_tags)
            conn.execute(remove_run)
| 37.390411 | 99 | 0.680528 |
23df582f4ff9b49741bd1a233f57e131163ddf47 | 36,541 | py | Python | TradeAPI.py | Lavabar/rl_trading | 0f021bb348d748e1c4dc04b972c40f9fd7081eed | [
"MIT"
] | 2 | 2018-10-24T17:13:30.000Z | 2021-12-14T17:31:18.000Z | TradeAPI.py | Lavabar/rl_trading | 0f021bb348d748e1c4dc04b972c40f9fd7081eed | [
"MIT"
] | null | null | null | TradeAPI.py | Lavabar/rl_trading | 0f021bb348d748e1c4dc04b972c40f9fd7081eed | [
"MIT"
] | null | null | null | # Gist example of IB wrapper ...
#
# Download API from http://interactivebrokers.github.io/#
#
# Install python API code /IBJts/source/pythonclient $ python3 setup.py install
#
# Note: The test cases, and the documentation refer to a python package called IBApi,
# but the actual package is called ibapi. Go figure.
#
# Get the latest version of the gateway:
# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix
# (for unix: windows and mac users please find your own version)
#
# Run the gateway
#
# user: edemo
# pwd: demo123
#
# Now I'll try and replicate the historical data example
from ibapi.wrapper import EWrapper
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from ibapi.order import Order
from ibapi.execution import ExecutionFilter
import time
from threading import Thread
import queue
import datetime
from copy import deepcopy
## these are just arbitrary numbers in leiu of a policy on this sort of thing
# Arbitrary reqId values used for requests sent to the IB gateway.
DEFAULT_MARKET_DATA_ID=50
DEFAULT_GET_CONTRACT_ID=43
DEFAULT_EXEC_TICKER=78
## This is the reqId IB API sends when a fill is received
FILL_CODE=-1
"""
Next section is 'scaffolding'
"""
# Labels used to tag items on the mixed account-data stream so the stream can
# be split back apart later (see list_of_identified_items.seperate_into_dict).
ACCOUNT_UPDATE_FLAG = "update"
ACCOUNT_VALUE_FLAG = "value"
ACCOUNT_TIME_FLAG = "time"
class identifed_as(object):
    """
    Tag a piece of data with a string label so mixed streams (account values,
    portfolio updates, ...) can be split apart later.

    Note: the class name's missing 'i' is deliberate here -- callers elsewhere
    in this module reference it by this spelling.
    """

    def __init__(self, label, data):
        self.label = label
        self.data = data

    def __repr__(self):
        return f"Identified as {self.label}"
class list_of_identified_items(list):
    """
    A list of elements, each of class identifed_as (or duck equivalent,
    i.e. anything with .label and .data attributes).

    Used to seperate out accounting data.
    """
    def seperate_into_dict(self):
        """
        Group the items' payloads by their label.

        Single pass over the list. (The previous implementation collected every
        label -- duplicates included -- and then rescanned the entire list once
        per label, i.e. O(n^2); the result is identical: first-seen label order,
        payloads in original element order.)

        :return: dict, keys are labels, each element is a list of items matching label
        """
        dict_data = {}
        for element in self:
            # setdefault keeps first-occurrence key order (py3.7+ dicts).
            dict_data.setdefault(element.label, []).append(element.data)
        return dict_data
## Sentinel objects: end-of-stream marker and queue life-cycle states.
FINISHED = object()
STARTED = object()
TIME_OUT = object()


class finishableQueue(object):
    """
    Wrap a queue.Queue so a consumer can drain it until a FINISHED marker
    arrives, or until a per-element timeout expires.
    """

    def __init__(self, queue_to_finish):
        self._queue = queue_to_finish
        self.status = STARTED

    def get(self, timeout):
        """
        Drain elements until FINISHED is seen or a single get() times out.

        :param timeout: seconds to wait for each individual element
        :return: list of the elements received (FINISHED marker excluded)
        """
        collected = []
        while True:
            try:
                element = self._queue.get(timeout=timeout)
            except queue.Empty:
                # No terminator arrived in time -- give up and return what we have.
                self.status = TIME_OUT
                break
            if element is FINISHED:
                self.status = FINISHED
                break
            collected.append(element)
        return collected

    def timed_out(self):
        return self.status is TIME_OUT
## cache used for accounting data
class simpleCache(object):
    """
    Timed cache for accounting data.

    Values live in the nested dict ``self._cache``, keyed first by account name
    and then by a cache label. ``update_data`` is deliberately abstract: a
    subclass (or a monkey-patch, as TestClient does) must supply the refresh
    logic used when an entry is stale or missing.
    """

    def __init__(self, max_staleness_seconds):
        self._cache = {}
        self._cache_updated_local_time = {}
        self._max_staleness_seconds = max_staleness_seconds

    def __repr__(self):
        return "Cache with labels" + ",".join(self._cache.keys())

    def update_data(self, accountName):
        # Must be overridden / injected before the cache can refresh itself.
        raise Exception("You need to set this method in an inherited class")

    def _get_last_updated_time(self, accountName, cache_label):
        """Local time of the last update for (account, label), or None if never."""
        per_account = self._cache_updated_local_time.get(accountName)
        if per_account is None:
            return None
        return per_account.get(cache_label)

    def _set_time_of_updated_cache(self, accountName, cache_label):
        # Record "now" as the refresh time for this (account, label) pair.
        per_account = self._cache_updated_local_time.setdefault(accountName, {})
        per_account[cache_label] = time.time()

    def _is_data_stale(self, accountName, cache_label):
        """
        True when the entry has never been updated, or its age exceeds the
        configured maximum staleness.
        """
        last_update = self._get_last_updated_time(accountName, cache_label)
        if last_update is None:
            # Never refreshed: stale by construction.
            return True
        age_seconds = time.time() - last_update
        return age_seconds > self._max_staleness_seconds

    def _check_cache_empty(self, accountName, cache_label):
        """True when no slot exists yet for (account, label)."""
        per_account = self._cache.get(accountName)
        if per_account is None:
            return True
        return cache_label not in per_account

    def _return_cache_values(self, accountName, cache_label):
        """Cached value for (account, label), or None when the slot is absent."""
        if self._check_cache_empty(accountName, cache_label):
            return None
        return self._cache[accountName][cache_label]

    def _create_cache_element(self, accountName, cache_label):
        # Ensure the nested slot exists; leave existing values untouched.
        per_account = self._cache.setdefault(accountName, {})
        per_account.setdefault(cache_label, None)

    def get_updated_cache(self, accountName, cache_label):
        """
        Refresh the entry if stale or missing, then return its current value.
        """
        needs_refresh = (
            self._is_data_stale(accountName, cache_label)
            or self._check_cache_empty(accountName, cache_label)
        )
        if needs_refresh:
            self.update_data(accountName)
        return self._return_cache_values(accountName, cache_label)

    def update_cache(self, accountName, dict_with_data):
        """
        Store every (label -> value) pair for the account and stamp the
        refresh time of each.
        """
        for cache_label, value in dict_with_data.items():
            self._create_cache_element(accountName, cache_label)
            self._cache[accountName][cache_label] = value
            self._set_time_of_updated_cache(accountName, cache_label)
"""
Mergable objects are used to capture order and execution information which comes from different sources and needs
glueing together
"""
## marker to show a mergable object hasn't got any attributes
## marker to show a mergable object hasn't got any attributes
NO_ATTRIBUTES_SET=object()


class mergableObject(object):
    """
    Generic object to make it easier to munge together incomplete information
    about orders and executions.

    Subclasses override .attributes() to declare which keyword arguments are
    stored; the base class stores none.
    """

    def __init__(self, id, **kwargs):
        """
        :param id: master reference, has to be an immutable type
        :param kwargs: other attributes which will appear in list returned by attributes() method
        """
        self.id = id
        attr_to_use = self.attributes()
        if attr_to_use is NO_ATTRIBUTES_SET:
            # BUGFIX: previously `argname in attr_to_use` raised TypeError here
            # (the sentinel is not iterable) whenever kwargs were passed to an
            # attribute-less object. Treat it as an empty attribute list so the
            # kwargs are ignored with a warning instead.
            attr_to_use = []
        for argname in kwargs:
            if argname in attr_to_use:
                setattr(self, argname, kwargs[argname])
            else:
                print("TRADEAPI:\tIgnoring argument passed %s: is this the right kind of object? If so, add to .attributes() method" % argname)

    def attributes(self):
        ## should return a list of str here
        ## eg return ["thingone", "thingtwo"]
        return NO_ATTRIBUTES_SET

    def _name(self):
        return "Generic Mergable object - "

    def __repr__(self):
        attr_list = self.attributes()
        if attr_list is NO_ATTRIBUTES_SET:
            return self._name()
        return self._name()+" ".join([ "%s: %s" % (attrname, str(getattr(self, attrname))) for attrname in attr_list
                                                        if getattr(self, attrname, None) is not None])

    def merge(self, details_to_merge, overwrite=True):
        """
        Merge two things

        self.id must match

        :param details_to_merge: thing to merge into current one
        :param overwrite: if True then overwrite current values, otherwise keep current values
        :return: merged thing
        """
        if self.id != details_to_merge.id:
            # BUGFIX: use %s, not %d -- execution ids are strings, so %d raised
            # a TypeError that masked the real mismatch error.
            raise Exception("Can't merge details with different IDS %s and %s!" % (self.id, details_to_merge.id))
        arg_list = self.attributes()
        if arg_list is NO_ATTRIBUTES_SET:
            ## self is a generic, empty, object.
            ## I can just replace it wholesale with the new object
            new_object = details_to_merge
            return new_object
        new_object = deepcopy(self)
        for argname in arg_list:
            my_arg_value = getattr(self, argname, None)
            new_arg_value = getattr(details_to_merge, argname, None)
            if new_arg_value is not None:
                ## have something to merge
                if my_arg_value is not None and not overwrite:
                    ## conflict with current value, don't want to overwrite, skip
                    pass
                else:
                    setattr(new_object, argname, new_arg_value)
        return new_object
class orderInformation(mergableObject):
    """
    Mergable container for information about a single order.

    The master id is the orderID, e.g.
    ``order_details = orderInformation(orderID, contract=....)``
    """

    def _name(self):
        return "Order - "

    def attributes(self):
        # Keyword arguments accepted by __init__ / merged by .merge().
        return [
            "contract", "order", "orderstate", "status",
            "filled", "remaining", "avgFillPrice", "permid",
            "parentId", "lastFillPrice", "clientId", "whyHeld",
            "mktCapPrice",
        ]
class execInformation(mergableObject):
    """
    Mergable container for information about a single execution (fill).

    The master id is the execid, e.g.
    ``exec_info = execInformation(execid, contract= ... )``
    """

    def _name(self):
        return "Execution - "

    def attributes(self):
        # Keyword arguments accepted by __init__ / merged by .merge().
        return [
            "contract", "ClientId", "OrderId", "time", "AvgPrice", "Price",
            "AcctNumber", "Shares", "Commission", "commission_currency",
            "realisedpnl",
        ]
class list_of_mergables(list):
    """
    A list of mergable objects, like execution details or order information
    """
    def merged_dict(self):
        """
        Merge and remove duplicates of a stack of mergable objects with unique ID
        Essentially creates the union of the objects in the stack
        :return: dict of mergableObjects, keynames .id
        """
        ## We create a new stack of order details which will contain merged order or execution details
        new_stack_dict = {}
        # NOTE(review): the local name `id` shadows the builtin inside this loop.
        for stack_member in self:
            id = stack_member.id
            if id not in new_stack_dict.keys():
                ## not in new stack yet, create a 'blank' object
                ## Note this will have no attributes, so will be replaced when merged with a proper object
                new_stack_dict[id] = mergableObject(id)
            existing_stack_member = new_stack_dict[id]
            ## add on the new information by merging
            ## if this was an empty 'blank' object it will just be replaced with stack_member
            new_stack_dict[id] = existing_stack_member.merge(stack_member)
        return new_stack_dict
    def blended_dict(self, stack_to_merge):
        """
        Merges any objects in new_stack with the same ID as those in the original_stack
        :param self: list of mergableObject or inheritors thereof
        :param stack_to_merge: list of mergableObject or inheritors thereof
        :return: dict of mergableObjects, keynames .id
        """
        ## We create a new dict stack of order details which will contain merged details
        new_stack = {}
        ## convert the thing we're merging into a dictionary
        stack_to_merge_dict = stack_to_merge.merged_dict()
        for stack_member in self:
            id = stack_member.id
            # deepcopy so the merged result never aliases members of the original list.
            new_stack[id] = deepcopy(stack_member)
            if id in stack_to_merge_dict.keys():
                ## add on the new information by merging without overwriting
                # overwrite=False: values already present on self's members win.
                new_stack[id] = stack_member.merge(stack_to_merge_dict[id], overwrite=False)
        return new_stack
## Just to make the code more readable
class list_of_execInformation(list_of_mergables):
    # Name-only specialisation: behaves exactly like list_of_mergables, the
    # name documents that members are execInformation objects.
    pass
class list_of_orderInformation(list_of_mergables):
    # Name-only specialisation: behaves exactly like list_of_mergables, the
    # name documents that members are orderInformation objects.
    pass
"""
Now into the main bit of the code; Wrapper and Client objects
"""
class TestWrapper(EWrapper):
    """
    The wrapper deals with the action coming back from the IB gateway or TWS instance
    We override methods in EWrapper that will get called when this action happens, like currentTime
    Extra methods are added as we need to store the results in this object
    """
    def __init__(self):
        # Per-request queues, keyed by reqId where the API echoes one back.
        self._my_contract_details = {}
        self._my_requested_execution = {}
        ## We set these up as we could get things coming along before we run an init
        self._my_executions_stream = queue.Queue()
        self._my_commission_stream = queue.Queue()
        self._my_open_orders = queue.Queue()
        ## use a dict as could have different accountids
        self._my_accounts = {}
        ## We set these up as we could get things coming along before we run an init
        self._my_positions = queue.Queue()
        self._my_errors = queue.Queue()
    ## error handling code
    def init_error(self):
        """Replace the error queue with a fresh one (old messages discarded)."""
        error_queue=queue.Queue()
        self._my_errors = error_queue
    def get_error(self, timeout=5):
        """Pop one error message, or None if nothing arrives within timeout."""
        if self.is_error():
            try:
                return self._my_errors.get(timeout=timeout)
            except queue.Empty:
                return None
        return None
    def is_error(self):
        """True if at least one error message is waiting."""
        an_error_if=not self._my_errors.empty()
        return an_error_if
    def error(self, id, errorCode, errorString):
        ## Overriden method
        # All IB errors are funnelled into a single queue as formatted strings.
        errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
        self._my_errors.put(errormsg)
    ## get positions code
    def init_positions(self):
        """Create a fresh queue for a reqPositions() call and return it."""
        positions_queue = self._my_positions = queue.Queue()
        return positions_queue
    def position(self, account, contract, position,
                 avgCost):
        ## uses a simple tuple, but you could do other, fancier, things here
        position_object = (account, contract, position,
                           avgCost)
        self._my_positions.put(position_object)
    def positionEnd(self):
        ## overriden method
        # Sentinel tells finishableQueue consumers the stream is complete.
        self._my_positions.put(FINISHED)
    ## get accounting data
    def init_accounts(self, accountName):
        """Create a fresh queue for one account's update stream and return it."""
        accounting_queue = self._my_accounts[accountName] = queue.Queue()
        return accounting_queue
    def updateAccountValue(self, key:str, val:str, currency:str,
                           accountName:str):
        ## use this to seperate out different account data
        data = identifed_as(ACCOUNT_VALUE_FLAG, (key,val, currency))
        self._my_accounts[accountName].put(data)
    def updatePortfolio(self, contract, position:float,
                        marketPrice:float, marketValue:float,
                        averageCost:float, unrealizedPNL:float,
                        realizedPNL:float, accountName:str):
        ## use this to seperate out different account data
        data = identifed_as(ACCOUNT_UPDATE_FLAG, (contract, position, marketPrice, marketValue, averageCost,
                                                  unrealizedPNL, realizedPNL))
        self._my_accounts[accountName].put(data)
    # NOTE(review): disabled below -- accountName is not a parameter of this
    # callback's signature, so the original body could not work as written.
    # def updateAccountTime(self, timeStamp:str):
        ## use this to seperate out different account data
    #     data = identifed_as(ACCOUNT_TIME_FLAG, timeStamp)
    #     self._my_accounts[accountName].put(data)
    def accountDownloadEnd(self, accountName:str):
        """End-of-stream marker for one account's update queue."""
        self._my_accounts[accountName].put(FINISHED)
    ## get contract details code
    def init_contractdetails(self, reqId):
        """Create a fresh queue for one reqContractDetails call and return it."""
        contract_details_queue = self._my_contract_details[reqId] = queue.Queue()
        return contract_details_queue
    def contractDetails(self, reqId, contractDetails):
        ## overridden method
        # Lazily create the queue in case details arrive before init was called.
        if reqId not in self._my_contract_details.keys():
            self.init_contractdetails(reqId)
        self._my_contract_details[reqId].put(contractDetails)
    def contractDetailsEnd(self, reqId):
        ## overriden method
        if reqId not in self._my_contract_details.keys():
            self.init_contractdetails(reqId)
        self._my_contract_details[reqId].put(FINISHED)
    # orders
    def init_open_orders(self):
        """Create a fresh queue for open-order callbacks and return it."""
        open_orders_queue = self._my_open_orders = queue.Queue()
        return open_orders_queue
    def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permid,
                    parentId, lastFillPrice, clientId, whyHeld, mktCapPrice):
        """Status change for a working order; stored as partial orderInformation."""
        # NOTE(review): `remaining` is accepted but never stored below, even
        # though orderInformation.attributes() lists it -- looks like an
        # omission; confirm before relying on .remaining from this path.
        order_details = orderInformation(orderId, status=status, filled=filled,
                                         avgFillPrice=avgFillPrice, permid=permid,
                                         parentId=parentId, lastFillPrice=lastFillPrice, clientId=clientId,
                                         whyHeld=whyHeld, mktCapPrice=mktCapPrice)
        self._my_open_orders.put(order_details)
    def openOrder(self, orderId, contract, order, orderstate):
        """
        Tells us about any orders we are working now
        overriden method
        """
        order_details = orderInformation(orderId, contract=contract, order=order, orderstate = orderstate)
        self._my_open_orders.put(order_details)
    def openOrderEnd(self):
        """
        Finished getting open orders
        Overriden method
        """
        self._my_open_orders.put(FINISHED)
    """ Executions and commissions
    requested executions get dropped into single queue: self._my_requested_execution[reqId]
    Those that arrive as orders are completed without a relevant reqId go into self._my_executions_stream
    All commissions go into self._my_commission_stream (could be requested or not)
    The *_stream queues are permanent, and init when the TestWrapper instance is created
    """
    def init_requested_execution_data(self, reqId):
        """Create a fresh queue for one reqExecutions call and return it."""
        execution_queue = self._my_requested_execution[reqId] = queue.Queue()
        return execution_queue
    def access_commission_stream(self):
        ## Access to the 'permanent' queue for commissions
        return self._my_commission_stream
    def access_executions_stream(self):
        ## Access to the 'permanent' queue for executions
        return self._my_executions_stream
    def commissionReport(self, commreport):
        """
        This is called if
        a) we have submitted an order and a fill has come back
        b) We have asked for recent fills to be given to us
        However no reqid is ever passed
        overriden method
        :param commreport:
        :return:
        """
        commdata = execInformation(commreport.execId, Commission=commreport.commission,
                                   commission_currency = commreport.currency,
                                   realisedpnl = commreport.realizedPNL)
        ## there are some other things in commreport you could add
        ## make sure you add them to the .attributes() field of the execInformation class
        ## These always go into the 'stream' as could be from a request, or a fill thats just happened
        self._my_commission_stream.put(commdata)
    def execDetails(self, reqId, contract, execution):
        """
        This is called if
        a) we have submitted an order and a fill has come back (in which case reqId will be FILL_CODE)
        b) We have asked for recent fills to be given to us (reqId will be
        See API docs for more details
        """
        ## overriden method
        execdata = execInformation(execution.execId, contract=contract,
                                   ClientId=execution.clientId, OrderId=execution.orderId,
                                   time=execution.time, AvgPrice=execution.avgPrice,
                                   AcctNumber=execution.acctNumber, Shares=execution.shares,
                                   Price = execution.price)
        ## there are some other things in execution you could add
        ## make sure you add them to the .attributes() field of the execInformation class
        reqId = int(reqId)
        ## We eithier put this into a stream if its just happened, or store it for a specific request
        if reqId==FILL_CODE:
            self._my_executions_stream.put(execdata)
        else:
            self._my_requested_execution[reqId].put(execdata)
    def execDetailsEnd(self, reqId):
        """
        No more orders to look at if execution details requested
        """
        self._my_requested_execution[reqId].put(FINISHED)
    ## order ids
    def init_nextvalidid(self):
        """Create a fresh queue for nextValidId callbacks and return it."""
        orderid_queue = self._my_orderid_data = queue.Queue()
        return orderid_queue
    def nextValidId(self, orderId):
        """
        Give the next valid order id
        Note this doesn't 'burn' the ID; if you call again without executing the next ID will be the same
        If you're executing through multiple clients you are probably better off having an explicit counter
        """
        if getattr(self, '_my_orderid_data', None) is None:
            ## getting an ID which we haven't asked for
            ## this happens, IB server just sends this along occassionally
            self.init_nextvalidid()
        self._my_orderid_data.put(orderId)
class TestClient(EClient):
"""
The client method
We don't override native methods, but instead call them from our own wrappers
"""
    def __init__(self, wrapper):
        """Wire the EClient to its wrapper and prepare client-side caches."""
        ## Set up with a wrapper inside
        EClient.__init__(self, wrapper)
        self._market_data_q_dict = {}
        self._commissions=list_of_execInformation()
        ## We use these to store accounting data
        self._account_cache = simpleCache(max_staleness_seconds = 5*60)
        ## override function
        # simpleCache.update_data is abstract; inject the IB-backed refresh here.
        self._account_cache.update_data = self._update_accounting_data
    def get_current_positions(self):
        """
        Return the positions currently held.

        Each element is an (account, contract, position, avgCost) tuple as
        built by TestWrapper.position. Blocks up to MAX_WAIT_SECONDS for the
        gateway to signal the end of the stream.
        :return: list of position tuples
        """
        ## Make a place to store the data we're going to return
        positions_queue = finishableQueue(self.init_positions())
        ## ask for the data
        self.reqPositions()
        ## poll until we get a termination or die of boredom
        MAX_WAIT_SECONDS = 10
        positions_list = positions_queue.get(timeout=MAX_WAIT_SECONDS)
        while self.wrapper.is_error():
            print(self.get_error())
        if positions_queue.timed_out():
            print("Exceeded maximum wait for wrapper to confirm finished whilst getting positions")
        return positions_list
    def _update_accounting_data(self, accountName):
        """
        Update the accounting data in the cache
        :param accountName: account we want to get data for
        :return: nothing
        """
        ## Make a place to store the data we're going to return
        accounting_queue = finishableQueue(self.init_accounts(accountName))
        ## ask for the data
        self.reqAccountUpdates(True, accountName)
        ## poll until we get a termination or die of boredom
        MAX_WAIT_SECONDS = 10
        accounting_list = accounting_queue.get(timeout=MAX_WAIT_SECONDS)
        while self.wrapper.is_error():
            print(self.get_error())
        if accounting_queue.timed_out():
            print("Exceeded maximum wait for wrapper to confirm finished whilst getting accounting data")
        # seperate things out, because this is one big queue of data with different things in it
        accounting_list = list_of_identified_items(accounting_list)
        seperated_accounting_data = accounting_list.seperate_into_dict()
        ## update the cache with different elements
        # Keys of the dict are the ACCOUNT_*_FLAG labels attached by the wrapper.
        self._account_cache.update_cache(accountName, seperated_accounting_data)
        ## return nothing, information is accessed via get_... methods
def get_accounting_time_from_server(self, accountName):
    """
    Accounting timestamp as last served up by the IB server.

    Stale or missing cache entries are refreshed before returning.
    """
    cache = self._account_cache
    return cache.get_updated_cache(accountName, ACCOUNT_TIME_FLAG)
def get_accounting_values(self, accountName):
    """
    Accounting values as served up by the IB server.

    Stale or missing cache entries are refreshed before returning.
    """
    cache = self._account_cache
    return cache.get_updated_cache(accountName, ACCOUNT_VALUE_FLAG)
def get_accounting_updates(self, accountName):
    """
    Accounting updates as served up by the IB server.

    Stale or missing cache entries are refreshed before returning.
    """
    cache = self._account_cache
    return cache.get_updated_cache(accountName, ACCOUNT_UPDATE_FLAG)
def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):
    """
    Ask IB to fill in a partially specified contract.

    :param ibcontract: partially formed IB contract
    :param reqId: request id used for the lookup
    :return: fully resolved contract, or *ibcontract* unchanged on failure
    """
    details_queue = finishableQueue(self.init_contractdetails(reqId))

    print("TRADEAPI:\tGetting full contract details from the server... ")
    self.reqContractDetails(reqId, ibcontract)

    # Wait for one or more matching contracts, or give up at the deadline.
    timeout_seconds = 10
    matches = details_queue.get(timeout=timeout_seconds)

    while self.wrapper.is_error():
        print("TRADEAPI:\t" + str(self.get_error()))

    if details_queue.timed_out():
        print("TRADEAPI:\tExceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")

    if not matches:
        print("TRADEAPI:\tFailed to get additional contract details: returning unresolved contract")
        return ibcontract

    if len(matches) > 1:
        print("TRADEAPI:\tgot multiple contracts using first one")

    return matches[0].contract
def get_next_brokerorderid(self):
    """
    Fetch the next valid broker order id from IB.

    :return: order id (int), or TIME_OUT if the wrapper never answered
    """
    id_queue = self.init_nextvalidid()

    self.reqIds(-1)  # -1 is irrelevant apparently (see IB API docs)

    timeout_seconds = 10
    try:
        next_id = id_queue.get(timeout=timeout_seconds)
    except queue.Empty:
        print("TRADEAPI:\tWrapper timeout waiting for broker orderid")
        next_id = TIME_OUT

    while self.wrapper.is_error():
        print("TRADEAPI:\t" + str(self.get_error(timeout=timeout_seconds)))

    return next_id
def place_new_IB_order(self, ibcontract, order, orderid=None):
    """
    Submit *order* for *ibcontract* and return the broker order id used.

    If *orderid* is None the next valid id is requested from IB first;
    an Exception is raised if that request times out.
    """
    # Either the caller supplies an id, or we ask IB for the next valid one.
    if orderid is None:
        print("TRADEAPI:\tGetting orderid from IB")
        orderid = self.get_next_brokerorderid()

        if orderid is TIME_OUT:
            raise Exception("I couldn't get an orderid from IB, and you didn't provide an orderid")

    print("TRADEAPI:\tUsing order id of %d" % orderid)

    # NOTE: with several trading instances, order ids can end up submitted
    # out of sequence, which IB rejects.
    self.placeOrder(orderid, ibcontract, order)

    return orderid
def any_open_orders(self):
    """
    True when at least one order is currently open, else False.
    """
    return bool(self.get_open_orders())
def get_open_orders(self):
    """
    Open orders as a tidy dict keyed by order id, duplicates merged.
    """
    order_queue = finishableQueue(self.init_open_orders())

    # reqOpenOrders() would restrict this to orders from our client id;
    # we deliberately fetch orders from every client.
    self.reqAllOpenOrders()

    timeout_seconds = 5
    order_info = list_of_orderInformation(order_queue.get(timeout=timeout_seconds))

    while self.wrapper.is_error():
        print("TRADEAPI:\t" + str(self.get_error()))

    if order_queue.timed_out():
        print("TRADEAPI:\tExceeded maximum wait for wrapper to confirm finished whilst getting orders")

    # The queue is a jumble of order details; collapse to a de-duplicated dict.
    return order_info.merged_dict()
def get_executions_and_commissions(self, reqId=DEFAULT_EXEC_TICKER, execution_filter=None):
    """
    All executions done today, blended with their commission data.

    :param reqId: ticker id used for the execution request
    :param execution_filter: optional ExecutionFilter to subset orders;
        a fresh, unrestricted filter is created when omitted
    :return: dict of execInformation keyed by execid, duplicates removed
    """
    # FIX: the default used to be `execution_filter=ExecutionFilter()`, a
    # mutable default evaluated once at definition time and shared between
    # every call. Build a fresh filter per call instead.
    if execution_filter is None:
        execution_filter = ExecutionFilter()

    ## store somewhere
    execution_queue = finishableQueue(self.init_requested_execution_data(reqId))

    ## We can change ExecutionFilter to subset different orders
    ## note this will also pull in commissions but we would use get_executions_with_commissions
    self.reqExecutions(reqId, execution_filter)

    ## Run until we get a termination or get bored waiting
    MAX_WAIT_SECONDS = 10
    exec_list = list_of_execInformation(execution_queue.get(timeout=MAX_WAIT_SECONDS))

    while self.wrapper.is_error():
        print("TRADEAPI:\t" + str(self.get_error()))

    if execution_queue.timed_out():
        print("TRADEAPI:\tExceeded maximum wait for wrapper to confirm finished whilst getting exec / commissions")

    ## Commissions arrive separately; we get all of them but only use the relevant ones
    commissions = self._all_commissions()

    ## glue them together, create a dict, remove duplicates
    return exec_list.blended_dict(commissions)
def _recent_fills(self):
    """
    Fills that have arrived since the last call; may contain duplicates.

    :return: list_of_execInformation
    """
    # No queue setup needed: the execution stream queue is permanent.
    fill_stream = self.access_executions_stream()

    fills = list_of_execInformation()
    while not fill_stream.empty():
        try:
            fills.append(fill_stream.get(timeout=5))
        except queue.Empty:
            # Queue drained between the empty() check and the get(); fine.
            pass

    return fills
def recent_fills_and_commissions(self):
    """
    Recent fills joined with their commissions.

    :return: dict of execInformation objects keyed by execid
    """
    fills = self._recent_fills()
    # All commissions seen so far, not just the recent ones.
    commissions = self._all_commissions()
    # Glue together, dict-ify, remove duplicates.
    return fills.blended_dict(commissions)
def _recent_commissions(self):
    """
    Commissions that have arrived since the last check; may contain duplicates.

    :return: list_of_execInformation
    """
    # No queue setup needed: the commission stream queue is permanent.
    comm_stream = self.access_commission_stream()

    comms = list_of_execInformation()
    while not comm_stream.empty():
        try:
            comms.append(comm_stream.get(timeout=5))
        except queue.Empty:
            # Queue drained between the empty() check and the get(); fine.
            pass

    return comms
def _all_commissions(self):
    """
    Every commission seen since this instance was created.

    :return: list_of_execInformation (may contain duplicates)
    """
    # Fold anything new from the stream into the running total and keep it.
    combined = list_of_execInformation(self._commissions + self._recent_commissions())
    self._commissions = combined
    return combined
def cancel_order(self, orderid):
    """
    Cancel *orderid* and block until it disappears from the open-order
    list, or a 10-second deadline passes.

    Only works for orders placed by this client id; that is not checked
    here — with multiple client ids the caller must check it themselves.
    """
    self.cancelOrder(orderid)

    # Poll the open-order list until the order is gone or we time out.
    began_at = datetime.datetime.now()
    timeout_seconds = 10

    done = False
    while not done:
        if orderid not in self.get_open_orders():
            # Finally cancelled.
            done = True
        if (datetime.datetime.now() - began_at).seconds > timeout_seconds:
            print("TRADEAPI:\tWrapper didn't come back with confirmation that order was cancelled!")
            done = True
    # Returns nothing.
def cancel_all_orders(self):
    """
    Cancel every open order, from all client ids, and wait for confirmation.

    Sends a global cancel request, then polls the open-order list until it
    is empty or 10 seconds have elapsed. Returns nothing. If you do not
    want to cancel everything, run .cancel_order over named ids instead.
    """
    # FIX: the original called the module-level name `app` instead of
    # `self`, so this method raised NameError unless a global `app`
    # happened to exist.
    self.reqGlobalCancel()

    start_time = datetime.datetime.now()
    MAX_WAIT_TIME_SECONDS = 10

    finished = False
    while not finished:
        if not self.any_open_orders():
            ## all orders finally cancelled
            finished = True
        if (datetime.datetime.now() - start_time).seconds > MAX_WAIT_TIME_SECONDS:
            print("TRADEAPI:\tWrapper didn't come back with confirmation that all orders were cancelled!")
            finished = True
    ## return nothing
class TestApp(TestWrapper, TestClient):
    """
    Combined wrapper + client: connects to TWS/Gateway and runs the
    EClient message loop on a background thread.
    """

    def __init__(self, ipaddress, portid, clientid):
        TestWrapper.__init__(self)
        TestClient.__init__(self, wrapper=self)

        self.connect(ipaddress, portid, clientid)

        # The message loop must run concurrently with user code.
        thread = Thread(target=self.run)
        thread.start()
        # FIX (idiom): plain attribute assignment instead of
        # setattr(self, "_thread", thread) with a constant name.
        self._thread = thread

        self.init_error()
3fe934c43886f457a7fa86e22a558d84fdaa563f | 7,613 | py | Python | client_turnin.py | bmwanz/cs176bFinal | 08f218621382b2b939d3dbcb5fed5f2a9bd74a8e | [
"Apache-2.0"
] | null | null | null | client_turnin.py | bmwanz/cs176bFinal | 08f218621382b2b939d3dbcb5fed5f2a9bd74a8e | [
"Apache-2.0"
] | null | null | null | client_turnin.py | bmwanz/cs176bFinal | 08f218621382b2b939d3dbcb5fed5f2a9bd74a8e | [
"Apache-2.0"
] | null | null | null | #Chien Kai Wang, Brian Wan, William Chen
#TCP CHAT CLIENT
import pyaudio
import sys
import socket
import select
import time
from array import array
from struct import pack
# Shared state between the select() loop and the audio send/receive paths:
#   msgType: 0 = text mode, 1 = audio mode
#   check:   0 = polling select(), 1 = busy inside an audio transfer
#   receive: 0 = idle, 1 = playing incoming audio, 2 = recording outgoing audio
msgType = 0
check = 0
receive = 0
# 27 plaintext symbols (a-z with a space inserted after 'g') and their
# uppercase counterparts; indexed against the user's password in chat_client().
ALPHA_LOWER = ['a', 'b', 'c', 'd', 'e', 'f', 'g', ' ', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ALPHA_UPPER = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
# Preset (input, output) substitution pairs for digits/punctuation; these get
# scrambled again at startup before being merged into NEW_CIPHER.
SUB_CIPHER = [('1','5'), (',','\''), ('.','"'), (';','<'), ('"','>'), (':','?'), ('<','/'), ('>',';'), ('/',','), ('?','.'), ('2','`'), ('3','-'), ('4','~'), ('5','1'), ('[','|'), (']','\\'), ('\\','{'), ('}','['), ('{',']'), ('|','}'), ('6','0'), ('7','='), ('8','2'), ('9','6'), ('0','8'), ('-','9'), ('=','7'), ('!','#'), ('@','*'), ('#','!'), ('$','^'), ('%','('), ('^','&'), ('&','+'), ('*',')'), ('(','%'), (')','_'), ('_','@'), ('+','$'), ('~', '4'), ('`', '3')]
# The live substitution table; populated by chat_client() from the password.
NEW_CIPHER = {}
#receive data
def MsgRec(s, x):
    """
    Read one framed message from socket *s* and return its payload.

    Frame layout: 1 type digit, then 111 zero-padded ASCII digits giving the
    payload length, then the payload (read in 1024-byte chunks).
    When x == 0 the part after the "[sender] " prefix is decrypted with the
    substitution cipher; any other x returns the payload untouched.
    """
    _msg_type = int(s.recv(1))
    remaining = int(s.recv(111))

    body = ''
    if remaining < 1024:
        body = s.recv(1024)
    else:
        tail = remaining % 1024
        while remaining >= 1024:
            body = body + s.recv(1024)
            remaining = remaining - 1024
        body = body + s.recv(tail)

    # Locate the end of the "[sender]" prefix so it is not decrypted.
    cut = 0
    for ch in body:
        if ch == ']':
            break
        cut = cut + 1

    if x == 0:
        plain = decrypt_sub(body[cut + 2:])
        body = body[:cut + 2] + plain

    return body
#send data
def MsgSend(clientsocket, msg, x):
    # Send one framed message: 1 type digit + 111-digit zero-padded length +
    # payload (mirrors the frame that MsgRec reads).
    # x == 0 means plain chat text, which is substitution-encrypted first;
    # any other x (audio frames / control words) is sent as-is.
    if x == 0:
        msg = encrypt_sub(msg)
    msgTypeSend = str(msgType)
    length = str(len(msg))
    # Zero-pad the length field up to the fixed 111-character header width.
    while(len(length) < 111):
        length = '0' + length
    try:
        clientsocket.sendall(msgTypeSend)
        clientsocket.sendall(length)
        clientsocket.sendall(msg)
    except socket.error:
        print >> sys.stderr,'ERROR: Failed to send message. Terminating.'
        sys.exit()
def encrypt_sub(message):
    """
    Encrypt *message* with the NEW_CIPHER substitution table.

    Characters without a mapping pass through unchanged.
    """
    # ''.join avoids the quadratic cost of repeated string concatenation;
    # dict.get(c, c) keeps unmapped characters as-is, like the old branch.
    return ''.join(NEW_CIPHER.get(c, c) for c in message)
def decrypt_sub(message):
    """
    Decrypt *message* by inverting the NEW_CIPHER substitution table.

    Characters that are not cipher outputs pass through unchanged.
    """
    # Invert the cipher: encrypted char -> plain char.
    inverted = {}
    for key, value in NEW_CIPHER.items():
        inverted[value] = key
    # FIX: membership must be tested against the *inverted* table, not
    # NEW_CIPHER. The old check skipped characters that only appear as
    # cipher outputs, and raised KeyError for characters that are cipher
    # inputs but not outputs.
    return ''.join(inverted.get(c, c) for c in message)
def chat_client():
    """
    Main loop of the chat client (Python 2).

    Reads a cipher password from stdin, connects to the server given on the
    command line (port 9001), then multiplexes keyboard text and microphone
    audio over the socket until the process is killed.
    """
    global msgType
    global check
    global receive

    if len(sys.argv) != 2:
        print >> sys.stderr,'Invalid number of args. Terminating'
        sys.exit()

    host = sys.argv[1]
    #port = int(sys.argv[2])

    # Set up the Substitution Cipher password
    print 'Rearrange the words for your password'
    print '"TV quiz drag cox blew JFK nymphs"'
    password = sys.stdin.readline()
    # 26 letters plus the trailing newline = 27 once spaces are removed.
    if len(password.replace(' ', '')) != 27:
        print >> sys.stderr,'Invalid password. You included an extra character or forgot one. Terminating'
        sys.exit()

    # Scramble the Preset Subtitution Cipher:
    # entry i's input gets paired with entry (38 - i)'s output.
    # NOTE(review): the loop variable shadows the builtin `tuple` and is unused.
    i = 0
    j = 38
    for tuple in SUB_CIPHER:
        NEW_CIPHER[SUB_CIPHER[i][0]] = SUB_CIPHER[j][1]
        i = i + 1
        j = j - 1

    # Build the Substitution cipher: map a-z (plus space) onto the
    # password's letter order, and mirror it for uppercase.
    password = password.lower()
    password = password.replace(' ', '')
    password = password.replace('\n', '')
    password = password + ' '
    # NOTE(review): j is reset and incremented below but never read afterwards.
    j = 15
    for i in range(0, 27):
        NEW_CIPHER[ALPHA_LOWER[i]] = password[i]
    for i in range(25, -1, -1):
        NEW_CIPHER[ALPHA_UPPER[i]] = password[i].upper()
        j = j+1
    NEW_CIPHER['\n'] = '\n'

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, 9001))
    except Exception, e:
        if(e.errno == -2):
            print >> sys.stderr,'ERROR: Could not connect to server. Terminating.'
        elif(e.errno == 111):
            print >> sys.stderr,'ERROR: Invalid port. Terminating.'
        else:
            print >> sys.stderr,e
        sys.exit()

    print 'Connected'
    sys.stdout.write('[Local] '); sys.stdout.flush()

    p = pyaudio.PyAudio()
    r = array('h')
    countSilent = 0

    while 1:
        socket_list = [sys.stdin, s]
        # chooses inputs
        if check == 0 :
            read_sock, write_sock, err_sock = select.select(socket_list , [], [])
            for sock in read_sock:
                if sock == s:
                    # Incoming traffic from the server.
                    data = MsgRec(sock, 0)
                    #if data.strip() != '[audio]' :
                    if '[audio]' not in data:
                        sys.stdout.write("\r" + '[Guest]'+data)
                        sys.stdout.write('[Local] '); sys.stdout.flush()
                    else:
                        # Peer started talking: switch to audio-receive mode.
                        receive = 1
                        check = 1
                        break
                else :
                    if msgType == 0: #text
                        msg = sys.stdin.readline() #read a line from the client
                        #if msg.strip() == '[audio]':
                        if '[audio]' in msg:
                            msgType = 1
                        MsgSend(s,msg, 0) #send using the send function
                        sys.stdout.write('[Local] '); sys.stdout.flush()
                    else : #audio
                        # User typed '[audio]': switch to audio-send mode.
                        countSilent = 0
                        receive = 2
                        check = 1
                        break

        if receive == 1 :
            # Play incoming audio until the sender signals 'endofaudio'.
            stream = p.open(format = pyaudio.paInt16,
                            channels = 1,
                            rate = 44100,
                            output = True)
            sys.stdout.write('\nGuest is Talking ...\n'); sys.stdout.flush()
            while 1:
                datastream = MsgRec(s, 1)
                if 'endofaudio' in datastream:
                    check = 0
                    receive = 0
                    msgType = 0
                    sys.stdout.write('\nGuest is Done Talking!\n'); sys.stdout.flush()
                    break
                if datastream:
                    stream.write(datastream)
        elif receive == 2:
            # Record from the microphone and stream it to the peer until
            # more than 40 consecutive silent chunks are seen.
            stream = p.open(format = pyaudio.paInt16,
                            channels = 1,
                            rate = 44100,
                            input = True,
                            frames_per_buffer = 8192)
            try:
                while 1:
                    datastreamSend = stream.read(8192)
                    dataTest = array('h',datastreamSend)
                    silent = is_silent(dataTest)
                    MsgSend(s,datastreamSend, 1)
                    if silent:
                        countSilent += 1
                        if countSilent > 40:
                            # Sustained silence: tell the peer we are done.
                            MsgSend(s,'endofaudio', 1)
                            msgType = 0
                            check = 0
                            receive = 0
                            #MsgSend(s,'endofaudio')
                            break
                    #MsgSend(s,datastreamSend)
                    '''
                    datastreamSend = stream.read(8192)
                    MsgSend(s,datastreamSend)
                    '''
            except IOError:
                print 'warning: dropped frame'
def is_silent(sendData):
    """Return True when the loudest sample in *sendData* is below the 600 threshold."""
    loudest = max(sendData)
    return loudest < 600
# Run the client when executed as a script; its return value (if any)
# becomes the process exit code.
if __name__ == "__main__":
    sys.exit(chat_client())
| 32.67382 | 469 | 0.442795 |
d766adc9be5d559e1bebe0ae8ac2f5371a406d53 | 2,643 | py | Python | repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 21 | 2018-11-20T15:58:39.000Z | 2022-03-15T19:57:24.000Z | repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 732 | 2018-11-21T18:33:26.000Z | 2022-03-31T16:16:24.000Z | repos/system_upgrade/el7toel8/actors/checkbtrfs/actor.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 85 | 2018-11-20T17:55:00.000Z | 2022-03-29T09:40:31.000Z | from leapp.actors import Actor
from leapp.models import ActiveKernelModulesFacts
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
from leapp import reporting
from leapp.reporting import Report, create_report
class CheckBtrfs(Actor):
"""
Check if Btrfs filesystem is in use. If yes, inhibit the upgrade process.
Btrfs filesystem was introduced as Technology Preview with initial releases of RHEL 6 and 7. It
was deprecated on versions 6.6 and 7.4 and will not be present in next major version.
"""
name = 'check_btrfs'
consumes = (ActiveKernelModulesFacts,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
hint = 'In order to unload the module from the running system, check the accompanied command.'
command = ['modprobe', '-r', 'btrfs']
for fact in self.consume(ActiveKernelModulesFacts):
for active_module in fact.kernel_modules:
if active_module.filename == 'btrfs':
create_report([
reporting.Title('Btrfs has been removed from RHEL8'),
reporting.Summary(
'The Btrfs file system was introduced as Technology Preview with the '
'initial release of Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7. As of '
'versions 6.6 and 7.4 this technology has been deprecated and removed in RHEL8.'
),
reporting.ExternalLink(
title='Considerations in adopting RHEL 8 - btrfs has been removed.',
url='https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/considerations_in_adopting_rhel_8/file-systems-and-storage_considerations-in-adopting-rhel-8#btrfs-has-been-removed_file-systems-and-storage' # noqa: E501; pylint: disable=line-too-long
),
reporting.ExternalLink(
title='How do I prevent a kernel module from loading automatically?',
url='https://access.redhat.com/solutions/41278'
),
reporting.Severity(reporting.Severity.HIGH),
reporting.Flags([reporting.Flags.INHIBITOR]),
reporting.Tags([reporting.Tags.FILESYSTEM]),
reporting.Remediation(hint=hint, commands=[command]),
reporting.RelatedResource('kernel-driver', 'btrfs')
])
break
| 51.823529 | 297 | 0.601589 |
58b9bbf71a6f16dee4cfca5d095669e15471c7af | 504 | py | Python | src/gen_lookup_table.py | nico/n2 | 788c128ab0d570a6957209f922c0e56b848995e3 | [
"Apache-2.0"
] | null | null | null | src/gen_lookup_table.py | nico/n2 | 788c128ab0d570a6957209f922c0e56b848995e3 | [
"Apache-2.0"
] | null | null | null | src/gen_lookup_table.py | nico/n2 | 788c128ab0d570a6957209f922c0e56b848995e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# See parse.rs: is_ident_char().
ident_chars = ['az', 'AZ', '09', '_', '-', '.']
path_chars = ['az', 'AZ', '09', '_', '-', '.', '/', ',', '+']
chars = path_chars
tab = [0 for _ in range(256)]
for span in path_chars:
if len(span) > 1:
for c in range(ord(span[0]), ord(span[1])+1):
tab[c] = 1
else:
tab[ord(span)] = 1
for ofs in range(0, 256, 64):
bits = tab[ofs:ofs+64]
s = ''.join('1' if b else '0' for b in bits)[::-1]
num = int(s, 2)
print(hex(num)) | 24 | 61 | 0.519841 |
645ce217ded40c43b9220911d845c80030f3ca90 | 30,229 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_public_ip_prefixes_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_public_ip_prefixes_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_public_ip_prefixes_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call callback: receives the pipeline response, the deserialized
# result and the response headers, and may transform the returned value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations:
"""PublicIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Wire up the shared pipeline client, service configuration and
    # (de)serializers that every operation in this group uses.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    public_ip_prefix_name: str,
    **kwargs: Any
) -> None:
    # Initial call of the delete long-running operation: issues the DELETE
    # request and returns once the service accepts it (200/202/204).
    # begin_delete() drives the polling on top of this.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    public_ip_prefix_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified public IP prefix.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the PublicIpPrefix.
    :type public_ip_prefix_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved state.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no body; only the optional cls callback sees the response.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    public_ip_prefix_name: str,
    expand: Optional[str] = None,
    **kwargs: Any
) -> "_models.PublicIPPrefix":
    """Gets the specified public IP prefix in a specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PublicIPPrefix, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_07_01.models.PublicIPPrefix
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # $expand is only sent when the caller asked for referenced resources.
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    public_ip_prefix_name: str,
    parameters: "_models.PublicIPPrefix",
    **kwargs: Any
) -> "_models.PublicIPPrefix":
    # Initial call of the create-or-update long-running operation: issues
    # the PUT request and deserializes the immediate 200/201 response.
    # begin_create_or_update() drives the polling on top of this.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'PublicIPPrefix')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 = updated existing resource, 201 = created new one; same payload shape.
    if response.status_code == 200:
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.PublicIPPrefix",
**kwargs: Any
) -> AsyncLROPoller["_models.PublicIPPrefix"]:
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
# Pop the LRO-control keywords first so they do not leak into **kwargs
# forwarded to the pipeline calls below.
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
# No saved poller state: issue the initial PUT ourselves. The cls override
# returns the raw pipeline response so the poller can drive the LRO from it.
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
# These options were only meaningful for the initial request; drop them
# before the same kwargs are reused for the polling requests.
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
# Deserialize the terminal response of the LRO (optionally through cls).
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
# polling=True -> ARM poller reading the final state from the 'location' header;
# polling=False -> no polling; anything else -> caller-supplied polling method.
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
# Resume a previously saved poller instead of wrapping a fresh response.
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    public_ip_prefix_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.PublicIPPrefix":
    """Issue the PATCH request that updates the tags of a public IP prefix.

    Internal helper for :meth:`begin_update_tags`. Returns the deserialized
    ``PublicIPPrefix`` (or whatever the optional ``cls`` callback produces).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path parameters.
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string, headers and serialized body.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]
    body_content_kwargs = {
        'content': self._serialize.body(parameters, 'TagsObject'),
    }  # type: Dict[str, Any]

    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Anything other than 200 is an error for this synchronous PATCH.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.PublicIPPrefix"]:
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
# Pop the LRO-control keywords before **kwargs is forwarded to the pipeline.
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
# No saved poller state: issue the initial PATCH. The cls override keeps
# the raw pipeline response so the poller can drive the LRO from it.
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
# Only relevant to the initial request; must not leak into polling calls.
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
# Deserialize the terminal response of the LRO (optionally through cls).
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
# Unlike create_or_update, this LRO uses the default final-state resolution
# (no 'final-state-via' override).
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
# Resume a previously saved poller instead of wrapping a fresh response.
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Build the GET for one page: the first page uses the metadata URL template,
# subsequent pages reuse the server-provided nextLink verbatim.
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
# nextLink already embeds the query string.
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
# Turn one page response into (next_link, async-iterable of items).
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
# Fetch one page; any status other than 200 is surfaced as an error.
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Build the GET for one page: the first page uses the metadata URL template,
# subsequent pages reuse the server-provided nextLink verbatim.
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
# nextLink already embeds the query string.
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
# Turn one page response into (next_link, async-iterable of items).
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
# Fetch one page; any status other than 200 is surfaced as an error.
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
| 50.214286 | 200 | 0.670912 |
fd474499b8fc98b4cec0f3e514df02e6695a3827 | 160,204 | py | Python | odoo/custom/src/private/purchase_product_uos/_patch/addons/stock/stock.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | 1 | 2021-10-03T08:11:18.000Z | 2021-10-03T08:11:18.000Z | odoo/custom/src/private/purchase_product_uos/_patch/addons/stock/stock.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | null | null | null | odoo/custom/src/private/purchase_product_uos/_patch/addons/stock/stock.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp.tools import float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
"""Master data for Incoterms (standard international commercial terms)."""
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
# Human-readable label of the incoterm.
'name': fields.char('Name', size=64, required=True, help="Incoterms are series of sales terms.They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
# Standard three-letter incoterm code (e.g. FOB, CIF).
'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
# Archive flag: records are hidden via this flag rather than deleted.
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
}
_defaults = {
# New incoterms are visible by default.
'active': True,
}
stock_incoterms()
class stock_journal(osv.osv):
"""Stock journal: groups pickings/moves under a named register with a responsible user."""
_name = "stock.journal"
_description = "Stock Journal"
_columns = {
'name': fields.char('Stock Journal', size=32, required=True),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
# Default responsible: the user who creates the journal.
'user_id': lambda s, c, u, ctx: u
}
stock_journal()
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
"""Warehouse location (physical or virtual), organized as a tree via location_id."""
_name = "stock.location"
_description = "Location"
# Tree storage uses the parent_left/parent_right ("_parent_store") pattern so
# that 'child_of' searches are fast; ordering is by height (posz) then name.
_parent_name = "location_id"
_parent_store = True
_parent_order = 'posz,name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
    """Display names: always the full hierarchical "parent / child" path."""
    return self._complete_name(cr, uid, ids, 'complete_name', None, context=context).items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
names = [m.name]
parent = m.location_id
while parent:
names.append(parent.name)
parent = parent.location_id
res[m.id] = ' / '.join(reversed(names))
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
return self.search(cr, uid, [('id', 'child_of', ids)], context=context)
def _product_value(self, cr, uid, ids, field_names, arg, context=None):
"""Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
@param field_names: Name of field
@return: Dictionary of values
"""
prod_id = context and context.get('product_id', False)
if not prod_id:
# Without a product in the context there is nothing to value: every
# requested field is zero for every location.
return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
product_product_obj = self.pool.get('product.product')
# Locations that ever appear as the source of a stock move...
cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
dict1 = cr.dictfetchall()
# ...and locations that ever appear as a destination.
cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
dict2 = cr.dictfetchall()
# groupby requires its input sorted on the grouping key.
res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))
# Seed every requested id (and every location found by the queries) with zeros.
result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))
# Valuation is expressed in the current user's company currency.
currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
currency_obj = self.pool.get('res.currency')
currency = currency_obj.browse(cr, uid, currency_id, context=context)
for loc_id, product_ids in products_by_location.items():
if prod_id:
# Always true here (guarded above): narrow the computation to the
# single product requested through the context.
product_ids = [prod_id]
# Quantity fields on product.product are location-dependent; pass the
# location through the context so browse computes them per location.
c = (context or {}).copy()
c['location'] = loc_id
for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
for f in field_names:
if f == 'stock_real':
if loc_id not in result:
result[loc_id] = {}
result[loc_id][f] += prod.qty_available
elif f == 'stock_virtual':
result[loc_id][f] += prod.virtual_available
elif f == 'stock_real_value':
# Value = on-hand qty at cost price, rounded in company currency.
amount = prod.qty_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
elif f == 'stock_virtual_value':
amount = prod.virtual_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
return result
# Field definitions for stock.location. Note the chaining fields: a chained
# location forces products arriving here to continue to a follow-up location.
_columns = {
'name': fields.char('Location Name', size=64, required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
""", select = True),
# temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
# Stored function field: recomputed for a location and its subtree whenever a
# name or parent changes (see _get_sublocations trigger).
'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),
# Non-stored quantity/valuation fields; require 'product_id' in the context.
'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
'Chained Location Type', required=True,
help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
"should next go to the chained location. The chained location is determined according to the type :"\
"\n* None: No chaining at all"\
"\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
"\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
),
'chained_auto_packing': fields.selection(
[('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
'Chaining Type',
required=True,
help="This is used only if you select a chained location type.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules'),
'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
'partner_id': fields.many2one('res.partner', 'Location Address',help="Address of customer or supplier."),
'icon': fields.selection(tools.icons, 'Icon', size=64,help="Icon show in hierarchical tree view"),
'comment': fields.text('Additional Information'),
# Optional physical coordinates inside the warehouse (informational only).
'posx': fields.integer('Corridor (X)',help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
# Maintained automatically by the ORM for the _parent_store tree structure.
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between all companies'),
'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
# Default values: a plain, active, un-chained internal location at the origin.
_defaults = {
'active': True,
'usage': 'internal',
'chained_location_type': 'none',
'chained_auto_packing': 'manual',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'icon': False,
'scrap_location': False,
}
def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
    """Compute the chained (follow-up) location for ``location``, if any.

    @param location: browse record of the source location
    @param partner: browse record of the partner (used for 'customer' chaining)
    @return: None/falsy when the location is not chained, otherwise the tuple
        (destination, auto_packing, delay, journal_id, company_id, picking_type, False)
    """
    destination = None
    chaining = location.chained_location_type
    if chaining == 'customer':
        # Destination comes from the partner's configured customer location.
        if partner:
            destination = partner.property_stock_customer
    elif chaining == 'fixed':
        destination = location.chained_location_id
    if not destination:
        return destination
    journal_id = location.chained_journal_id and location.chained_journal_id.id or False
    company_id = location.chained_company_id and location.chained_company_id.id or False
    return (destination, location.chained_auto_packing, location.chained_delay,
            journal_id, company_id, location.chained_picking_type, False)
def picking_type_get(self, cr, uid, from_location, to_location, context=None):
    """Deduce the picking type ('in' / 'out' / 'internal') from the two locations.

    internal -> customer/supplier is an outgoing shipment; supplier/customer ->
    internal is a reception; everything else is an internal transfer.
    """
    src_usage = from_location.usage
    if src_usage == 'internal' and to_location and to_location.usage in ('customer', 'supplier'):
        return 'out'
    if src_usage in ('supplier', 'customer') and to_location.usage == 'internal':
        return 'in'
    return 'internal'
def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
return self._product_get_report(cr, uid, ids, product_ids, context, recursive=True)
def _product_get_report(self, cr, uid, ids, product_ids=False,
context=None, recursive=False):
""" Finds the product quantity and price for particular location.
@param product_ids: Ids of product
@param recursive: True or False
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
# Take the user company and pricetype
context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
context['compute_child'] = False
if not product_ids:
# No explicit selection: report on every product, archived ones included.
product_ids = product_obj.search(cr, uid, [], context={'active_test': False})
products = product_obj.browse(cr, uid, product_ids, context=context)
# Group products by unit of measure so quantities are queried per UoM, and
# keep an id -> record index for the per-line data below.
products_by_uom = {}
products_by_id = {}
for product in products:
products_by_uom.setdefault(product.uom_id.id, [])
products_by_uom[product.uom_id.id].append(product)
products_by_id.setdefault(product.id, [])
products_by_id[product.id] = product
result = {}
result['product'] = []
for id in ids:
quantity_total = 0.0
total_price = 0.0
for uom_id in products_by_uom.keys():
# Non-recursive quantities by default; recursive=True also counts
# the stock held in child locations.
fnc = self._product_get
if recursive:
fnc = self._product_all_get
ctx = context.copy()
ctx['uom'] = uom_id
qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
context=ctx)
for product_id in qty.keys():
if not qty[product_id]:
continue
product = products_by_id[product_id]
quantity_total += qty[product_id]
# Compute based on pricetype
# Choose the right filed standard_price to read
amount_unit = product.price_get('standard_price', context=context)[product.id]
price = qty[product_id] * amount_unit
total_price += price
result['product'].append({
'price': amount_unit,
'prod_name': product.name,
'code': product.default_code, # used by lot_overview_all report!
'variants': product.variants or '',
'uom': product.uom_id.name,
'prod_qty': qty[product_id],
'price_value': price,
})
# NOTE(review): 'total'/'total_price' are re-assigned on each iteration over
# `ids`, so with several locations only the last location's totals survive
# while 'product' rows accumulate across all of them. Looks intended for
# single-location reports — confirm before relying on multi-id totals.
result['total'] = quantity_total
result['total_price'] = total_price
return result
def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
states=['done'], what=('in', 'out')):
"""
@param product_ids: Ids of product
@param states: List of states
@param what: Tuple of
@return:
"""
product_obj = self.pool.get('product.product')
if context is None:
context = {}
context.update({
'states': states,
'what': what,
'location': ids
})
return product_obj.get_product_available(cr, uid, product_ids, context=context)
def _product_get(self, cr, uid, id, product_ids=False, context=None, states=None):
"""
@param product_ids:
@param states:
@return:
"""
if states is None:
states = ['done']
ids = id and [id] or []
return self._product_get_multi_location(cr, uid, ids, product_ids, context=context, states=states)
def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
# build the list of ids of children of the location given by id
ids = id and [id] or []
location_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)
def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
return self._product_all_get(cr, uid, id, product_ids, context, ['confirmed', 'waiting', 'assigned', 'done'])
    def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
        """
        Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
        in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
        of product with id ``product_id`` in the searched location will be write-locked using Postgres's
        "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reserving
        twice the same products.
        If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
        the same stock.move lines), a log line will be output and False will be returned, as if there was
        not enough stock.
        :param product_id: Id of product to reserve
        :param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
        :param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
                     be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
                     to prevent reserving twice the same products.
        :param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
                        compute the ``product_qty`` and in the return value.
        :return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
                 reach the requested product_qty (``qty`` is expressed in the default uom of the product), or False if enough
                 products could not be found, or the lock could not be obtained (and ``lock`` was True).
        """
        result = []
        amount = 0.0
        if context is None:
            context = {}
        uom_obj = self.pool.get('product.uom')
        # Rounding precision of the uom used for quantity comparisons below.
        uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
        if context.get('uom'):
            uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding
        # Walk the location and all of its children, taking stock greedily.
        for id in self.search(cr, uid, [('location_id', 'child_of', ids)]):
            if lock:
                try:
                    # Must lock with a separate select query because FOR UPDATE can't be used with
                    # aggregation/group by's (when individual rows aren't identifiable).
                    # We use a SAVEPOINT to be able to rollback this part of the transaction without
                    # failing the whole transaction in case the LOCK cannot be acquired.
                    cr.execute("SAVEPOINT stock_location_product_reserve")
                    cr.execute("""SELECT id FROM stock_move
                                  WHERE product_id=%s AND
                                          (
                                            (location_dest_id=%s AND
                                             location_id<>%s AND
                                             state='done')
                                            OR
                                            (location_id=%s AND
                                             location_dest_id<>%s AND
                                             state in ('done', 'assigned'))
                                          )
                                  FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
                except Exception:
                    # Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
                    # so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
                    # state, we return False as if the products were not available, and log it:
                    cr.execute("ROLLBACK TO stock_location_product_reserve")
                    _logger.warning("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
                    _logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
                    return False
            # XXX TODO: rewrite this with one single query, possibly even the quantity conversion
            # Incoming stock: quantities already moved into this location.
            cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
                          FROM stock_move
                          WHERE location_dest_id=%s AND
                                location_id<>%s AND
                                product_id=%s AND
                                state='done'
                          GROUP BY product_uom
                       """,
                       (id, id, product_id))
            results = cr.dictfetchall()
            # Outgoing stock: done or reserved moves leaving this location,
            # negated so they subtract from the running total below.
            cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
                          FROM stock_move
                          WHERE location_id=%s AND
                                location_dest_id<>%s AND
                                product_id=%s AND
                                state in ('done', 'assigned')
                          GROUP BY product_uom
                       """,
                       (id, id, product_id))
            results += cr.dictfetchall()
            total = 0.0
            results2 = 0.0
            for r in results:
                # Convert every uom-specific quantity into the requested uom.
                amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
                results2 += amount
                total += amount
            if total <= 0.0:
                continue
            # NOTE(review): results2 and total accumulate the same values at
            # this point; total is then decremented while amount/results2 is
            # compared against it — confirm the distinction is intentional.
            amount = results2
            compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
            if compare_qty == 1:
                if amount > min(total, product_qty):
                    amount = min(product_qty, total)
                result.append((amount, id))
                product_qty -= amount
                total -= amount
                if product_qty <= 0.0:
                    # Fully reserved: return the per-location breakdown.
                    return result
                if total <= 0.0:
                    continue
        # Not enough stock was found across all searched locations.
        return False
stock_location()
class stock_tracking(osv.osv):
    _name = "stock.tracking"
    _description = "Packs"

    def checksum(sscc):
        """Compute the SSCC check digit for the given serial string."""
        # Alternating weights 3,1,3,1,... (17 digits max, per the SSCC layout).
        weights = '31' * 8 + '3'
        total = 0
        for digit, weight in zip(sscc, weights):
            total += int(digit) * int(weight)
        return (10 - (total % 10)) % 10
    checksum = staticmethod(checksum)

    def make_sscc(self, cr, uid, context=None):
        """Generate a pack reference: sequence number plus its check digit."""
        sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
        try:
            return sequence + str(self.checksum(sequence))
        except Exception:
            # Non-numeric sequences cannot carry a check digit: use them as-is.
            return sequence

    _columns = {
        'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
        'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
        'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
        'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
        'date': fields.datetime('Creation Date', required=True),
    }
    _defaults = {
        'active': 1,
        'name': make_sscc,
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
    }

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Search packs by exact serial first, then by name with ``operator``."""
        args = args or []
        found = self.search(cr, user, [('serial', '=', name)] + args, limit=limit, context=context)
        found += self.search(cr, user, [('name', operator, name)] + args, limit=limit, context=context)
        return self.name_get(cr, user, found, context)

    def name_get(self, cr, uid, ids, context=None):
        """Display name; the serial is appended in brackets when it is set."""
        if not len(ids):
            return []
        res = []
        for record in self.read(cr, uid, ids, ['name', 'serial'], context=context):
            if record['serial']:
                display = '%s [%s]' % (record['name'], record['serial'])
            else:
                display = record['name']
            res.append((record['id'], display))
        return res

    def unlink(self, cr, uid, ids, context=None):
        """Packs can never be deleted."""
        raise osv.except_osv(_('Error!'), _('You cannot remove a lot line.'))

    def action_traceability(self, cr, uid, ids, context=None):
        """ It traces the information of a product
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        return self.pool.get('action.traceability').action_traceability(cr, uid, ids, context)
stock_tracking()
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "id desc"
def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is greater than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%d """ % (value, pick.id)
if pick.max_date:
sql_str += " and (date_expected='" + pick.max_date + "')"
cr.execute(sql_str)
return True
def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is less than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%s """ % (value, pick.id)
if pick.min_date:
sql_str += " and (date_expected='" + pick.min_date + "')"
cr.execute(sql_str)
return True
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected)
from
stock_move
where
picking_id IN %s
group by
picking_id""",(tuple(ids),))
for pick, dt1, dt2 in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
return res
def create(self, cr, user, vals, context=None):
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = self._name
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(stock_picking, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'origin': fields.char('Source Document', size=64, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specify, goods coming in or going out."),
'note': fields.text('Notes', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'location_id': fields.many2one('stock.location', 'Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations.", select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Location where the system will stock the finished products.", select=True),
'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="It specifies goods to be deliver partially or all at once"),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], 'Status', readonly=True, select=True, track_visibility='onchange', help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'min_date': fields.function(get_min_max_date, fnct_inv=_set_minimum_date, multi="min_max_date",
store=True, type='datetime', string='Scheduled Time', select=1, help="Scheduled time for the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation date, usually the time of the order.", select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'max_date': fields.function(get_min_max_date, fnct_inv=_set_maximum_date, multi="min_max_date",
store=True, type='datetime', string='Max. Expected Date', select=2),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'auto_picking': fields.boolean('Auto-Picking', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, readonly=True, track_visibility='onchange', states={'draft': [('readonly', False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'state': 'draft',
'move_type': 'direct',
'type': 'internal',
'invoice_state': 'none',
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
def action_process(self, cr, uid, ids, context=None):
if context is None:
context = {}
"""Open the partial picking wizard"""
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.partial.picking',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
'nodestroy': True,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
picking_obj = self.browse(cr, uid, id, context=context)
move_obj = self.pool.get('stock.move')
if ('name' not in default) or (picking_obj.name == '/'):
seq_obj_name = 'stock.picking.' + picking_obj.type
default['name'] = self.pool.get('ir.sequence').get(cr, uid, seq_obj_name)
default['origin'] = ''
default['backorder_id'] = False
if 'invoice_state' not in default and picking_obj.invoice_state == 'invoiced':
default['invoice_state'] = '2binvoiced'
res = super(stock_picking, self).copy(cr, uid, id, default, context)
if res:
picking_obj = self.browse(cr, uid, res, context=context)
for move in picking_obj.move_lines:
move_obj.write(cr, uid, [move.id], {'tracking_id': False, 'prodlot_id': False, 'move_history_ids2': [(6, 0, [])], 'move_history_ids': [(6, 0, [])]})
return res
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
if view_type == 'form' and not view_id:
mod_obj = self.pool.get('ir.model.data')
if self._name == "stock.picking.in":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_in_form')
if self._name == "stock.picking.out":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
return super(stock_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
    def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
        """On-change handler for the partner on an incoming picking; a no-op
        here (empty value dict) — presumably an extension point for other
        modules, confirm against inheriting models."""
        return {}
    def action_explode(self, cr, uid, moves, context=None):
        """Hook to allow other modules to split the moves of a picking.

        @param moves: list of stock.move ids (returned unchanged here)
        @return: the (possibly expanded) list of move ids
        """
        return moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms picking.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
todo = []
for picking in pickings:
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
todo = self.action_explode(cr, uid, todo, context)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
return True
    def test_auto_picking(self, cr, uid, ids):
        """Workflow condition hook for auto-picking; always True in this base
        implementation."""
        # TODO: Check locations to see if in the same location ?
        return True
def action_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if all moves are confirmed.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if pick.state == 'draft':
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_confirm', cr)
move_ids = [x.id for x in pick.move_lines if x.state == 'confirmed']
if not move_ids:
raise osv.except_osv(_('Warning!'),_('Not enough stock, unable to reserve the products.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids)
return True
def force_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed','waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def draft_force_assign(self, cr, uid, ids, *args):
""" Confirms picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if not pick.move_lines:
raise osv.except_osv(_('Error!'),_('You cannot process picking without stock moves.'))
wf_service.trg_validate(uid, 'stock.picking', pick.id,
'button_confirm', cr)
return True
def draft_validate(self, cr, uid, ids, context=None):
""" Validates picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
self.draft_force_assign(cr, uid, ids)
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return self.action_process(
cr, uid, ids, context=context)
def cancel_assign(self, cr, uid, ids, *args):
""" Cancels picking and moves.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').cancel_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
    def action_assign_wkf(self, cr, uid, ids, context=None):
        """ Changes picking state to assigned.
        @return: True
        """
        # NOTE(review): plain state write; presumably invoked by the workflow
        # once the moves are reserved — confirm against the workflow definition.
        self.write(cr, uid, ids, {'state': 'assigned'})
        return True
    def test_finished(self, cr, uid, ids):
        """ Tests whether the move is in done or cancel state or not.
        @return: True or False
        """
        move_ids = self.pool.get('stock.move').search(cr, uid, [('picking_id', 'in', ids)])
        for move in self.pool.get('stock.move').browse(cr, uid, move_ids):
            if move.state not in ('done', 'cancel'):
                # A pending move with remaining quantity blocks the picking.
                if move.product_qty != 0.0:
                    return False
                else:
                    # Side effect: zero-quantity leftovers are closed so they
                    # do not keep the picking open forever.
                    move.write({'state': 'done'})
        return True
    def test_assigned(self, cr, uid, ids):
        """ Tests whether the move is in assigned state or not.
        @return: True or False
        """
        #TOFIX: assignment of move lines should be call before testing assigment otherwise picking never gone in assign state
        ok = True
        for pick in self.browse(cr, uid, ids):
            mt = pick.move_type
            # incomming shipments are always set as available if they aren't chained
            if pick.type == 'in':
                if all([x.state != 'waiting' for x in pick.move_lines]):
                    return True
            for move in pick.move_lines:
                # 'All at once' pickings stay unassigned while any move is pending.
                if (move.state in ('confirmed', 'draft')) and (mt == 'one'):
                    return False
                # Partial delivery: a single reserved move with quantity suffices.
                if (mt == 'direct') and (move.state == 'assigned') and (move.product_qty):
                    return True
                ok = ok and (move.state in ('cancel', 'done', 'assigned'))
        return ok
def action_cancel(self, cr, uid, ids, context=None):
""" Changes picking state to cancel.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
return True
#
# TODO: change and create a move if not parents
#
    def action_done(self, cr, uid, ids, context=None):
        """Changes picking state to done.
        This method is called at the end of the workflow by the activity "done".
        @return: True
        """
        # Stamp the completion date together with the state change.
        self.write(cr, uid, ids, {'state': 'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
        return True
def action_move(self, cr, uid, ids, context=None):
"""Process the Stock Moves of the Picking
This method is called by the workflow by the activity "move".
Normally that happens when the signal button_done is received (button
"Done" pressed on a Picking view).
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
self.pool.get('stock.move').action_confirm(cr, uid, [move.id],
context=context)
todo.append(move.id)
elif move.state in ('assigned','confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo,
context=context)
return True
    def get_currency_id(self, cr, uid, picking):
        """Currency to force on the invoice generated for ``picking``; False
        means no explicit currency (see its use in _prepare_invoice)."""
        return False
    def _get_partner_to_invoice(self, cr, uid, picking, context=None):
        """ Gets the partner that will be invoiced
            Note that this function is inherited in the sale and purchase modules
            @param picking: object of the picking for which we are selecting the partner to invoice
            @return: id of the partner to invoice (False when the picking has
                     no partner) — note: an id, not a browse object
        """
        return picking.partner_id and picking.partner_id.id
    def _get_comment_invoice(self, cr, uid, picking):
        """
        @return: comment string for invoice (the picking's note, or '')
        """
        return picking.note or ''
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
# Take the user company and pricetype
context['currency_id'] = move_line.company_id.currency_id.id
amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
return amount_unit
else:
return move_line.product_id.list_price
    def _get_discount_invoice(self, cr, uid, move_line):
        '''Return the discount for the move line (always 0.0 in this base
        implementation).'''
        return 0.0
def _get_taxes_invoice(self, cr, uid, move_line, type):
""" Gets taxes on invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: Taxes Ids for the move line
"""
if type in ('in_invoice', 'in_refund'):
taxes = move_line.product_id.supplier_taxes_id
else:
taxes = move_line.product_id.taxes_id
if move_line.picking_id and move_line.picking_id.partner_id and move_line.picking_id.partner_id.id:
return self.pool.get('account.fiscal.position').map_tax(
cr,
uid,
move_line.picking_id.partner_id.property_account_position,
taxes
)
else:
return map(lambda x: x.id, taxes)
    def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
        """Analytic account for the generated invoice line; none (False) in
        this base implementation."""
        return False
    def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
        '''Call after the creation of the invoice line'''
        # No-op extension point for other modules.
        return
    def _invoice_hook(self, cr, uid, picking, invoice_id):
        '''Call after the creation of the invoice'''
        # No-op extension point for other modules.
        return
def _get_invoice_type(self, pick):
src_usage = dest_usage = None
inv_type = None
if pick.invoice_state == '2binvoiced':
if pick.move_lines:
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
if pick.type == 'out' and dest_usage == 'supplier':
inv_type = 'in_refund'
elif pick.type == 'out' and dest_usage == 'customer':
inv_type = 'out_invoice'
elif pick.type == 'in' and src_usage == 'supplier':
inv_type = 'in_invoice'
elif pick.type == 'in' and src_usage == 'customer':
inv_type = 'out_refund'
else:
inv_type = 'out_invoice'
return inv_type
def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
""" Builds the dict for grouped invoices
@param picking: picking object
@param partner: object of the partner to invoice (not used here, but may be usefull if this function is inherited)
@param invoice: object of the invoice that we are updating
@return: dict that will be used to update the invoice
"""
comment = self._get_comment_invoice(cr, uid, picking)
return {
'name': (invoice.name or '') + ', ' + (picking.name or ''),
'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
'date_invoice': context.get('date_inv', False),
'user_id': uid,
}
def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
""" Builds the dict containing the values for the invoice
@param picking: picking object
@param partner: object of the partner to invoice
@param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
@param journal_id: ID of the accounting journal
@return: dict that will be used to create the invoice object
"""
if isinstance(partner, int):
partner = self.pool.get('res.partner').browse(cr, uid, partner, context=context)
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
comment = self._get_comment_invoice(cr, uid, picking)
invoice_vals = {
'name': picking.name,
'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'type': inv_type,
'account_id': account_id,
'partner_id': partner.id,
'comment': comment,
'payment_term': payment_term,
'fiscal_position': partner.property_account_position.id,
'date_invoice': context.get('date_inv', False),
'company_id': picking.company_id.id,
'user_id': uid,
}
cur_id = self.get_currency_id(cr, uid, picking)
if cur_id:
invoice_vals['currency_id'] = cur_id
if journal_id:
invoice_vals['journal_id'] = journal_id
return invoice_vals
    def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
        invoice_vals, context=None):
        """ Builds the dict containing the values for the invoice line
        @param group: True or False
        @param picking: picking object
        @param: move_line: move_line object
        @param: invoice_id: ID of the related invoice
        @param: invoice_vals: dict used to created the invoice
        @return: dict that will be used to create the invoice line
        """
        if group:
            # Grouped invoices mix several pickings: prefix with the picking name.
            name = (picking.name or '') + '-' + move_line.name
        else:
            name = move_line.name
        origin = move_line.picking_id.name or ''
        if move_line.picking_id.origin:
            origin += ':' + move_line.picking_id.origin
        if invoice_vals['type'] in ('out_invoice', 'out_refund'):
            account_id = move_line.product_id.property_account_income.id
            if not account_id:
                # Fall back to the product category's income account.
                account_id = move_line.product_id.categ_id.\
                        property_account_income_categ.id
        else:
            account_id = move_line.product_id.property_account_expense.id
            if not account_id:
                # Fall back to the product category's expense account.
                account_id = move_line.product_id.categ_id.\
                        property_account_expense_categ.id
        if invoice_vals['fiscal_position']:
            fp_obj = self.pool.get('account.fiscal.position')
            fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
            # The fiscal position may remap the account.
            account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
        # set UoS if it's a sale and the picking doesn't have one
        uos_id = move_line.product_uos and move_line.product_uos.id or False
        if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
            uos_id = move_line.product_uom.id
        return {
            'name': name,
            'origin': origin,
            'invoice_id': invoice_id,
            'uos_id': uos_id,
            'product_id': move_line.product_id.id,
            'account_id': account_id,
            'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
            'discount': self._get_discount_invoice(cr, uid, move_line),
            'quantity': move_line.product_uos_qty or move_line.product_qty,
            'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
            'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
        }
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type invoice to be created
@return: Ids of created invoices for the pickings
"""
if context is None:
context = {}
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
partner_obj = self.pool.get('res.partner')
invoices_group = {}
res = {}
inv_type = type
for picking in self.browse(cr, uid, ids, context=context):
if picking.invoice_state != '2binvoiced':
continue
partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
if isinstance(partner, int):
partner = partner_obj.browse(cr, uid, [partner], context=context)[0]
if not partner:
raise osv.except_osv(_('Error, no partner!'),
_('Please put a partner on the picking list if you want to generate invoice.'))
if not inv_type:
inv_type = self._get_invoice_type(picking)
if group and partner.id in invoices_group:
invoice_id = invoices_group[partner.id]
invoice = invoice_obj.browse(cr, uid, invoice_id)
invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
else:
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
invoices_group[partner.id] = invoice_id
res[picking.id] = invoice_id
for move_line in picking.move_lines:
if move_line.state == 'cancel':
continue
if move_line.scrapped:
# do no invoice scrapped products
continue
vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
invoice_id, invoice_vals, context=context)
if vals:
invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
self._invoice_line_hook(cr, uid, move_line, invoice_line_id)
invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
set_total=(inv_type in ('in_invoice', 'in_refund')))
self.write(cr, uid, [picking.id], {
'invoice_state': 'invoiced',
}, context=context)
self._invoice_hook(cr, uid, picking, invoice_id)
self.write(cr, uid, res.keys(), {
'invoice_state': 'invoiced',
}, context=context)
return res
def test_done(self, cr, uid, ids, context=None):
""" Test whether the move lines are done or not.
@return: True or False
"""
ok = False
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state not in ('cancel','done'):
return False
if move.state=='done':
ok = True
return ok
def test_cancel(self, cr, uid, ids, context=None):
""" Test whether the move lines are canceled or not.
@return: True or False
"""
for pick in self.browse(cr, uid, ids, context=context):
for move in pick.move_lines:
if move.state not in ('cancel',):
return False
return True
def allow_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state == 'done':
raise osv.except_osv(_('Error!'), _('You cannot cancel the picking as some moves have been done. You should cancel the picking lines.'))
return True
    def unlink(self, cr, uid, ids, context=None):
        """Delete pickings, refusing done/cancelled ones and cancelling then
        deleting the moves of the others first.
        @raise osv.except_osv: when a picking is in state 'done' or 'cancel'
        """
        move_obj = self.pool.get('stock.move')
        if context is None:
            context = {}
        for pick in self.browse(cr, uid, ids, context=context):
            if pick.state in ['done','cancel']:
                raise osv.except_osv(_('Error!'), _('You cannot remove the picking which is in %s state!')%(pick.state,))
            else:
                ids2 = [move.id for move in pick.move_lines]
                ctx = context.copy()
                # Signal stock.move.unlink that it is called from here.
                ctx.update({'call_unlink':True})
                if pick.state != 'draft':
                    #Cancelling the move in order to affect Virtual stock of product
                    move_obj.action_cancel(cr, uid, ids2, ctx)
                #Removing the move
                move_obj.unlink(cr, uid, ids2, ctx)
        return super(stock_picking, self).unlink(cr, uid, ids, context=context)
# FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
    """ Makes partial picking and moves done.
    @param partial_datas : Dictionary containing details of partial picking
                      like partner_id, partner_id, delivery_date,
                      delivery moves with product_id, product_qty, uom
    @return: Dictionary of values
    """
    if context is None:
        context = {}
    else:
        # Work on a private copy: 'currency_id' is injected below.
        context = dict(context)
    res = {}
    move_obj = self.pool.get('stock.move')
    product_obj = self.pool.get('product.product')
    currency_obj = self.pool.get('res.currency')
    uom_obj = self.pool.get('product.uom')
    sequence_obj = self.pool.get('ir.sequence')
    wf_service = netsvc.LocalService("workflow")
    for pick in self.browse(cr, uid, ids, context=context):
        new_picking = None
        # Classify each move by comparing the quantity processed in the
        # wizard with the planned quantity:
        #   complete -> fully processed, too_few -> backorder needed,
        #   too_many -> more delivered than planned.
        complete, too_many, too_few = [], [], []
        move_product_qty, prodlot_ids, product_avail, partial_qty, product_uoms = {}, {}, {}, {}, {}
        for move in pick.move_lines:
            if move.state in ('done', 'cancel'):
                continue
            # Wizard data for this move is keyed 'move<id>'.
            partial_data = partial_datas.get('move%s'%(move.id), {})
            product_qty = partial_data.get('product_qty',0.0)
            move_product_qty[move.id] = product_qty
            product_uom = partial_data.get('product_uom',False)
            product_price = partial_data.get('product_price',0.0)
            product_currency = partial_data.get('product_currency',False)
            prodlot_id = partial_data.get('prodlot_id')
            prodlot_ids[move.id] = prodlot_id
            product_uoms[move.id] = product_uom
            # Processed quantity converted into the move's own UoM.
            partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
            if move.product_qty == partial_qty[move.id]:
                complete.append(move)
            elif move.product_qty > partial_qty[move.id]:
                too_few.append(move)
            else:
                too_many.append(move)
            # Average price computation
            if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
                product = product_obj.browse(cr, uid, move.product_id.id)
                move_currency_id = move.company_id.currency_id.id
                context['currency_id'] = move_currency_id
                qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
                # product_avail tracks the running on-hand quantity across
                # the moves of this picking, seeded from qty_available.
                if product.id in product_avail:
                    product_avail[product.id] += qty
                else:
                    product_avail[product.id] = product.qty_available
                if qty > 0:
                    new_price = currency_obj.compute(cr, uid, product_currency,
                            move_currency_id, product_price)
                    new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
                            product.uom_id.id)
                    if product.qty_available <= 0:
                        new_std_price = new_price
                    else:
                        # Get the standard price
                        amount_unit = product.price_get('standard_price', context=context)[product.id]
                        # Weighted average between existing stock value
                        # and the incoming quantity at its new price.
                        new_std_price = ((amount_unit * product_avail[product.id])\
                            + (new_price * qty))/(product_avail[product.id] + qty)
                    # Write the field according to price type field
                    product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})
                    # Record the values that were chosen in the wizard, so they can be
                    # used for inventory valuation if real-time valuation is enabled.
                    move_obj.write(cr, uid, [move.id],
                            {'price_unit': product_price,
                             'price_currency_id': product_currency})
        for move in too_few:
            product_qty = move_product_qty[move.id]
            # ecosoft
            product = product_obj.browse(cr, uid, move.product_id.id)
            # -- ecosoft
            if not new_picking:
                # The original picking keeps the processed part but gets a
                # fresh sequence name; the backorder inherits the old name.
                new_picking_name = pick.name
                self.write(cr, uid, [pick.id],
                        {'name': sequence_obj.get(cr, uid,
                                    'stock.picking.%s'%(pick.type)),
                        })
                new_picking = self.copy(cr, uid, pick.id,
                        {
                            'name': new_picking_name,
                            'move_lines' : [],
                            'state':'draft',
                        })
            if product_qty != 0:
                defaults = {
                    'product_qty' : product_qty,
                    # ecosoft
                    #'product_uos_qty': product_qty, #TODO: put correct uos_qty
                    'product_uos_qty': product_qty * (product.uos_id and product.uos_coeff or 1), #TODO: put correct uos_qty
                    # -- ecosoft
                    'picking_id' : new_picking,
                    'state': 'assigned',
                    'move_dest_id': False,
                    'price_unit': move.price_unit,
                    'product_uom': product_uoms[move.id]
                }
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    defaults.update(prodlot_id=prodlot_id)
                move_obj.copy(cr, uid, move.id, defaults)
            # The original move keeps the remaining (backordered) quantity.
            move_obj.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty - partial_qty[move.id],
                        'product_uos_qty': move.product_qty - partial_qty[move.id], #TODO: put correct uos_qty
                        'prodlot_id': False,
                        'tracking_id': False,
                    })
        if new_picking:
            move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
        for move in complete:
            defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
            if prodlot_ids.get(move.id):
                defaults.update({'prodlot_id': prodlot_ids[move.id]})
            move_obj.write(cr, uid, [move.id], defaults)
        for move in too_many:
            product_qty = move_product_qty[move.id]
            defaults = {
                'product_qty' : product_qty,
                'product_uos_qty': product_qty, #TODO: put correct uos_qty
                'product_uom': product_uoms[move.id]
            }
            prodlot_id = prodlot_ids.get(move.id)
            if prodlot_ids.get(move.id):
                defaults.update(prodlot_id=prodlot_id)
            if new_picking:
                defaults.update(picking_id=new_picking)
            move_obj.write(cr, uid, [move.id], defaults)
        # At first we confirm the new picking (if necessary)
        if new_picking:
            wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
            # Then we finish the good picking
            self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
            self.action_move(cr, uid, [new_picking], context=context)
            wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
            wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
            delivered_pack_id = new_picking
            back_order_name = self.browse(cr, uid, delivered_pack_id, context=context).name
            self.message_post(cr, uid, ids, body=_("Back order <em>%s</em> has been <b>created</b>.") % (back_order_name), context=context)
        else:
            self.action_move(cr, uid, [pick.id], context=context)
            wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
            delivered_pack_id = pick.id
        delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
        res[pick.id] = {'delivered_picking': delivered_pack.id or False}
    return res
# views associated to each picking type
_VIEW_LIST = {
    'out': 'view_picking_out_form',
    'in': 'view_picking_in_form',
    'internal': 'view_picking_form',
}

def _get_view_id(self, cr, uid, type):
    """Get the view id suiting the given type
    @param type: the picking type as a string
    @return: view id, or False if no view found
    """
    # Unknown types fall back to the generic internal picking form.
    res = self.pool.get('ir.model.data').get_object_reference(cr, uid,
        'stock', self._VIEW_LIST.get(type, 'view_picking_form'))
    return res and res[1] or False
class stock_production_lot(osv.osv):
    """Serial number (production lot) that can be attached to stock moves."""

    def name_get(self, cr, uid, ids, context=None):
        # Display serial numbers as PREFIX/SERIAL [INT_REF].
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
        res = []
        for record in reads:
            name = record['name']
            prefix = record['prefix']
            if prefix:
                name = prefix + '/' + name
            if record['ref']:
                name = '%s [%s]' % (name, record['ref'])
            res.append((record['id'], name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        # Try an exact prefix match first, then fall back to the name.
        args = args or []
        ids = []
        if name:
            ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
            if not ids:
                ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context)
        return self.name_get(cr, uid, ids, context)

    _name = 'stock.production.lot'
    _description = 'Serial Number'

    def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
        """ Gets stock of products for locations
        @return: Dictionary of values
        """
        if context is None:
            context = {}
        # Restrict to a single location when the caller provides one,
        # otherwise aggregate over all internal locations.
        if 'location_id' not in context:
            locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
        else:
            locations = context['location_id'] and [context['location_id']] or []
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = {}.fromkeys(ids, 0.0)
        if locations:
            cr.execute('''select
                    prodlot_id,
                    sum(qty)
                from
                    stock_report_prodlots
                where
                    location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
            res.update(dict(cr.fetchall()))
        return res

    def _stock_search(self, cr, uid, obj, name, args, context=None):
        """ Searches Ids of products
        @return: Ids of locations
        """
        # NOTE(review): args[0][1]/args[0][2] (domain operator and operand)
        # are concatenated into the SQL text. They come from an ORM domain,
        # not directly from user text, but this is still string-built SQL.
        locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
        cr.execute('''select
                prodlot_id,
                sum(qty)
            from
                stock_report_prodlots
            where
                location_id IN %s group by prodlot_id
            having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
        res = cr.fetchall()
        ids = [('id', 'in', map(lambda x: x[0], res))]
        return ids

    _columns = {
        'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
        'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
        'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
        'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
        'date': fields.datetime('Creation Date', required=True),
        'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
            help="Current quantity of products with this Serial Number available in company warehouses",
            digits_compute=dp.get_precision('Product Unit of Measure')),
        'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
        'company_id': fields.many2one('res.company', 'Company', select=True),
        'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
    }
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
        'product_id': lambda x, y, z, c: c.get('product_id', False),
    }
    _sql_constraints = [
        ('name_ref_uniq', 'unique (name, ref)', 'The combination of Serial Number and internal reference must be unique !'),
    ]

    def action_traceability(self, cr, uid, ids, context=None):
        """ It traces the information of a product
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
        return value

    def copy(self, cr, uid, id, default=None, context=None):
        # A duplicated lot gets a fresh creation date and no move history.
        context = context or {}
        default = default and default.copy() or {}
        default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
        return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)

stock_production_lot()
class stock_production_lot_revision(osv.osv):
    """Revision history entry attached to a serial number."""
    _name = 'stock.production.lot.revision'
    _description = 'Serial Number Revision'
    _columns = {
        'name': fields.char('Revision Name', size=64, required=True),
        'description': fields.text('Description'),
        'date': fields.date('Revision Date'),
        'indice': fields.char('Revision Number', size=16),
        'author_id': fields.many2one('res.users', 'Author'),
        # Deleting the lot deletes its revisions (ondelete='cascade').
        'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
        'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
    }
    _defaults = {
        # Default author: the user creating the revision (uid is the 3rd arg).
        'author_id': lambda x, y, z, c: z,
        'date': fields.date.context_today,
    }

stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
# location_dest_id is only used for predicting futur stocks
#
class stock_move(osv.osv):
    """Stock move: displacement of a product quantity between two locations."""

    def _getSSCC(self, cr, uid, context=None):
        # Return the id of the most recent stock.tracking (pack) created
        # by the current user, or False when the user has created none.
        cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
        res = cr.fetchone()
        return (res and res[0]) or False
    _name = "stock.move"
    _description = "Stock Move"
    _order = 'date_expected desc, id'
    # Do not log record creation in the mail thread.
    _log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
    """Open the partial-processing wizard for the selected moves.

    Creates a stock.partial.move wizard record bound to these moves and
    returns the act_window dict that opens it in a modal dialog.
    """
    if context is None:
        context = {}
    # Make sure the wizard sees these moves as the active records.
    if context.get('active_model') != self._name:
        context.update(active_ids=ids, active_model=self._name)
    wizard_obj = self.pool.get("stock.partial.move")
    partial_id = wizard_obj.create(cr, uid, {}, context=context)
    return {
        'name': _("Products to Process"),
        'view_mode': 'form',
        'view_id': False,
        'view_type': 'form',
        'res_model': 'stock.partial.move',
        'res_id': partial_id,
        'type': 'ir.actions.act_window',
        'nodestroy': True,
        'target': 'new',
        'domain': '[]',
        'context': context,
    }
def name_get(self, cr, uid, ids, context=None):
    """Display each move as "[code: ][origin/ ]source > destination"."""
    result = []
    for move in self.browse(cr, uid, ids, context=context):
        label = move.location_id.name + ' > ' + move.location_dest_id.name
        # optional prefixes
        if move.product_id.code:
            label = move.product_id.code + ': ' + label
        if move.picking_id.origin:
            label = move.picking_id.origin + '/ ' + label
        result.append((move.id, label))
    return result
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') or \
(move.product_id.track_incoming and move.location_id.usage == 'inventory') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
# Field declarations. Most operational fields become read-only once the
# move reaches the 'done' state.
_columns = {
    'name': fields.char('Description', required=True, select=True),
    'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
    'create_date': fields.datetime('Creation Date', readonly=True, select=True),
    'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
    'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
    'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
    'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
        required=True,states={'done': [('readonly', True)]},
        help="This is the quantity of products from an inventory "
             "point of view. For moves in the state 'done', this is the "
             "quantity of products that were actually moved. For other "
             "moves, this is the quantity of product that is planned to "
             "be moved. Lowering this quantity does not generate a "
             "backorder. Changing this quantity on assigned moves affects "
             "the product reservation, and should be done with care."
    ),
    'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
    'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
    'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
    'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
    'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
    'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
    'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
    'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', states={'done': [('readonly', True)]}, help="Serial number is used to put a serial number on the production", select=True),
    'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
    'auto_validate': fields.boolean('Auto Validate'),
    'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
    # Both history fields use the same relation table, with the roles of
    # the two columns swapped (children vs. parents).
    'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
    'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
    'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
    'note': fields.text('Notes'),
    'state': fields.selection([('draft', 'New'),
                               ('cancel', 'Cancelled'),
                               ('waiting', 'Waiting Another Move'),
                               ('confirmed', 'Waiting Availability'),
                               ('assigned', 'Available'),
                               ('done', 'Done'),
                               ], 'Status', readonly=True, select=True,
        help= "* New: When the stock move is created and not yet confirmed.\n"\
              "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
              "* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
              "* Available: When products are reserved, it is set to \'Available\'.\n"\
              "* Done: When the shipment is processed, the state is \'Done\'."),
    'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
    'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
    'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
    'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order of", select=True),
    'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Source", store=True),
    # used for colors in tree views:
    'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
    'type': fields.related('picking_id', 'type', type='selection', selection=[('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], string='Shipping Type'),
}
def _check_location(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if (record.state=='done') and (record.location_id.usage == 'view'):
raise osv.except_osv(_('Error'), _('You cannot move product %s from a location of type view %s.')% (record.product_id.name, record.location_id.name))
if (record.state=='done') and (record.location_dest_id.usage == 'view' ):
raise osv.except_osv(_('Error'), _('You cannot move product %s to a location of type view %s.')% (record.product_id.name, record.location_dest_id.name))
return True
# Python-level constraints evaluated on create/write of every move.
_constraints = [
    (_check_tracking,
        'You must assign a serial number for this product.',
        ['prodlot_id']),
    (_check_location, 'You cannot move products from or to a location of the type view.',
        ['location_id','location_dest_id']),
    (_check_product_lot,
        'You try to assign a lot which is not from the same product.',
        ['prodlot_id'])]
def _default_location_destination(self, cr, uid, context=None):
""" Gets default address of partner for destination location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
if context['move_line'][0]:
if isinstance(context['move_line'][0], (tuple, list)):
location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
else:
move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
location_id = move_list and move_list['location_dest_id'][0] or False
elif context.get('address_out_id', False):
property_out = self.pool.get('res.partner').browse(cr, uid, context['address_out_id'], context).property_stock_customer
location_id = property_out and property_out.id or False
else:
location_xml_id = False
if picking_type in ('in', 'internal'):
location_xml_id = 'stock_location_stock'
elif picking_type == 'out':
location_xml_id = 'stock_location_customers'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_location_source(self, cr, uid, context=None):
""" Gets default address of partner for source location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
try:
location_id = context['move_line'][0][2]['location_id']
except:
pass
elif context.get('address_in_id', False):
part_obj_add = self.pool.get('res.partner').browse(cr, uid, context['address_in_id'], context=context)
if part_obj_add:
location_id = part_obj_add.property_stock_supplier.id
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_suppliers'
elif picking_type in ('out', 'internal'):
location_xml_id = 'stock_location_stock'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_destination_address(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.partner_id.id
def _default_move_type(self, cr, uid, context=None):
""" Gets default type of move
@return: type
"""
if context is None:
context = {}
picking_type = context.get('picking_type')
type = 'internal'
if picking_type == 'in':
type = 'in'
elif picking_type == 'out':
type = 'out'
return type
# Default values; several of them depend on the 'picking_type' key
# passed through the context by the picking form.
_defaults = {
    'location_id': _default_location_source,
    'location_dest_id': _default_location_destination,
    'partner_id': _default_destination_address,
    'type': _default_move_type,
    'state': 'draft',
    'priority': '1',
    'product_qty': 1.0,
    'scrapped' : False,
    'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
    'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
    'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def write(self, cr, uid, ids, vals, context=None):
    """Block non-admin edits of core fields on processed moves.

    Once a move is 'done', its quantities, UoMs, product and locations
    may only be changed by the administrator (uid 1).
    """
    if isinstance(ids, (int, long)):
        ids = [ids]
    if uid != 1:
        # Fields that are frozen once the move has been processed.
        frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done' and frozen_fields.intersection(vals):
                raise osv.except_osv(_('Operation Forbidden!'),
                    _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
    return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate a move without copying its chaining history links."""
    # Work on a private copy of the defaults; never mutate the caller's dict.
    default = dict(default or {})
    default.update({'move_history_ids2': [], 'move_history_ids': []})
    return super(stock_move, self).copy(cr, uid, id, default, context=context)
def _auto_init(self, cursor, context=None):
    """Standard schema setup plus a composite index used by the
    availability/assignment queries on stock moves."""
    res = super(stock_move, self)._auto_init(cursor, context=context)
    # Older PostgreSQL has no CREATE INDEX IF NOT EXISTS, so existence
    # is checked through pg_indexes first.
    cursor.execute('SELECT indexname \
            FROM pg_indexes \
            WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
    if not cursor.fetchone():
        cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
                ON stock_move (product_id, state, location_id, location_dest_id)')
    return res
def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
                    loc_id=False, product_id=False, uom_id=False, context=None):
    """ On change of production lot gives a warning message.
    @param prodlot_id: Changed production lot id
    @param product_qty: Quantity of product
    @param loc_id: Location id
    @param product_id: Product id
    @return: Warning message
    """
    if not prodlot_id or not loc_id:
        return {}
    ctx = context and context.copy() or {}
    # Make stock_available below count only the selected source location.
    ctx['location_id'] = loc_id
    ctx.update({'raise-exception': True})
    uom_obj = self.pool.get('product.uom')
    product_obj = self.pool.get('product.product')
    product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
    prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
    location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
    uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
    # Available quantity of this serial number, converted to the move's UoM.
    amount_actual = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
    warning = {}
    # Only internal locations have reliable stock figures to warn about.
    if (location.usage == 'internal') and (product_qty > (amount_actual or 0.0)):
        warning = {
            'title': _('Insufficient Stock for Serial Number !'),
            'message': _('You are moving %.2f %s but only %.2f %s available for this serial number.') % (product_qty, uom.name, amount_actual, uom.name)
        }
    return {'warning': warning}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
                      product_uom, product_uos):
    """ On change of product quantity finds UoM and UoS quantities
    @param product_id: Product id
    @param product_qty: Changed Quantity of product
    @param product_uom: Unit of measure of product
    @param product_uos: Unit of sale of product
    @return: Dictionary of values
    """
    result = {
        'product_uos_qty': 0.00
    }
    warning = {}
    if (not product_id) or (product_qty <=0.0):
        result['product_qty'] = 0.0
        return {'value': result}
    product_obj = self.pool.get('product.product')
    uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
    # Warn if the quantity was decreased
    if ids:
        for move in self.read(cr, uid, ids, ['product_qty']):
            if product_qty < move['product_qty']:
                warning.update({
                    'title': _('Information'),
                    'message': _("By changing this quantity here, you accept the "
                                 "new quantity as complete: OpenERP will not "
                                 "automatically generate a back order.") })
                break
    # Only convert when the sale unit really differs from the stock unit.
    if product_uos and product_uom and (product_uom != product_uos):
        result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
    else:
        result['product_uos_qty'] = product_qty
    return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
                          product_uos, product_uom):
    """ On change of product quantity finds UoM and UoS quantities
    @param product_id: Product id
    @param product_uos_qty: Changed UoS Quantity of product
    @param product_uom: Unit of measure of product
    @param product_uos: Unit of sale of product
    @return: Dictionary of values
    """
    result = {
        'product_qty': 0.00
    }
    warning = {}
    if (not product_id) or (product_uos_qty <=0.0):
        result['product_uos_qty'] = 0.0
        return {'value': result}
    product_obj = self.pool.get('product.product')
    uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
    # Warn if the quantity was decreased
    for move in self.read(cr, uid, ids, ['product_uos_qty']):
        if product_uos_qty < move['product_uos_qty']:
            warning.update({
                'title': _('Warning: No Back Order'),
                'message': _("By changing the quantity here, you accept the "
                             "new quantity as complete: OpenERP will not "
                             "automatically generate a Back Order.") })
            break
    # Inverse conversion of onchange_quantity: UoS -> UoM via the coefficient.
    # NOTE(review): a uos_coeff of 0 would raise ZeroDivisionError here.
    if product_uos and product_uom and (product_uom != product_uos):
        result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
    else:
        result['product_qty'] = product_uos_qty
    return {'value': result, 'warning': warning}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
                        loc_dest_id=False, partner_id=False):
    """ On change of product id, if finds UoM, UoS, quantity and UoS quantity.
    @param prod_id: Changed Product id
    @param loc_id: Source location id
    @param loc_dest_id: Destination location id
    @param partner_id: Address id of partner
    @return: Dictionary of values
    """
    if not prod_id:
        return {}
    # Read the product in the partner's language so the default
    # description below is translated.
    lang = False
    if partner_id:
        addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
        if addr_rec:
            lang = addr_rec and addr_rec.lang or False
    ctx = {'lang': lang}
    product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
    uos_id = product.uos_id and product.uos_id.id or False
    result = {
        'product_uom': product.uom_id.id,
        'product_uos': uos_id,
        'product_qty': 1.00,
        # Derive the UoS quantity for qty 1 through the standard onchange.
        'product_uos_qty' : self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
        'prodlot_id' : False,
    }
    # Only prefill the description for new (unsaved) moves.
    if not ids:
        result['name'] = product.partner_ref
    if loc_id:
        result['location_id'] = loc_id
    if loc_dest_id:
        result['location_dest_id'] = loc_dest_id
    return {'value': result}
def onchange_move_type(self, cr, uid, ids, type, context=None):
    """ On change of move type gives sorce and destination location.
    @param type: Move Type
    @return: Dictionary of values
    """
    mod_obj = self.pool.get('ir.model.data')
    # Internal moves default to stock -> stock; 'in'/'out' override below.
    location_source_id = 'stock_location_stock'
    location_dest_id = 'stock_location_stock'
    if type == 'in':
        location_source_id = 'stock_location_suppliers'
        location_dest_id = 'stock_location_stock'
    elif type == 'out':
        location_source_id = 'stock_location_stock'
        location_dest_id = 'stock_location_customers'
    # Resolve each XML id and keep False when the record is missing or
    # not readable by the current user.
    try:
        source_location = mod_obj.get_object_reference(cr, uid, 'stock', location_source_id)
        self.pool.get('stock.location').check_access_rule(cr, uid, [source_location[1]], 'read', context=context)
    except (orm.except_orm, ValueError):
        source_location = False
    try:
        dest_location = mod_obj.get_object_reference(cr, uid, 'stock', location_dest_id)
        self.pool.get('stock.location').check_access_rule(cr, uid, [dest_location[1]], 'read', context=context)
    except (orm.except_orm, ValueError):
        dest_location = False
    return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
    """Keep the move date aligned with the scheduled date.

    @param date: current move date (not used in the computation)
    @param date_expected: scheduled date; when empty, "now" is used
    @return: onchange dict updating 'date'
    """
    # Falsy scheduled date -> default to the current timestamp.
    return {'value': {'date': date_expected or time.strftime('%Y-%m-%d %H:%M:%S')}}
def _chain_compute(self, cr, uid, moves, context=None):
    """ Finds whether the location has chained location type or not.
    @param moves: Stock moves
    @return: Dictionary containing destination location with chained location type.
    """
    result = {}
    for m in moves:
        dest = self.pool.get('stock.location').chained_location_get(
            cr,
            uid,
            m.location_dest_id,
            m.picking_id and m.picking_id.partner_id and m.picking_id.partner_id,
            m.product_id,
            context
        )
        if dest:
            if dest[1] == 'transparent':
                # Transparent chaining: redirect this move itself to the
                # chained location (shifting the date by the chained
                # delay) instead of creating a follow-up picking, then
                # recurse from the new destination.
                newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
                self.write(cr, uid, [m.id], {
                    'date': newdate,
                    'location_dest_id': dest[0].id})
                if m.picking_id and (dest[3] or dest[5]):
                    self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
                        'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
                        'type': dest[5] or m.picking_id.type
                    }, context=context)
                # Keep the in-memory browse record consistent with the
                # database write above before recursing.
                m.location_dest_id = dest[0]
                res2 = self._chain_compute(cr, uid, [m], context=context)
                for pick_id in res2.keys():
                    result.setdefault(pick_id, [])
                    result[pick_id] += res2[pick_id]
            else:
                # Non-transparent chaining: collect (move, chain spec)
                # per source picking for later processing.
                result.setdefault(m.picking_id, [])
                result[m.picking_id].append( (m, dest) )
    return result
    def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
        """Prepare the definition (values) to create a new chained picking.
        :param str picking_name: desired new picking name
        :param browse_record picking: source picking (being chained to)
        :param str picking_type: desired new picking type
        :param list moves_todo: specification of the stock moves to be later included in this
            picking, in the form::
                [[move, (dest_location, auto_packing, chained_delay, chained_journal,
                          chained_company_id, chained_picking_type)],
                 ...
                ]
            See also :meth:`stock_location.chained_location_get`.
        :return: dict of stock.picking field values
        """
        res_company = self.pool.get('res.company')
        return {
            'name': picking_name,
            'origin': tools.ustr(picking.origin or ''),
            'type': picking_type,
            'note': picking.note,
            'move_type': picking.move_type,
            # moves_todo[0][1] is the chained_location_get() tuple of the
            # first move; index 1 is auto_packing, 3 the journal, 4 the company.
            'auto_picking': moves_todo[0][1][1] == 'auto',
            'stock_journal_id': moves_todo[0][1][3],
            # fall back to the user's default company when the chain does not force one
            'company_id': moves_todo[0][1][4] or res_company._company_default_get(cr, uid, 'stock.company', context=context),
            'partner_id': picking.partner_id.id,
            'invoice_state': 'none',
            'date': picking.date,
        }
def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
picking_obj = self.pool.get('stock.picking')
return picking_obj.create(cr, uid, self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context))
    def create_chained_picking(self, cr, uid, moves, context=None):
        """Create the chained moves (and pickings) for the given moves.

        For every non-transparent chain found by :meth:`_chain_compute`,
        copy each move into the chained picking, link the original to its
        copy via move_dest_id/move_history_ids, confirm the new picking,
        and recurse on the newly created moves (chains can be multi-level).

        @param moves: browse records of the source stock moves
        @return: list of browse records of all chained moves created
        """
        res_obj = self.pool.get('res.company')
        location_obj = self.pool.get('stock.location')
        move_obj = self.pool.get('stock.move')
        wf_service = netsvc.LocalService("workflow")
        new_moves = []
        if context is None:
            context = {}
        seq_obj = self.pool.get('ir.sequence')
        for picking, todo in self._chain_compute(cr, uid, moves, context=context).items():
            # Picking type: forced by the chain (todo[0][1][5]) or derived
            # from the first move's locations.
            ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
            if picking:
                # name of new picking according to its type
                new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
                pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
                # Need to check name of old picking because it always considers picking as "OUT" when created from Sales Order
                old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
                if old_ptype != picking.type:
                    old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
                    self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name, 'type': old_ptype}, context=context)
            else:
                pickid = False
            for move, (loc, dummy, delay, dummy, company_id, ptype, invoice_state) in todo:
                # The chained move starts where the original ends, waits for
                # it, and is scheduled 'delay' days later.
                new_id = move_obj.copy(cr, uid, move.id, {
                    'location_id': move.location_dest_id.id,
                    'location_dest_id': loc.id,
                    'date': time.strftime('%Y-%m-%d'),
                    'picking_id': pickid,
                    'state': 'waiting',
                    'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context)  ,
                    'move_history_ids': [],
                    'date_expected': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
                    'move_history_ids2': []}
                )
                # Link original -> chained copy ((4, id) adds to the m2m).
                move_obj.write(cr, uid, [move.id], {
                    'move_dest_id': new_id,
                    'move_history_ids': [(4, new_id)]
                })
                new_moves.append(self.browse(cr, uid, [new_id])[0])
            if pickid:
                wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
        if new_moves:
            # Recurse: the chained moves may themselves be chained further.
            new_moves += self.create_chained_picking(cr, uid, new_moves, context)
        return new_moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move.
@return: List of ids.
"""
moves = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
self.create_chained_picking(cr, uid, moves, context)
return []
def action_assign(self, cr, uid, ids, *args):
""" Changes state to confirmed or waiting.
@return: List of values
"""
todo = []
for move in self.browse(cr, uid, ids):
if move.state in ('confirmed', 'waiting'):
todo.append(move.id)
res = self.check_assign(cr, uid, todo)
return res
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
def cancel_assign(self, cr, uid, ids, context=None):
""" Changes the state to confirmed.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
# fix for bug lp:707031
# called write of related picking because changing move availability does
# not trigger workflow of picking in order to change the state of picking
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
#
# Duplicate stock.move
#
    def check_assign(self, cr, uid, ids, context=None):
        """ Checks the product type and accordingly writes the state.

        Consumables and moves sourced from a supplier location are assigned
        unconditionally. Stockable products are reserved through
        _product_reserve(); when the reservation spans several source
        locations the move is split (first chunk updated via SQL, the others
        copied).

        @return: No. of moves done
        """
        done = []
        count = 0
        pickings = {}
        if context is None:
            context = {}
        for move in self.browse(cr, uid, ids, context=context):
            if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
                # No reservation needed for consumables / incoming goods.
                if move.state in ('confirmed', 'waiting'):
                    done.append(move.id)
                pickings[move.picking_id.id] = 1
                continue
            if move.state in ('confirmed', 'waiting'):
                # Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
                res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
                if res:
                    #_product_available_test depends on the next status for correct functioning
                    #the test does not work correctly if the same product occurs multiple times
                    #in the same order. This is e.g. the case when using the button 'split in two' of
                    #the stock outgoing form
                    self.write(cr, uid, [move.id], {'state':'assigned'})
                    done.append(move.id)
                    pickings[move.picking_id.id] = 1
                    # First reservation chunk (qty, location) updates the move
                    # in place; raw SQL avoids re-triggering ORM machinery.
                    r = res.pop(0)
                    product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, ids, move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
                    cr.execute('update stock_move set location_id=%s, product_qty=%s, product_uos_qty=%s where id=%s', (r[1], r[0],product_uos_qty, move.id))
                    # Remaining chunks become copies of the move, one per
                    # source location.
                    while res:
                        r = res.pop(0)
                        move_id = self.copy(cr, uid, move.id, {'product_uos_qty': product_uos_qty, 'product_qty': r[0], 'location_id': r[1]})
                        done.append(move_id)
        if done:
            count += len(done)
            self.write(cr, uid, done, {'state': 'assigned'})
        if count:
            for pick_id in pickings:
                wf_service = netsvc.LocalService("workflow")
                wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
        return count
def setlast_tracking(self, cr, uid, ids, context=None):
tracking_obj = self.pool.get('stock.tracking')
picking = self.browse(cr, uid, ids, context=context)[0].picking_id
if picking:
last_track = [line.tracking_id.id for line in picking.move_lines if line.tracking_id]
if not last_track:
last_track = tracking_obj.create(cr, uid, {}, context=context)
else:
last_track.sort()
last_track = last_track[-1]
self.write(cr, uid, ids, {'tracking_id': last_track})
return True
#
# Cancel move => cancel others move and pickings
#
    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels the moves and if all moves are cancelled it cancels the picking.

        Downstream moves waiting on a cancelled move are demoted back to
        'confirmed'. Pickings whose moves are now all cancelled are cancelled
        too (unless called from unlink, flagged by 'call_unlink' in context).

        @return: True
        """
        if not len(ids):
            return True
        if context is None:
            context = {}
        pickings = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
                if move.picking_id:
                    pickings.add(move.picking_id.id)
            if move.move_dest_id and move.move_dest_id.state == 'waiting':
                # The chained move can no longer wait on us.
                self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'})
                if context.get('call_unlink',False) and move.move_dest_id.picking_id:
                    wf_service = netsvc.LocalService("workflow")
                    wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
        self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False})
        if not context.get('call_unlink',False):
            for pick in self.pool.get('stock.picking').browse(cr, uid, list(pickings), context=context):
                if all(move.state == 'cancel' for move in pick.move_lines):
                    self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'})
        wf_service = netsvc.LocalService("workflow")
        for id in ids:
            wf_service.trg_trigger(uid, 'stock.move', id, cr)
        return True
    def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
        """
        Return the accounts and journal to use to post Journal Entries for the real-time
        valuation of the move.

        :param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :return: (journal_id, acc_src, acc_dest, acc_valuation) tuple of ids
        :raise: osv.except_osv() is any mandatory account or journal is not defined.
        """
        product_obj=self.pool.get('product.product')
        accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
        # Location-level valuation accounts override the product/category ones.
        if move.location_id.valuation_out_account_id:
            acc_src = move.location_id.valuation_out_account_id.id
        else:
            acc_src = accounts['stock_account_input']
        if move.location_dest_id.valuation_in_account_id:
            acc_dest = move.location_dest_id.valuation_in_account_id.id
        else:
            acc_dest = accounts['stock_account_output']
        acc_valuation = accounts.get('property_stock_valuation_account_id', False)
        journal_id = accounts['stock_journal']
        # Counterpart accounts must differ from the valuation account,
        # otherwise the entry would net to zero on a single account.
        if acc_dest == acc_valuation:
            raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Output Account of this product and Valuation account on category of this product are same.'))
        if acc_src == acc_valuation:
            raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))
        if not acc_src:
            raise osv.except_osv(_('Error!'), _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
                                    (move.product_id.name, move.product_id.id,))
        if not acc_dest:
            raise osv.except_osv(_('Error!'), _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
                                    (move.product_id.name, move.product_id.id,))
        if not journal_id:
            raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
                                    (move.product_id.categ_id.name, move.product_id.categ_id.id,))
        if not acc_valuation:
            raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
                                    (move.product_id.categ_id.name, move.product_id.categ_id.id,))
        return journal_id, acc_src, acc_dest, acc_valuation
    def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
        """
        Return the reference amount and reference currency representing the inventory valuation for this move.
        These reference values should possibly be converted before being posted in Journals to adapt to the primary
        and secondary currencies of the relevant accounts.

        :return: (reference_amount, reference_currency_id) tuple
        """
        product_uom_obj = self.pool.get('product.uom')
        # by default the reference currency is that of the move's company
        reference_currency_id = move.company_id.currency_id.id
        # Quantity expressed in the product's default UoM for valuation.
        default_uom = move.product_id.uom_id.id
        qty = product_uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)
        # if product is set to average price and a specific value was entered in the picking wizard,
        # we use it
        if move.product_id.cost_method == 'average' and move.price_unit:
            reference_amount = qty * move.price_unit
            reference_currency_id = move.price_currency_id.id or reference_currency_id
        # Otherwise we default to the company's valuation price type, considering that the values of the
        # valuation field are expressed in the default currency of the move's company.
        else:
            if context is None:
                context = {}
            currency_ctx = dict(context, currency_id = move.company_id.currency_id.id)
            amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
            reference_amount = amount_unit * qty
        return reference_amount, reference_currency_id
    def _create_product_valuation_moves(self, cr, uid, move, context=None):
        """
        Generate the appropriate accounting moves if the product being moves is subject
        to real_time valuation tracking, and the source or destination location is
        a transit location or is outside of the company.
        """
        if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
            if context is None:
                context = {}
            # Force the relevant company so accounts/prices are resolved per side.
            src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
            dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
            account_moves = []
            # Outgoing moves (or cross-company output part)
            if move.location_id.company_id \
                and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
                     or move.location_id.company_id != move.location_dest_id.company_id):
                journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
                reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
                #returning goods to supplier
                if move.location_dest_id.usage == 'supplier':
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
                else:
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]
            # Incoming moves (or cross-company input part)
            if move.location_dest_id.company_id \
                and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
                     or move.location_id.company_id != move.location_dest_id.company_id):
                journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
                # NOTE(review): the reference values below are computed with
                # src_company_ctx while the accounts use dest_company_ctx —
                # possibly intentional (value the goods at origin), but worth
                # confirming for cross-company moves.
                reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
                #goods return from customer
                if move.location_id.usage == 'customer':
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
                else:
                    account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]
            move_obj = self.pool.get('account.move')
            for j_id, move_lines in account_moves:
                move_obj.create(cr, uid,
                        {
                         'journal_id': j_id,
                         'line_id': move_lines,
                         'ref': move.picking_id and move.picking_id.name})
    def action_done(self, cr, uid, ids, context=None):
        """ Makes the move done and if all moves are done, it will finish the picking.

        Draft moves are confirmed first. For each remaining move: trigger its
        chained destination move once no other upstream move is still pending,
        post real-time valuation entries, then set every processed move to
        'done' and poke the workflows of moves and pickings.

        @return: True
        """
        picking_ids = []
        move_ids = []
        wf_service = netsvc.LocalService("workflow")
        if context is None:
            context = {}
        todo = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state=="draft":
                todo.append(move.id)
        if todo:
            self.action_confirm(cr, uid, todo, context=context)
            todo = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ['done','cancel']:
                continue
            move_ids.append(move.id)
            if move.picking_id:
                picking_ids.append(move.picking_id.id)
            if move.move_dest_id.id and (move.state != 'done'):
                # Downstream move should only be triggered if this move is the last pending upstream move
                other_upstream_move_ids = self.search(cr, uid, [('id','!=',move.id),('state','not in',['done','cancel']),
                                            ('move_dest_id','=',move.move_dest_id.id)], context=context)
                if not other_upstream_move_ids:
                    self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
                    if move.move_dest_id.state in ('waiting', 'confirmed'):
                        # The chained move becomes available now.
                        self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
                        if move.move_dest_id.picking_id:
                            wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
                        if move.move_dest_id.auto_validate:
                            self.action_done(cr, uid, [move.move_dest_id.id], context=context)
            # Post accounting entries for real-time valued products.
            self._create_product_valuation_moves(cr, uid, move, context=context)
            if move.state not in ('confirmed','done','assigned'):
                todo.append(move.id)
        if todo:
            self.action_confirm(cr, uid, todo, context=context)
        self.write(cr, uid, move_ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        for id in move_ids:
            wf_service.trg_trigger(uid, 'stock.move', id, cr)
        for pick_id in picking_ids:
            wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
        return True
    def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
        """
        Generate the account.move.line values to post to track the stock valuation difference due to the
        processing of the given stock move.

        :return: list of (0, 0, vals) tuples, one debit line and one credit line
        """
        # prepare default values considering that the destination accounts have the reference_currency_id as their main currency
        partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
        debit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id and move.product_id.id or False,
                    'quantity': move.product_qty,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': time.strftime('%Y-%m-%d'),
                    'partner_id': partner_id,
                    'debit': reference_amount,
                    'account_id': dest_account_id,
        }
        credit_line_vals = {
                    'name': move.name,
                    'product_id': move.product_id and move.product_id.id or False,
                    'quantity': move.product_qty,
                    'ref': move.picking_id and move.picking_id.name or False,
                    'date': time.strftime('%Y-%m-%d'),
                    'partner_id': partner_id,
                    'credit': reference_amount,
                    'account_id': src_account_id,
        }
        # if we are posting to accounts in a different currency, provide correct values in both currencies correctly
        # when compatible with the optional secondary currency on the account.
        # Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
        # or if it's the same as that of the secondary amount being posted.
        account_obj = self.pool.get('account.account')
        src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
        src_main_currency_id = src_acct.company_id.currency_id.id
        dest_main_currency_id = dest_acct.company_id.currency_id.id
        cur_obj = self.pool.get('res.currency')
        if reference_currency_id != src_main_currency_id:
            # fix credit line:
            credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
            if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
                # credit entry: the secondary amount is negative
                credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
        if reference_currency_id != dest_main_currency_id:
            # fix debit line:
            debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
            if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
                debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)
        return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
for move in self.browse(cr, uid, ids, context=context):
if move.state != 'draft' and not ctx.get('call_unlink', False):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(
cr, uid, ids, context=ctx)
# _create_lot function is not used anywhere
def _create_lot(self, cr, uid, ids, product_id, prefix=False):
""" Creates production lot
@return: Production lot id
"""
prodlot_obj = self.pool.get('stock.production.lot')
prodlot_id = prodlot_obj.create(cr, uid, {'prefix': prefix, 'product_id': product_id})
return prodlot_id
def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
@return: Scraped lines
"""
#quantity should in MOVE UOM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'tracking_id': move.tracking_id.id,
'prodlot_id': move.prodlot_id.id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
self.action_done(cr, uid, res, context=context)
return res
# action_split function is not used anywhere
# FIXME: deprecate this method
    def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
        """ Split Stock Move lines into production lot which specified split by quantity.
        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be splited
        @param split_by_qty : specify split by qty
        @param prefix : specify prefix of production lot
        @param with_lot : if true, prodcution lot will assign for split line otherwise not.
        @param context: context arguments
        @return: Splited move lines
        """
        if context is None:
            context = {}
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            if split_by_qty <= 0 or quantity == 0:
                return res
            # UoS quantity proportional to one chunk of split_by_qty UoM units.
            uos_qty = split_by_qty / move.product_qty * move.product_uos_qty
            quantity_rest = quantity % split_by_qty
            # NOTE(review): uos_qty_rest is derived from split_by_qty (same as
            # uos_qty) rather than from quantity_rest — looks suspicious for
            # the remainder chunk; confirm before relying on UoS values here.
            uos_qty_rest = split_by_qty / move.product_qty * move.product_uos_qty
            update_val = {
                'product_qty': split_by_qty,
                'product_uos_qty': uos_qty,
            }
            for idx in range(int(quantity//split_by_qty)):
                if not idx and move.product_qty<=quantity:
                    # First chunk reuses the original move when it is fully split.
                    current_move = move.id
                else:
                    current_move = self.copy(cr, uid, move.id, {'state': move.state})
                res.append(current_move)
                if with_lot:
                    update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
                self.write(cr, uid, [current_move], update_val)
            if quantity_rest > 0:
                # Leftover quantity that does not fill a whole chunk.
                idx = int(quantity//split_by_qty)
                update_val['product_qty'] = quantity_rest
                update_val['product_uos_qty'] = uos_qty_rest
                if not idx and move.product_qty<=quantity:
                    current_move = move.id
                else:
                    current_move = self.copy(cr, uid, move.id, {'state': move.state})
                res.append(current_move)
                if with_lot:
                    update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
                self.write(cr, uid, [current_move], update_val)
        return res
    def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
        """ Consumed product with specific quatity from specific source location

        When the requested quantity is smaller than the move quantity, the
        move is split: a copy carries the consumed part while the original
        keeps the remainder. All consumed moves are then processed to 'done'.

        @param cr: the database cursor
        @param uid: the user id
        @param ids: ids of stock move object to be consumed
        @param quantity : specify consume quantity (in the MOVE UoM)
        @param location_id : specify source location
        @param context: context arguments
        @return: Consumed lines
        """
        #quantity should in MOVE UOM
        if context is None:
            context = {}
        if quantity <= 0:
            raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            move_qty = move.product_qty
            if move_qty <= 0:
                raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
            quantity_rest = move.product_qty
            quantity_rest -= quantity
            # UoS quantities are kept proportional to the UoM split.
            uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
            if quantity_rest <= 0:
                # Consuming everything (or more): clamp to the move quantity.
                quantity_rest = 0
                uos_qty_rest = 0
                quantity = move.product_qty
            uos_qty = quantity / move_qty * move.product_uos_qty
            if quantity_rest > 0:
                # Partial consumption: the copy is consumed, the original
                # keeps the remainder.
                default_val = {
                    'product_qty': quantity,
                    'product_uos_qty': uos_qty,
                    'state': move.state,
                    'location_id': location_id or move.location_id.id,
                }
                current_move = self.copy(cr, uid, move.id, default_val)
                res += [current_move]
                update_val = {}
                update_val['product_qty'] = quantity_rest
                update_val['product_uos_qty'] = uos_qty_rest
                self.write(cr, uid, [move.id], update_val)
            else:
                # Full consumption: the original move itself is consumed.
                quantity_rest = quantity
                uos_qty_rest =  uos_qty
                res += [move.id]
                update_val = {
                        'product_qty' : quantity_rest,
                        'product_uos_qty' : uos_qty_rest,
                        'location_id': location_id or move.location_id.id,
                }
                self.write(cr, uid, [move.id], update_val)
        self.action_done(cr, uid, res, context=context)
        return res
# FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
    def do_partial(self, cr, uid, ids, partial_datas, context=None):
        """ Makes partial pickings and moves done.

        Moves are classified against the quantity actually processed:
        complete (exact match), too_few (partially processed: the move is
        split) or too_many (processed more than planned: the move quantity
        is kept). Average-cost incoming products also get their standard
        price recomputed. Finally each complete move is done and the
        picking is closed when all of its moves are done.

        @param partial_datas: Dictionary containing details of partial picking
                          like partner_id, delivery_date, delivery
                          moves with product_id, product_qty, uom
        @return: list of ids of the moves processed to 'done'
        """
        res = {}
        picking_obj = self.pool.get('stock.picking')
        product_obj = self.pool.get('product.product')
        currency_obj = self.pool.get('res.currency')
        uom_obj = self.pool.get('product.uom')
        wf_service = netsvc.LocalService("workflow")
        if context is None:
            context = {}
        complete, too_many, too_few = [], [], []
        move_product_qty = {}
        prodlot_ids = {}
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('done', 'cancel'):
                continue
            # wizard data is keyed 'move<id>' per move
            partial_data = partial_datas.get('move%s'%(move.id), False)
            assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
            product_qty = partial_data.get('product_qty',0.0)
            move_product_qty[move.id] = product_qty
            product_uom = partial_data.get('product_uom',False)
            product_price = partial_data.get('product_price',0.0)
            product_currency = partial_data.get('product_currency',False)
            prodlot_ids[move.id] = partial_data.get('prodlot_id')
            if move.product_qty == product_qty:
                complete.append(move)
            elif move.product_qty > product_qty:
                too_few.append(move)
            else:
                too_many.append(move)
            # Average price computation
            if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
                product = product_obj.browse(cr, uid, move.product_id.id)
                move_currency_id = move.company_id.currency_id.id
                context['currency_id'] = move_currency_id
                qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
                if qty > 0:
                    new_price = currency_obj.compute(cr, uid, product_currency,
                            move_currency_id, product_price)
                    new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
                            product.uom_id.id)
                    if product.qty_available <= 0:
                        new_std_price = new_price
                    else:
                        # Get the standard price
                        amount_unit = product.price_get('standard_price', context=context)[product.id]
                        # weighted average of existing stock and incoming goods
                        new_std_price = ((amount_unit * product.qty_available)\
                            + (new_price * qty))/(product.qty_available + qty)
                    product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
                    # Record the values that were chosen in the wizard, so they can be
                    # used for inventory valuation if real-time valuation is enabled.
                    self.write(cr, uid, [move.id],
                                {'price_unit': product_price,
                                 'price_currency_id': product_currency,
                                })
        for move in too_few:
            product_qty = move_product_qty[move.id]
            # ecosoft
            product = product_obj.browse(cr, uid, move.product_id.id)
            # -- ecosoft
            if product_qty != 0:
                # Split: a new assigned move carries the processed part...
                defaults = {
                            'product_qty' : product_qty,
                            # ecosoft
                            #'product_uos_qty': product_qty, #TODO: put correct uos_qty
                            'product_uos_qty': product_qty * (product.uos_id and product.uos_coeff or 1), #TODO: put correct uos_qty
                            # -- ecosoft
                            'picking_id' : move.picking_id.id,
                            'state': 'assigned',
                            'move_dest_id': False,
                            'price_unit': move.price_unit,
                            }
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    defaults.update(prodlot_id=prodlot_id)
                new_move = self.copy(cr, uid, move.id, defaults)
                complete.append(self.browse(cr, uid, new_move))
            # ...while the original keeps the remainder.
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty - product_qty,
                        'product_uos_qty': move.product_qty - product_qty,
                        'prodlot_id': False,
                        'tracking_id': False,
                    })
        for move in too_many:
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty,
                        'product_uos_qty': move.product_qty,
                    })
            complete.append(move)
        for move in complete:
            if prodlot_ids.get(move.id):
                self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
            self.action_done(cr, uid, [move.id], context=context)
            if  move.picking_id.id :
                # TOCHECK : Done picking if all moves are done
                cr.execute("""
                    SELECT move.id FROM stock_picking pick
                    RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
                    WHERE pick.id = %s""",
                            ('done', move.picking_id.id))
                res = cr.fetchall()
                if len(res) == len(move.picking_id.move_lines):
                    picking_obj.action_move(cr, uid, [move.picking_id.id])
                    wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
        return [move.id for move in complete]
stock_move()  # instantiate to register the model with the ORM (pre-v7 OpenERP convention)
class stock_inventory(osv.osv):
    """Physical inventory: a dated set of counted lines whose validation
    generates the corrective stock moves."""
    _name = "stock.inventory"
    _description = "Inventory"
    _columns = {
        'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date_done': fields.datetime('Date done'),
        'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
        # corrective moves generated at confirmation time
        'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
        'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),
    }
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'state': 'draft',
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
    }
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_ids': [], 'date_done': False})
return super(stock_inventory, self).copy(cr, uid, id, default, context=context)
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
""" Confirm the inventory and writes its finished date
@return: True
"""
if context is None:
context = {}
# to perform the correct inventory corrections we need analyze stock location by
# location, never recursively, so we use a special context
product_context = dict(context, compute_child=False)
location_obj = self.pool.get('stock.location')
for inv in self.browse(cr, uid, ids, context=context):
move_ids = []
for line in inv.inventory_line_id:
pid = line.product_id.id
product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
change = line.product_qty - amount
lot_id = line.prod_lot_id.id
if change:
location_id = line.product_id.property_stock_inventory.id
value = {
'name': _('INV:') + (line.inventory_id.name or ''),
'product_id': line.product_id.id,
'product_uom': line.product_uom.id,
'prodlot_id': lot_id,
'date': inv.date,
}
if change > 0:
value.update( {
'product_qty': change,
'location_id': location_id,
'location_dest_id': line.location_id.id,
})
else:
value.update( {
'product_qty': -change,
'location_id': line.location_id.id,
'location_dest_id': location_id,
})
move_ids.append(self._inventory_line_hook(cr, uid, line, value))
self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
return True
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
""" Cancels both stock move and inventory
@return: True
"""
move_obj = self.pool.get('stock.move')
account_move_obj = self.pool.get('account.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
for move in inv.move_ids:
account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
if account_move_ids:
account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
for account_move in account_move_data_l:
if account_move['state'] == 'posted':
raise osv.except_osv(_('User Error!'),
_('In order to cancel this inventory, you must first unpost related journal entries.'))
account_move_obj.unlink(cr, uid, [account_move['id']], context=context)
self.write(cr, uid, [inv.id], {'state': 'cancel'}, context=context)
return True
stock_inventory()
class stock_inventory_line(osv.osv):
    """One counted product/location row of a physical inventory."""
    _name = "stock.inventory.line"
    _description = "Inventory Line"
    _rec_name = "inventory_id"
    _columns = {
        'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        'company_id': fields.related('inventory_id','company_id',type='many2one',relation='res.company',string='Company',store=True, select=True, readonly=True),
        'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
        'state': fields.related('inventory_id','state',type='char',string='Status',readonly=True),
    }
    def _default_stock_location(self, cr, uid, context=None):
        """Return the id of the standard 'Stock' location, or False when it
        is missing or not readable by the current user."""
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id
    _defaults = {
        'location_id': _default_stock_location
    }
    def on_change_product_id(self, cr, uid, ids, location_id, product, uom=False, to_date=False):
        """ Changes UoM and name if product_id changes.
        @param location_id: Location id
        @param product: Changed product_id
        @param uom: UoM product
        @return: Dictionary of changed values
        """
        if not product:
            return {'value': {'product_qty': 0.0, 'product_uom': False, 'prod_lot_id': False}}
        obj_product = self.pool.get('product.product').browse(cr, uid, product)
        uom = uom or obj_product.uom_id.id
        # theoretical quantity at this location, expressed in the chosen UoM
        amount = self.pool.get('stock.location')._product_get(cr, uid, location_id, [product], {'uom': uom, 'to_date': to_date, 'compute_child': False})[product]
        result = {'product_qty': amount, 'product_uom': uom, 'prod_lot_id': False}
        return {'value': result}
stock_inventory_line()
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
    """Warehouse: groups the three standard locations (input, stock, output)
    used to route incoming and outgoing goods for a company."""
    _name = "stock.warehouse"
    _description = "Warehouse"
    _columns = {
        'name': fields.char('Name', size=128, required=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
        'partner_id': fields.many2one('res.partner', 'Owner Address'),
        'lot_input_id': fields.many2one('stock.location', 'Location Input', required=True, domain=[('usage','<>','view')]),
        'lot_stock_id': fields.many2one('stock.location', 'Location Stock', required=True, domain=[('usage','=','internal')]),
        'lot_output_id': fields.many2one('stock.location', 'Location Output', required=True, domain=[('usage','<>','view')]),
    }
    def _default_location_id(self, cr, uid, xml_id, context=None):
        """Return the id of the location referenced by ``xml_id`` in the
        'stock' module data, or False when it does not exist or the user
        has no read access on it."""
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', xml_id)
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exist
            location_id = False
        return location_id
    def _default_lot_input_stock_id(self, cr, uid, context=None):
        """Default for both input and stock locations: the 'Stock' location."""
        return self._default_location_id(cr, uid, 'stock_location_stock', context=context)
    def _default_lot_output_id(self, cr, uid, context=None):
        """Default for the output location: the 'Output' location.
        BUG FIX: the original assigned ``lot_input_stock_id`` in the ``try``
        branch but returned ``lot_output_id``, which raised NameError whenever
        the output location was found and readable."""
        return self._default_location_id(cr, uid, 'stock_location_output', context=context)
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
        'lot_input_id': _default_lot_input_stock_id,
        'lot_stock_id': _default_lot_input_stock_id,
        'lot_output_id': _default_lot_output_id,
    }
stock_warehouse()
#----------------------------------------------------------
# "Empty" Classes that are used to vary from the original stock.picking (that are dedicated to the internal pickings)
# in order to offer a different usability with different views, labels, available reports/wizards...
#----------------------------------------------------------
class stock_picking_in(osv.osv):
    """Incoming shipments: a thin proxy over ``stock.picking`` (same SQL
    table) offering dedicated views and labels for type 'in' pickings."""
    _name = "stock.picking.in"
    _inherit = "stock.picking"
    _table = "stock_picking"
    _description = "Incoming Shipments"
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        # delegate to stock.picking so record rules and ordering stay consistent
        return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
    def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights on the stock.picking object
        return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules on the stock.picking object
        return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
    def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        #override in order to trigger the workflow of stock.picking at the end of create, write and unlink operation
        #instead of its own workflow (which does not exist)
        return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
    def _workflow_signal(self, cr, uid, ids, signal, context=None):
        #override in order to fire the workflow signal on given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
        return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
    _columns = {
        'backorder_id': fields.many2one('stock.picking.in', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'state': fields.selection(
            [('draft', 'Draft'),
            ('auto', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('assigned', 'Ready to Receive'),
            ('done', 'Received'),
            ('cancel', 'Cancelled'),],
            'Status', readonly=True, select=True,
            help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
                 * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
                 * Waiting Availability: still waiting for the availability of products\n
                 * Ready to Receive: products reserved, simply waiting for confirmation.\n
                 * Received: has been processed, can't be modified or cancelled anymore\n
                 * Cancelled: has been cancelled, can't be confirmed anymore"""),
    }
    _defaults = {
        'type': 'in',
    }
class stock_picking_out(osv.osv):
    """Delivery orders: a thin proxy over ``stock.picking`` (same SQL
    table) offering dedicated views and labels for type 'out' pickings."""
    _name = "stock.picking.out"
    _inherit = "stock.picking"
    _table = "stock_picking"
    _description = "Delivery Orders"
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        # delegate to stock.picking so record rules and ordering stay consistent
        return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
    def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights on the stock.picking object
        return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
    def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules on the stock.picking object
        return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
    def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
        #override in order to trigger the workflow of stock.picking at the end of create, write and unlink operation
        #instead of its own workflow (which does not exist)
        return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
    def _workflow_signal(self, cr, uid, ids, signal, context=None):
        #override in order to fire the workflow signal on given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
        return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
    _columns = {
        'backorder_id': fields.many2one('stock.picking.out', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'state': fields.selection(
            [('draft', 'Draft'),
            ('auto', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('assigned', 'Ready to Deliver'),
            ('done', 'Delivered'),
            ('cancel', 'Cancelled'),],
            'Status', readonly=True, select=True,
            help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
                 * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
                 * Waiting Availability: still waiting for the availability of products\n
                 * Ready to Deliver: products reserved, simply waiting for confirmation.\n
                 * Delivered: has been processed, can't be modified or cancelled anymore\n
                 * Cancelled: has been cancelled, can't be confirmed anymore"""),
    }
    _defaults = {
        'type': 'out',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 51.795668 | 343 | 0.588281 |
12db7730b345b452b1854f30af2881dd6d72ee41 | 1,180 | py | Python | apps/iam/serializers.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | [
"MIT"
] | 8 | 2021-07-08T06:53:57.000Z | 2022-03-14T04:05:27.000Z | apps/iam/serializers.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | [
"MIT"
] | 107 | 2021-07-22T02:20:07.000Z | 2022-03-14T08:37:23.000Z | apps/iam/serializers.py | iSecloud/bk-process-config-manager | f44c01b7a28dd9328cce6e6066eae42d5365070d | [
"MIT"
] | 12 | 2021-07-09T08:59:01.000Z | 2022-03-08T13:40:41.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class IamActionResourceRequestSerializer(serializers.Serializer):
    """Validate an IAM permission request: a list of action ids plus the
    resource instances the actions are to be checked against."""
    class ResourceSerializer(serializers.Serializer):
        # a single resource instance, identified by its type and id
        type = serializers.CharField(help_text=_("资源类型"))
        id = serializers.CharField(help_text=_("资源ID"))
    action_ids = serializers.ListField(help_text=_("动作ID列表"))
    resources = serializers.ListField(help_text=_("资源列表"), child=ResourceSerializer())
| 53.636364 | 115 | 0.779661 |
80d17aa8059814956657ca819147673516cfdb52 | 13,211 | py | Python | niftynet/application/autoencoder_application.py | Min-Sheng/NiftyNet | 13ff54018d3ea282b94af94b6bce7bd67c0d7cc5 | [
"Apache-2.0"
] | null | null | null | niftynet/application/autoencoder_application.py | Min-Sheng/NiftyNet | 13ff54018d3ea282b94af94b6bce7bd67c0d7cc5 | [
"Apache-2.0"
] | null | null | null | niftynet/application/autoencoder_application.py | Min-Sheng/NiftyNet | 13ff54018d3ea282b94af94b6bce7bd67c0d7cc5 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import ApplicationNetFactory
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import NETWORK_OUTPUT
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.engine.sampler_linear_interpolate import LinearInterpolateSampler
from niftynet.engine.sampler_resize import ResizeSampler
from niftynet.engine.windows_aggregator_identity import WindowAsImageAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.loss_autoencoder import LossFunction
from niftynet.utilities.util_common import look_up_operations
SUPPORTED_INPUT = set(['image', 'feature'])
SUPPORTED_INFERENCE = \
set(['encode', 'encode-decode', 'sample', 'linear_interpolation'])
class AutoencoderApplication(BaseApplication):
    """NiftyNet application driving (variational) autoencoder training and
    inference: encoding, encode-decode, random sampling from the latent
    space, and linear interpolation between encoded features."""
    REQUIRED_CONFIG_SECTION = "AUTOENCODER"
    def __init__(self, net_param, action_param, action):
        BaseApplication.__init__(self)
        tf.logging.info('starting autoencoder application')
        self.action = action
        self.net_param = net_param
        self.action_param = action_param
        # populated later by initialise_dataset_loader
        self.data_param = None
        self.autoencoder_param = None
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """Build the ImageReaders required by the current action and,
        for inference, by the configured inference type."""
        self.data_param = data_param
        self.autoencoder_param = task_param
        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None
        try:
            reader_phase = self.action_param.dataset_to_infer
        except AttributeError:
            reader_phase = None
        file_lists = data_partitioner.get_file_lists_by(
            phase=reader_phase, action=self.action)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            # FIX: the exception was previously constructed but never raised,
            # so evaluation silently fell through to the branches below
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
        elif self._infer_type == 'sample':
            # sampling draws from the latent prior, no input data needed
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
        # if self.is_training or self._infer_type in ('encode', 'encode-decode'):
        #     mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
        #     self.reader.add_preprocessing_layers([mean_var_normaliser])
    def initialise_sampler(self):
        """Create window samplers matching the readers built above."""
        self.sampler = []
        if self.is_training:
            self.sampler.append([ResizeSampler(
                reader=reader,
                data_param=self.data_param,
                batch_size=self.net_param.batch_size,
                windows_per_image=1,
                shuffle_buffer=True,
                queue_length=self.net_param.queue_length) for reader in
                self.readers])
            return
        if self._infer_type in ('encode', 'encode-decode'):
            # inference keeps the reader order deterministic (no shuffling)
            self.sampler.append([ResizeSampler(
                reader=reader,
                data_param=self.data_param,
                batch_size=self.net_param.batch_size,
                windows_per_image=1,
                shuffle_buffer=False,
                queue_length=self.net_param.queue_length) for reader in
                self.readers])
            return
        if self._infer_type == 'linear_interpolation':
            self.sampler.append([LinearInterpolateSampler(
                reader=reader,
                data_param=self.data_param,
                batch_size=self.net_param.batch_size,
                n_interpolations=self.autoencoder_param.n_interpolations,
                queue_length=self.net_param.queue_length) for reader in
                self.readers])
            return
    def initialise_network(self):
        """Instantiate the network, with optional L1/L2 weight and bias
        regularisers when a positive decay is configured."""
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)
        self.net = ApplicationNetFactory.create(self.net_param.name)(
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer)
    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        """Wire the sampler outputs into the network graph for training or
        for the configured inference type, registering outputs/gradients
        with the provided collectors."""
        def switch_sampler(for_training):
            with tf.name_scope('train' if for_training else 'validation'):
                sampler = self.get_sampler()[0][0 if for_training else -1]
                return sampler.pop_batch_op()
        if self.is_training:
            if self.action_param.validation_every_n > 0:
                data_dict = tf.cond(tf.logical_not(self.is_validation),
                                    lambda: switch_sampler(True),
                                    lambda: switch_sampler(False))
            else:
                data_dict = switch_sampler(for_training=True)
            image = tf.cast(data_dict['image'], tf.float32)
            net_output = self.net(image, is_training=self.is_training)
            with tf.name_scope('Optimiser'):
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.action_param.lr)
            loss_func = LossFunction(loss_type=self.action_param.loss_type)
            data_loss = loss_func(net_output)
            loss = data_loss
            if self.net_param.decay > 0.0:
                reg_losses = tf.get_collection(
                    tf.GraphKeys.REGULARIZATION_LOSSES)
                if reg_losses:
                    reg_loss = tf.reduce_mean(
                        [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                    loss = loss + reg_loss
            grads = self.optimiser.compute_gradients(
                loss, colocate_gradients_with_ops=True)
            # collecting gradients variables
            gradients_collector.add_to_collection([grads])
            outputs_collector.add_to_collection(
                var=data_loss, name='variational_lower_bound',
                average_over_devices=True, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='variational_lower_bound',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
            # NOTE(review): net_output indices 4/2/5 are assumed to be the
            # original image, posterior means and variances respectively --
            # confirm against the network implementation in use
            outputs_collector.add_to_collection(
                var=net_output[4], name='Originals',
                average_over_devices=False, summary_type='image3_coronal',
                collection=TF_SUMMARIES)
            outputs_collector.add_to_collection(
                var=net_output[2], name='Means',
                average_over_devices=False, summary_type='image3_coronal',
                collection=TF_SUMMARIES)
            outputs_collector.add_to_collection(
                var=net_output[5], name='Variances',
                average_over_devices=False, summary_type='image3_coronal',
                collection=TF_SUMMARIES)
        else:
            if self._infer_type in ('encode', 'encode-decode'):
                data_dict = self.get_sampler()[0][0].pop_batch_op()
                image = tf.cast(data_dict['image'], dtype=tf.float32)
                net_output = self.net(image, is_training=False)
                outputs_collector.add_to_collection(
                    var=data_dict['image_location'], name='location',
                    average_over_devices=True, collection=NETWORK_OUTPUT)
                if self._infer_type == 'encode-decode':
                    outputs_collector.add_to_collection(
                        var=net_output[2], name='generated_image',
                        average_over_devices=True, collection=NETWORK_OUTPUT)
                if self._infer_type == 'encode':
                    outputs_collector.add_to_collection(
                        var=net_output[7], name='embedded',
                        average_over_devices=True, collection=NETWORK_OUTPUT)
                self.output_decoder = WindowAsImageAggregator(
                    image_reader=self.readers[0],
                    output_path=self.action_param.save_seg_dir)
                return
            elif self._infer_type == 'sample':
                # build the network on a dummy input to obtain the latent
                # shape, then decode Gaussian noise drawn from the prior
                image_size = (self.net_param.batch_size,) + \
                             self.action_param.spatial_window_size + (1,)
                dummy_image = tf.zeros(image_size)
                net_output = self.net(dummy_image, is_training=False)
                noise_shape = net_output[-1].shape.as_list()
                stddev = self.autoencoder_param.noise_stddev
                noise = tf.random_normal(shape=noise_shape,
                                         mean=0.0,
                                         stddev=stddev,
                                         dtype=tf.float32)
                partially_decoded_sample = self.net.shared_decoder(
                    noise, is_training=False)
                decoder_output = self.net.decoder_means(
                    partially_decoded_sample, is_training=False)
                outputs_collector.add_to_collection(
                    var=decoder_output, name='generated_image',
                    average_over_devices=True, collection=NETWORK_OUTPUT)
                self.output_decoder = WindowAsImageAggregator(
                    image_reader=None,
                    output_path=self.action_param.save_seg_dir)
                return
            elif self._infer_type == 'linear_interpolation':
                # construct the entire network
                image_size = (self.net_param.batch_size,) + \
                             self.action_param.spatial_window_size + (1,)
                dummy_image = tf.zeros(image_size)
                net_output = self.net(dummy_image, is_training=False)
                data_dict = self.get_sampler()[0][0].pop_batch_op()
                real_code = data_dict['feature']
                real_code = tf.reshape(real_code, net_output[-1].get_shape())
                partially_decoded_sample = self.net.shared_decoder(
                    real_code, is_training=False)
                decoder_output = self.net.decoder_means(
                    partially_decoded_sample, is_training=False)
                outputs_collector.add_to_collection(
                    var=decoder_output, name='generated_image',
                    average_over_devices=True, collection=NETWORK_OUTPUT)
                outputs_collector.add_to_collection(
                    var=data_dict['feature_location'], name='location',
                    average_over_devices=True, collection=NETWORK_OUTPUT)
                self.output_decoder = WindowAsImageAggregator(
                    image_reader=self.readers[0],
                    output_path=self.action_param.save_seg_dir)
            else:
                raise NotImplementedError
    def interpret_output(self, batch_output):
        """Route network outputs to the aggregator according to the
        inference type; a no-op (True) during training."""
        if self.is_training:
            return True
        else:
            infer_type = look_up_operations(
                self.autoencoder_param.inference_type,
                SUPPORTED_INFERENCE)
            if infer_type == 'encode':
                return self.output_decoder.decode_batch(
                    batch_output['embedded'],
                    batch_output['location'][:, 0:1])
            if infer_type == 'encode-decode':
                return self.output_decoder.decode_batch(
                    batch_output['generated_image'],
                    batch_output['location'][:, 0:1])
            if infer_type == 'sample':
                return self.output_decoder.decode_batch(
                    batch_output['generated_image'],
                    None)
            if infer_type == 'linear_interpolation':
                return self.output_decoder.decode_batch(
                    batch_output['generated_image'],
                    batch_output['location'][:, :2])
| 47.351254 | 81 | 0.604118 |
708951979ca90ab8aaf18154481b00b683ee195d | 4,575 | py | Python | featuremap/featuremap_hd2.py | tshuangbl/kite_hd2 | d9ea96b57e1244de4c8c84e20f3be217d97235d4 | [
"BSD-2-Clause"
] | null | null | null | featuremap/featuremap_hd2.py | tshuangbl/kite_hd2 | d9ea96b57e1244de4c8c84e20f3be217d97235d4 | [
"BSD-2-Clause"
] | null | null | null | featuremap/featuremap_hd2.py | tshuangbl/kite_hd2 | d9ea96b57e1244de4c8c84e20f3be217d97235d4 | [
"BSD-2-Clause"
] | null | null | null | import argparse
import csv
import os
from collections import OrderedDict
# Command-line interface: positional whitelist CSV plus optional output paths,
# a --header flag for CSVs with a header row, and -q to silence progress.
parser=argparse.ArgumentParser()
parser.add_argument("FeatureRefCSV", help="path to csv file with whitelist Feature Barcodes")
parser.add_argument("--t2g", help="path to output t2g file, default ./FeaturesMismatch.t2g", default="./FeaturesMismatch.t2g", type=str)
parser.add_argument("--fa", help="path to output fasta file, default ./FeaturesMismatch.fa", default="./FeaturesMismatch.fa", type=str)
parser.add_argument("--header", help="include this flag if FeatureRefCSV has a header, omit if not", action="store_true")
parser.add_argument("-q", "--quiet", help="don't print processed Feature Barcodes", action="store_true")
args=parser.parse_args()
def version():
    """Print the featuremap script version string."""
    version_string = "0.0.2"
    print(version_string)
"""
get_tags requires a csv-formatted file of feature barcode names and sequences. If your CSV file contains a header, use the --header flag.
"""
def get_tags(filename):
with open(filename, mode='r') as csv_file:
csv_reader = csv.reader(csv_file)
tags = {}
if args.header:
next(csv_reader) #skip header unless the no_header option is specified
if not args.quiet: print('\nCSV includes header row\n')
else:
if not args.quiet:
if not args.quiet: print('\nCSV does not include header row.\n')
for row in csv_reader:
tags[row[0].strip()] = row[1].strip()
return tags
def make_mismatch_map(FeatureDict):
    """Expand each Feature Barcode into itself plus every single-base mismatch.

    Returns an OrderedDict mapping tag names to sequences.  The exact tag
    ('name-*-*') comes first, followed by the three substitutions at each
    position ('name-<pos>-<1..3>').  The substitution order per base matches
    the original hand-unrolled branches (A->T,G,C; G->T,A,C; C->T,G,A;
    anything else->A,G,C) so generated names and dict order are unchanged.
    The barcode length is taken from the first entry; shorter later entries
    are truncated to that length.
    """
    # per-base substitution order, replacing four duplicated if/elif branches
    substitution_order = {'A': 'TGC', 'G': 'TAC', 'C': 'TGA'}
    odict = OrderedDict()
    feature_barcode_length = None
    for counter, (name, seq) in enumerate(FeatureDict.items()):
        if counter == 0:
            feature_barcode_length = len(seq)
            if not args.quiet:
                print("Feature Barcode Length: " + str(feature_barcode_length) + '\n')
                print('Read ' + str(len(FeatureDict)) + ' Feature Barcodes:\n')
        if not args.quiet:
            print(name)
            print(seq)
        barcode = str(seq)[:feature_barcode_length]
        odict[name + '-*-*'] = barcode
        for pos, letter in enumerate(barcode):
            for i, base in enumerate(substitution_order.get(letter, 'AGC'), start=1):
                odict[name + '-' + str(pos) + '-' + str(i)] = barcode[:pos] + base + barcode[pos + 1:]
    return odict
def write_mismatch_map(tag_map, mismatch_t2g_path, mismatch_fasta_path):
    """Write the (mismatch-expanded) tag map as kallisto t2g and FASTA files.

    Keys ending in '-*-*-*-*' are the exact barcodes after two expansion
    passes; the suffix is stripped for output.  All other keys keep their
    mismatch-annotated name and are mapped back to the parent tag name
    (everything before the last four '-'-separated fields).
    """
    # context managers guarantee both files are flushed and closed even on
    # error (the original leaked both handles if a write raised)
    with open(mismatch_t2g_path, "w+") as tagmap_file, \
            open(mismatch_fasta_path, "w+") as tagmap_fasta:
        for i in list(tag_map.keys()):
            if i[-8:] == '-*-*-*-*':
                parent = i[:-8]
                tagmap_file.write(parent + '\t' + parent + '\t' + parent + '\n')
                tagmap_fasta.write(">" + parent + "\n" + tag_map[i] + "\n")
            else:
                parent = '-'.join(i.split('-')[:-4])
                tagmap_file.write(i + '\t' + parent + '\t' + parent + '\n')
                tagmap_fasta.write(">" + i + "\n" + tag_map[i] + "\n")
#wrapper function for make_mismatch_map and write_mismatch_map, hamming dist = 2
def kite_mismatch_maps(FeatureDict, mismatch_t2g_path, mismatch_fasta_path):
    # two passes of make_mismatch_map expand each barcode to hamming distance 2
    write_mismatch_map(make_mismatch_map(make_mismatch_map(FeatureDict)), mismatch_t2g_path, mismatch_fasta_path)
    if not args.quiet: print("\nThe t2g and fasta files are now ready \n")
# Script body: read the whitelist CSV and emit the distance-2 t2g/FASTA pair.
tags = get_tags(args.FeatureRefCSV)
t2g_path = args.t2g
fasta_path= args.fa
kite_mismatch_maps(tags, t2g_path, fasta_path)
7d3dea5cb0522d983b9934647f8d4b38f6f2c435 | 3,471 | py | Python | node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | Smohamedl/hospitalsystem | 499179ea1a79ad3edb1127a774ef1b8c7bbd9b94 | [
"MIT"
] | null | null | null | node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | Smohamedl/hospitalsystem | 499179ea1a79ad3edb1127a774ef1b8c7bbd9b94 | [
"MIT"
] | 2 | 2020-03-31T03:57:26.000Z | 2022-01-21T23:34:00.000Z | node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | Smohamedl/hospitalsystem | 499179ea1a79ad3edb1127a774ef1b8c7bbd9b94 | [
"MIT"
] | null | null | null | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
# gypd's whole purpose is to echo the expanded input, so the generator
# declines to expand these and re-emits them as '<(VAR)' references instead.
_generator_identity_variables = [
  'CONFIGURATION_NAME',
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'LIB_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
  'SHARED_LIB_DIR',
  'SHARED_LIB_PREFIX',
  'SHARED_LIB_SUFFIX',
  'STATIC_LIB_PREFIX',
  'STATIC_LIB_SUFFIX',
  ]
# gypd doesn't define a default value for OS like many other generator
# modules.  Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right.  The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase.  Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one .gypd file per input .gyp file referenced by target_list.

  Args:
    target_list: list of fully-qualified target names.
    target_dicts: unused here; required by the generator module interface.
    data: dict mapping each input file name to its fully expanded gyp
        contents, which are pretty-printed verbatim into the output.
    params: generator parameters; params['options'].suffix is inserted
        before the '.gypd' extension of each output file name.
  """
  # Map each output file to the input .gyp it came from.  Several targets
  # may share one .gyp file, so de-duplicate before writing anything.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
    if not input_file.endswith('.gyp'):
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'
    # setdefault keeps the first mapping seen, matching the original
    # "only insert if absent" behavior.
    output_files.setdefault(output_file, input_file)
  for output_file, input_file in output_files.items():
    # 'with' guarantees the file is closed even if pprint raises.
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
| 36.536842 | 78 | 0.755978 |
873914076cc3f270264886e30a2ef0b53410b761 | 51 | py | Python | src/external/__init__.py | atonlife/aTeam | fac686c3a746e80f1978fee72d1c0c346f620f85 | [
"Apache-2.0"
] | null | null | null | src/external/__init__.py | atonlife/aTeam | fac686c3a746e80f1978fee72d1c0c346f620f85 | [
"Apache-2.0"
] | 1 | 2021-04-09T07:20:04.000Z | 2021-04-23T08:18:39.000Z | src/external/__init__.py | atonlife/ateam | fac686c3a746e80f1978fee72d1c0c346f620f85 | [
"Apache-2.0"
] | null | null | null | __all__ = [
'cli',
'config',
'jira',
]
| 8.5 | 13 | 0.392157 |
638bd96c0de26fe078992983fe229b1cab585ede | 1,875 | py | Python | salt/states/disk.py | Achimh3011/salt | b6e6968c22f840df0d43bea7e99c188c623b850b | [
"Apache-2.0"
] | null | null | null | salt/states/disk.py | Achimh3011/salt | b6e6968c22f840df0d43bea7e99c188c623b850b | [
"Apache-2.0"
] | null | null | null | salt/states/disk.py | Achimh3011/salt | b6e6968c22f840df0d43bea7e99c188c623b850b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Disk monitoring state
Monitor the state of disk resources
'''
from __future__ import absolute_import
# Import salt libs
from six import string_types
__monitor__ = [
'status',
]
def status(name, maximum=None, minimum=None):
    '''
    Return the current disk usage stats for the named mount point

    name
        The mount point to check.
    maximum
        Fail if usage is above this percentage (int or 'NN%' string).
    minimum
        Fail if usage is below this percentage (int or 'NN%' string).
    '''
    # Monitoring state, no changes will be made so no test interface needed
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {},
           'data': {}}  # Data field for monitoring state
    data = __salt__['disk.usage']()
    if name not in data:
        ret['comment'] += 'Named disk mount not present '
        return ret
    # Normalize percentage arguments like '90%' to plain integers.  Only the
    # int() conversion can legitimately fail, so catch just ValueError
    # instead of swallowing every exception.
    if maximum:
        if isinstance(maximum, string_types):
            try:
                maximum = int(maximum.strip('%'))
            except ValueError:
                ret['comment'] += 'Max argument must be an integer '
    if minimum:
        if isinstance(minimum, string_types):
            try:
                minimum = int(minimum.strip('%'))
            except ValueError:
                ret['comment'] += 'Min argument must be an integer '
    # Bail out before comparing so an unparsed string is never compared
    # against an int (a TypeError on Python 3).
    if ret['comment']:
        return ret
    if minimum and maximum:
        if minimum >= maximum:
            ret['comment'] += 'Min must be less than max'
    if ret['comment']:
        return ret
    cap = int(data[name]['capacity'].strip('%'))
    ret['data'] = data[name]
    if minimum:
        if cap < minimum:
            ret['comment'] = 'Disk is below minimum of {0} at {1}'.format(
                minimum, cap)
            return ret
    if maximum:
        if cap > maximum:
            ret['comment'] = 'Disk is above maximum of {0} at {1}'.format(
                maximum, cap)
            return ret
    ret['comment'] = 'Disk in acceptable range'
    ret['result'] = True
    return ret
| 28.846154 | 75 | 0.5472 |
5727a4f16a4551bce49f1f3308a9dd7f9e63795d | 494 | py | Python | examples/the_weather.py | louisronron/ColorFunk | aef341bed941b2e9d63abc64ab7b02348a271583 | [
"MIT"
] | null | null | null | examples/the_weather.py | louisronron/ColorFunk | aef341bed941b2e9d63abc64ab7b02348a271583 | [
"MIT"
] | null | null | null | examples/the_weather.py | louisronron/ColorFunk | aef341bed941b2e9d63abc64ab7b02348a271583 | [
"MIT"
] | 1 | 2020-05-30T08:26:46.000Z | 2020-05-30T08:26:46.000Z | # import modules
from colorexlib.colorex import ColorExGrid
# set tile grid options
# 'source' is a CSV file plus its format tag; note the Windows-style
# backslash paths — this script assumes it is run from the project root.
options = dict()
options['source'] = ['data\\the_weather.csv', 'csv']
options['title'] = 'Weather Data 2017'
options['subtitle'] = 'The general weather data for year 2017'
options['theme'] = 'themes\\barbie.cxt'
# create ColorEx object, passing options.
colorex_grid = ColorExGrid(options)
# create color grid in HTML
# Renders the grid to 'the_weather.html' using the given template file.
colorex_grid.to_html('the_weather.html',
                     'templates\\default.html')
| 29.058824 | 63 | 0.714575 |
e7d8054be978b8c41cd948b8c248fe8209556b71 | 684 | py | Python | model_training.py | avaidsr/WOTBot | 1d4d4d39859470d8ab032a5641d369f6cb919f94 | [
"MIT"
] | null | null | null | model_training.py | avaidsr/WOTBot | 1d4d4d39859470d8ab032a5641d369f6cb919f94 | [
"MIT"
] | null | null | null | model_training.py | avaidsr/WOTBot | 1d4d4d39859470d8ab032a5641d369f6cb919f94 | [
"MIT"
] | null | null | null |
from model import load_data, NeuralNetwork
# Flattened input vector length: one value per pixel of a 120x320 frame.
input_size = 120 * 320
data_path = "training_data/*.npz"
# load_data splits the recorded frames/labels into train and validation sets.
X_train, X_valid, y_train, y_valid = load_data(input_size, data_path)
# train a neural network
# Architecture: input layer, one hidden layer of 32 units, 4 output classes.
layer_sizes = [input_size, 32, 4]
nn = NeuralNetwork()
nn.create(layer_sizes)
nn.train(X_train, y_train)
# evaluate on train data
train_accuracy = nn.evaluate(X_train, y_train)
print("Train accuracy: ", "{0:.2f}%".format(train_accuracy * 100))
# evaluate on validation data
validation_accuracy = nn.evaluate(X_valid, y_valid)
print("Validation accuracy: ", "{0:.2f}%".format(validation_accuracy * 100))
# save model
# Saved in OpenCV ML XML format (see NeuralNetwork.save_model).
model_path = "saved_model/nn_model.xml"
nn.save_model(model_path)
| 26.307692 | 76 | 0.755848 |
605aef78222041530639c99acbacb9ac3603c918 | 14,816 | py | Python | EPro-PnP-6DoF/lib/test.py | Lakonik/EPro-PnP | 931df847190ce10eddd1dc3e3168ce1a2f295ffa | [
"Apache-2.0"
] | 19 | 2022-03-21T10:22:24.000Z | 2022-03-30T15:43:46.000Z | EPro-PnP-6DoF/lib/test.py | Lakonik/EPro-PnP | 931df847190ce10eddd1dc3e3168ce1a2f295ffa | [
"Apache-2.0"
] | null | null | null | EPro-PnP-6DoF/lib/test.py | Lakonik/EPro-PnP | 931df847190ce10eddd1dc3e3168ce1a2f295ffa | [
"Apache-2.0"
] | 3 | 2022-03-26T08:08:24.000Z | 2022-03-30T11:17:11.000Z | """
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
This file is modified from
https://github.com/LZGMatrix/CDPN_ICCV2019_ZhigangLi
"""
import math
import torch
import numpy as np
import os, sys
from utils.utils import AverageMeter
from utils.eval import calc_all_errs, Evaluation
from utils.img import im_norm_255
import cv2
import ref
from progress.bar import Bar
import os
import utils.fancy_logger as logger
from utils.tictoc import tic, toc
from builtins import input
from utils.fs import mkdir_p
from scipy.linalg import logm
import numpy.linalg as LA
import time
import matplotlib.pyplot as plt
from numba import jit, njit
from ops.pnp.camera import PerspectiveCamera
from ops.pnp.cost_fun import AdaptiveHuberPnPCost
from ops.pnp.levenberg_marquardt import LMSolver
from ops.pnp.epropnp import EProPnP6DoF
from scipy.spatial.transform import Rotation as R
from utils.draw_orient_density import draw_orient_density
def test(epoch, cfg, data_loader, model, obj_vtx, obj_info, criterions):
    """Run one evaluation pass of the CDPN/EPro-PnP 6DoF model.

    Poses are recovered from the predicted dense 2D-3D correspondences via
    EPnP initialization followed by EProPnP refinement, accumulated into
    Evaluation objects, saved as .npy caches, and scored.  Returns a dict of
    average losses and a dict of predictions (or (None, None) when evaluating
    from an existing cache file).
    """
    model.eval()
    Eval = Evaluation(cfg.dataset, obj_info, obj_vtx)
    if 'trans' in cfg.pytorch.task.lower():
        # separate evaluator for poses built from the translation head
        Eval_trans = Evaluation(cfg.dataset, obj_info, obj_vtx)
    # If cached pose estimates already exist, score them directly and return.
    if not cfg.test.ignore_cache_file:
        est_cache_file = cfg.test.cache_file
        # gt_cache_file = cfg.test.cache_file.replace('pose_est', 'pose_gt')
        gt_cache_file = cfg.test.cache_file.replace('_est', '_gt')
        if os.path.exists(est_cache_file) and os.path.exists(gt_cache_file):
            Eval.pose_est_all = np.load(est_cache_file, allow_pickle=True).tolist()
            Eval.pose_gt_all = np.load(gt_cache_file, allow_pickle=True).tolist()
            fig_save_path = os.path.join(cfg.pytorch.save_path, str(epoch))
            mkdir_p(fig_save_path)
            if 'all' in cfg.test.test_mode.lower():
                Eval.evaluate_pose()
                Eval.evaluate_pose_add(fig_save_path)
                Eval.evaluate_pose_arp_2d(fig_save_path)
            elif 'pose' in cfg.test.test_mode.lower():
                Eval.evaluate_pose()
            elif 'add' in cfg.test.test_mode.lower():
                Eval.evaluate_pose_add(fig_save_path)
            elif 'arp' in cfg.test.test_mode.lower():
                Eval.evaluate_pose_arp_2d(fig_save_path)
            else:
                raise Exception("Wrong test mode: {}".format(cfg.test.test_mode))
            return None, None
        else:
            logger.info("test cache file {} and {} not exist!".format(est_cache_file, gt_cache_file))
            userAns = input("Generating cache file from model [Y(y)/N(n)]:")
            if userAns.lower() == 'n':
                sys.exit(0)
            else:
                logger.info("Generating test cache file!")
    preds = {}
    Loss = AverageMeter()
    Loss_rot = AverageMeter()
    Loss_trans = AverageMeter()
    num_iters = len(data_loader)
    bar = Bar('{}'.format(cfg.pytorch.exp_id[-60:]), max=num_iters)
    time_monitor = False  # set True to log per-batch PnP timing
    vis_dir = os.path.join(cfg.pytorch.save_path, 'test_vis_{}'.format(epoch))
    if not os.path.exists(vis_dir):
        os.makedirs(vis_dir)
    cam_intrinsic_np = cfg.dataset.camera_matrix.astype(np.float32)
    cam_intrinsic = torch.from_numpy(cam_intrinsic_np).cuda(cfg.pytorch.gpu)
    # probabilistic PnP layer used for pose refinement / density visualization
    epropnp = EProPnP6DoF(
        mc_samples=512,
        num_iter=4,
        solver=LMSolver(
            dof=6,
            num_iter=3)).cuda(cfg.pytorch.gpu)
    for i, (obj, obj_id, inp, pose, c_box, s_box, box, trans_local) in enumerate(data_loader):
        if cfg.pytorch.gpu > -1:
            # NOTE(review): 'async=True' is the pre-0.4 PyTorch spelling and a
            # SyntaxError on Python >= 3.7; modern code uses non_blocking=True.
            inp_var = inp.cuda(cfg.pytorch.gpu, async=True).float()
            c_box = c_box.to(inp_var.device)
            s_box = s_box.to(inp_var.device)
            box = box.to(inp_var.device)
        else:
            inp_var = inp.float()
        bs = len(inp)
        # forward propagation
        with torch.no_grad():
            (noc, w2d, scale), pred_trans = model(inp_var)
        w2d = w2d.flatten(2)
        # we use an alternative to standard softmax, i.e., normalizing the mean before exponential map
        w2d = (w2d - w2d.mean(dim=-1, keepdim=True)
               - math.log(w2d.size(-1))).exp().reshape(bs, 2, 64, 64) * scale[..., None, None]
        if i % cfg.test.disp_interval == 0:
            # input image
            inp_rgb = (inp[0].cpu().numpy().copy() * 255)[[2, 1, 0], :, :].astype(np.uint8)
            cfg.writer.add_image('input_image', inp_rgb, i)
            cv2.imwrite(os.path.join(vis_dir, '{}_inp.png'.format(i)), inp_rgb.transpose(1,2,0)[:, :, ::-1])
            if 'rot' in cfg.pytorch.task.lower():
                # coordinates map
                pred_coor = noc[0].data.cpu().numpy().copy()
                # write to image
                pred_coor[0] = im_norm_255(pred_coor[0])
                pred_coor[1] = im_norm_255(pred_coor[1])
                pred_coor[2] = im_norm_255(pred_coor[2])
                pred_coor = np.asarray(pred_coor, dtype=np.uint8)
                plt.imsave(os.path.join(vis_dir, '{}_coor_x_pred.png'.format(i)), pred_coor[0])
                plt.imsave(os.path.join(vis_dir, '{}_coor_y_pred.png'.format(i)), pred_coor[1])
                plt.imsave(os.path.join(vis_dir, '{}_coor_z_pred.png'.format(i)), pred_coor[2])
                plt.imsave(os.path.join(vis_dir, '{}_coor_xyz.png'.format(i)), pred_coor.transpose(1, 2, 0))
                # write to image
                # confidence map
                pred_conf = w2d[0].mean(dim=0).data.cpu().numpy().copy()
                pred_conf = (im_norm_255(pred_conf)).astype(np.uint8)
                cfg.writer.add_image('test_conf_pred', np.expand_dims(pred_conf, axis=0), i)
                cv2.imwrite(os.path.join(vis_dir, '{}_conf_pred.png'.format(i)), pred_conf)
        # per-object half-extents used to de-normalize the coordinate map
        dim = [[abs(obj_info[obj_id_]['min_x']),
                abs(obj_info[obj_id_]['min_y']),
                abs(obj_info[obj_id_]['min_z'])] for obj_id_ in obj_id.cpu().numpy()]
        dim = noc.new_tensor(dim) # (n, 3)
        pose_gt = pose.cpu().numpy()
        if 'rot' in cfg.pytorch.task.lower():
            # building 2D-3D correspondences
            x3d = noc.permute(0, 2, 3, 1) * dim[:, None, None, :]
            pred_conf = w2d.mean(dim=1) # (n, h, w)
            # pred_conf_min = pred_conf.reshape(bs, -1).min(dim=-1)[0][:, None, None]  # (n, 1, 1)
            # pred_conf_max = pred_conf.reshape(bs, -1).max(dim=-1)[0][:, None, None]  # (n, 1, 1)
            # pred_conf = (pred_conf - pred_conf_min) / (pred_conf_max - pred_conf_min)  # (n, h, w)
            w2d = w2d.permute(0, 2, 3, 1) # (n, h, w, 2)
            s = s_box.to(torch.int64) # (n, )
            wh_begin = c_box.to(torch.int64) - s[:, None] / 2. # (n, 2)
            wh_unit = s.to(torch.float32) / cfg.dataiter.out_res # (n, )
            pred_conf_np = pred_conf.cpu().numpy()
            # keep only the top-20% most confident correspondences for EPnP init
            valid_mask = pred_conf_np >= np.quantile(pred_conf_np.reshape(bs, -1), 0.8,
                                                     axis=1, keepdims=True)[..., None]
            wh_arange = torch.arange(cfg.dataiter.out_res, device=x3d.device, dtype=torch.float32)
            y, x = torch.meshgrid(wh_arange, wh_arange) # (h, w)
            # (n, h, w, 2)
            x2d = torch.stack((wh_begin[:, 0, None, None] + x * wh_unit[:, None, None],
                               wh_begin[:, 1, None, None] + y * wh_unit[:, None, None]), dim=-1)
        if 'trans' in cfg.pytorch.task.lower():
            # compute T from translation head
            ratio_delta_c = pred_trans[:, :2] # (n, 2)
            ratio_depth = pred_trans[:, 2] # (n, )
            pred_depth = ratio_depth * (cfg.dataiter.out_res / s_box) # (n, )
            pred_c = ratio_delta_c * box[:, 2:] + c_box # (n, 2)
            pred_x = (pred_c[:, 0] - cfg.dataset.camera_matrix[0, 2]) * pred_depth / cfg.dataset.camera_matrix[0, 0]
            pred_y = (pred_c[:, 1] - cfg.dataset.camera_matrix[1, 2]) * pred_depth / cfg.dataset.camera_matrix[1, 1]
            T_vector_trans = torch.stack([pred_x, pred_y, pred_depth], dim=-1) # (n, 3)
            pose_est_trans = torch.cat((torch.eye(3, device=pred_x.device).expand(bs, -1, -1),
                                        T_vector_trans.reshape(bs, 3, 1)), dim=-1).cpu().numpy() # (n, 3, 4)
        if 'rot' in cfg.pytorch.task.lower():
            dist_coeffs = np.zeros((4, 1), dtype=np.float32) # Assuming no lens distortion
            # for fair comparison we use EPnP initialization
            R_quats = []
            T_vectors = []
            x2d_np = x2d.cpu().numpy()
            x3d_np = x3d.cpu().numpy()
            for x2d_np_, x3d_np_, mask_np_ in zip(x2d_np, x3d_np, valid_mask):
                _, R_vector, T_vector = cv2.solvePnP(
                    x3d_np_[mask_np_], x2d_np_[mask_np_], cam_intrinsic_np, dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
                # reorder scipy's (x, y, z, w) quaternion to (w, x, y, z)
                q = R.from_rotvec(R_vector.reshape(-1)).as_quat()[[3, 0, 1, 2]]
                R_quats.append(q)
                T_vectors.append(T_vector.reshape(-1))
            R_quats = x2d.new_tensor(R_quats)
            T_vectors = x2d.new_tensor(T_vectors)
            pose_init = torch.cat((T_vectors, R_quats), dim=-1) # (n, 7)
            # Gauss-Newton optimize
            x2d = x2d.reshape(bs, -1, 2)
            w2d = w2d.reshape(bs, -1, 2)
            x3d = x3d.reshape(bs, -1, 3)
            camera = PerspectiveCamera(
                cam_mats=cam_intrinsic[None].expand(bs, -1, -1), z_min=0.01)
            cost_fun = AdaptiveHuberPnPCost(
                relative_delta=0.1)
            if time_monitor:
                torch.cuda.synchronize(device=x3d.device)
                t_begin = time.time()
            cost_fun.set_param(x2d, w2d)
            pose_opt = epropnp(
                x3d, x2d, w2d, camera, cost_fun, pose_init=pose_init, fast_mode=True)[0]
            if time_monitor:
                torch.cuda.synchronize(device=x3d.device)
                t_end = time.time()
                logger.info("Batch PnP time: {:04f}".format(t_end - t_begin))
            if i % cfg.test.disp_interval == 0:
                # visualize the orientation density of the first sample
                _, _, _, pose_samples, pose_sample_logweights, _ = epropnp.monte_carlo_forward(
                    x3d, x2d, w2d, camera, cost_fun,
                    pose_init=pose_opt, force_init_solve=False, fast_mode=True)
                draw = draw_orient_density(
                    pose_opt[:1], pose_samples[:, :1], pose_sample_logweights[:, :1]).squeeze(0) # (h, w, 3)
                plt.imsave(os.path.join(vis_dir, '{}_orient_distr.png'.format(i)),
                           (draw * 255).clip(min=0, max=255).astype(np.uint8))
            T_vectors, R_quats = pose_opt.split([3, 4], dim=-1) # (n, [3, 4])
            R_matrix = R.from_quat(R_quats[:, [1, 2, 3, 0]].cpu().numpy()).as_matrix() # (n, 3, 3)
            pose_est = np.concatenate([R_matrix, T_vectors.reshape(bs, 3, 1).cpu().numpy()], axis=-1)
            if 'trans' in cfg.pytorch.task.lower():
                # NOTE(review): T_vector_trans looks like a CUDA tensor here;
                # np.concatenate on it may fail — confirm a .cpu().numpy() is
                # not needed in the combined rot+trans task.
                pose_est_trans = np.concatenate((R_matrix, T_vector_trans.reshape(bs, 3, 1)), axis=-1)
        for obj_, pose_est_, pose_gt_ in zip(obj, pose_est, pose_gt):
            Eval.pose_est_all[obj_].append(pose_est_)
            Eval.pose_gt_all[obj_].append(pose_gt_)
            Eval.num[obj_] += 1
            Eval.numAll += 1
        if 'trans' in cfg.pytorch.task.lower():
            for obj_, pose_est_trans_, pose_gt_ in zip(obj, pose_est_trans, pose_gt):
                Eval_trans.pose_est_all[obj_].append(pose_est_trans_)
                Eval_trans.pose_gt_all[obj_].append(pose_gt_)
                Eval_trans.num[obj_] += 1
                Eval_trans.numAll += 1
        Bar.suffix = 'test Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.4f} | Loss_rot {loss_rot.avg:.4f} | Loss_trans {loss_trans.avg:.4f}'.format(
            epoch, i, num_iters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, loss_rot=Loss_rot, loss_trans=Loss_trans)
        bar.next()
    epoch_save_path = os.path.join(cfg.pytorch.save_path, str(epoch))
    if not os.path.exists(epoch_save_path):
        os.makedirs(epoch_save_path)
    if 'rot' in cfg.pytorch.task.lower():
        logger.info("{} Evaluate of Rotation Branch of Epoch {} {}".format('-'*40, epoch, '-'*40))
        preds['poseGT'] = Eval.pose_gt_all
        preds['poseEst'] = Eval.pose_est_all
        if cfg.pytorch.test:
            np.save(os.path.join(epoch_save_path, 'pose_est_all_test.npy'), Eval.pose_est_all)
            np.save(os.path.join(epoch_save_path, 'pose_gt_all_test.npy'), Eval.pose_gt_all)
        else:
            np.save(os.path.join(epoch_save_path, 'pose_est_all_epoch{}.npy'.format(epoch)), Eval.pose_est_all)
            np.save(os.path.join(epoch_save_path, 'pose_gt_all_epoch{}.npy'.format(epoch)), Eval.pose_gt_all)
        # evaluation
        if 'all' in cfg.test.test_mode.lower():
            Eval.evaluate_pose()
            Eval.evaluate_pose_add(epoch_save_path)
            Eval.evaluate_pose_arp_2d(epoch_save_path)
        else:
            if 'pose' in cfg.test.test_mode.lower():
                Eval.evaluate_pose()
            if 'add' in cfg.test.test_mode.lower():
                Eval.evaluate_pose_add(epoch_save_path)
            if 'arp' in cfg.test.test_mode.lower():
                Eval.evaluate_pose_arp_2d(epoch_save_path)
    if 'trans' in cfg.pytorch.task.lower():
        logger.info("{} Evaluate of Translation Branch of Epoch {} {}".format('-'*40, epoch, '-'*40))
        preds['poseGT'] = Eval_trans.pose_gt_all
        preds['poseEst'] = Eval_trans.pose_est_all
        if cfg.pytorch.test:
            np.save(os.path.join(epoch_save_path, 'pose_est_all_test_trans.npy'), Eval_trans.pose_est_all)
            np.save(os.path.join(epoch_save_path, 'pose_gt_all_test_trans.npy'), Eval_trans.pose_gt_all)
        else:
            np.save(os.path.join(epoch_save_path, 'pose_est_all_trans_epoch{}.npy'.format(epoch)), Eval_trans.pose_est_all)
            np.save(os.path.join(epoch_save_path, 'pose_gt_all_trans_epoch{}.npy'.format(epoch)), Eval_trans.pose_gt_all)
        # evaluation
        if 'all' in cfg.test.test_mode.lower():
            Eval_trans.evaluate_pose()
            Eval_trans.evaluate_pose_add(epoch_save_path)
            Eval_trans.evaluate_pose_arp_2d(epoch_save_path)
        else:
            if 'pose' in cfg.test.test_mode.lower():
                Eval_trans.evaluate_pose()
            if 'add' in cfg.test.test_mode.lower():
                Eval_trans.evaluate_pose_add(epoch_save_path)
            if 'arp' in cfg.test.test_mode.lower():
                Eval_trans.evaluate_pose_arp_2d(epoch_save_path)
    bar.finish()
    return {'Loss': Loss.avg, 'Loss_rot': Loss_rot.avg, 'Loss_trans': Loss_trans.avg}, preds
| 47.793548 | 174 | 0.592333 |
767877fadf66cdcee14de25694d09ceac5742333 | 9,000 | py | Python | segmentation_models/models/unet.py | AllWashedOut/segmentation_models | af85e3130195d3058f8d78cbdc521be84fdc7b16 | [
"MIT"
] | null | null | null | segmentation_models/models/unet.py | AllWashedOut/segmentation_models | af85e3130195d3058f8d78cbdc521be84fdc7b16 | [
"MIT"
] | null | null | null | segmentation_models/models/unet.py | AllWashedOut/segmentation_models | af85e3130195d3058f8d78cbdc521be84fdc7b16 | [
"MIT"
] | 2 | 2021-04-09T10:31:21.000Z | 2021-09-06T09:38:29.000Z | from keras_applications import get_submodules_from_kwargs
from ._common_blocks import Conv2dBn
from ._utils import freeze_model, filter_keras_submodules
from ..backbones.backbones_factory import Backbones
backend = None
layers = None
models = None
keras_utils = None
# ---------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------
def get_submodules():
    """Bundle the lazily-bound keras submodules into the kwargs form
    expected by the building blocks (Conv2dBn etc.)."""
    return dict(
        backend=backend,
        models=models,
        layers=layers,
        utils=keras_utils,
    )
# ---------------------------------------------------------------------
# Blocks
# ---------------------------------------------------------------------
def Conv3x3BnReLU(filters, use_batchnorm, name=None):
    """Return a callable applying 3x3 conv (+ optional BN) + ReLU.

    Args:
        filters: number of convolution filters.
        use_batchnorm: insert BatchNormalization between conv and activation.
        name: optional layer-name prefix forwarded to Conv2dBn.
    """
    submodule_kwargs = get_submodules()

    def apply(input_tensor):
        conv = Conv2dBn(
            filters,
            kernel_size=3,
            activation='relu',
            kernel_initializer='he_uniform',
            padding='same',
            use_batchnorm=use_batchnorm,
            name=name,
            **submodule_kwargs
        )
        return conv(input_tensor)

    return apply
def DecoderUpsamplingX2Block(filters, stage, use_batchnorm=False):
    """Decoder block: 2x nearest upsampling, optional skip concat, two 3x3 convs.

    Args:
        filters: number of filters in both convolution layers.
        stage: decoder stage index, embedded in every layer name.
        use_batchnorm: forwarded to the Conv3x3BnReLU blocks.
    """
    prefix = 'decoder_stage{}'.format(stage)
    # channels_last puts features on axis 3; channels_first on axis 1
    concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def layer(input_tensor, skip=None):
        x = layers.UpSampling2D(size=2, name=prefix + '_upsampling')(input_tensor)
        if skip is not None:
            x = layers.Concatenate(axis=concat_axis, name=prefix + '_concat')([x, skip])
        x = Conv3x3BnReLU(filters, use_batchnorm, name=prefix + 'a')(x)
        x = Conv3x3BnReLU(filters, use_batchnorm, name=prefix + 'b')(x)
        return x

    return layer
def DecoderTransposeX2Block(filters, stage, use_batchnorm=False):
    """Decoder block: 4x4/stride-2 transposed conv, optional BN+ReLU,
    optional skip concat, then one 3x3 conv block.

    Args:
        filters: number of filters for the transposed convolution.
        stage: decoder stage index, embedded in every layer name.
        use_batchnorm: add BatchNormalization (and drop the conv bias).
    """
    prefix = 'decoder_stage{}'.format(stage)
    # both batch-norm and concatenation act on the feature axis
    feature_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def layer(input_tensor, skip=None):
        x = layers.Conv2DTranspose(
            filters,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding='same',
            name=prefix + 'a_transpose',
            use_bias=not use_batchnorm,
        )(input_tensor)
        if use_batchnorm:
            x = layers.BatchNormalization(axis=feature_axis, name=prefix + 'a_bn')(x)
        x = layers.Activation('relu', name=prefix + 'a_relu')(x)
        if skip is not None:
            x = layers.Concatenate(axis=feature_axis, name=prefix + '_concat')([x, skip])
        x = Conv3x3BnReLU(filters, use_batchnorm, name=prefix + 'b')(x)
        return x

    return layer
# ---------------------------------------------------------------------
# Unet Decoder
# ---------------------------------------------------------------------
def build_unet(
        backbone,
        decoder_block,
        skip_connection_layers,
        decoder_filters=(256, 128, 64, 32, 16),
        n_upsample_blocks=5,
        classes=1,
        activation='sigmoid',
        use_batchnorm=True,
        center_dropout=0.0,
):
    """Assemble a U-Net: encoder backbone + upsampling decoder + conv head.

    Args:
        backbone: keras model used as encoder; its input/output are reused.
        decoder_block: factory (filters, stage, use_batchnorm) -> callable,
            e.g. DecoderUpsamplingX2Block or DecoderTransposeX2Block.
        skip_connection_layers: layer names (str) or indices (int) of the
            backbone whose outputs feed the decoder stages in order.
        decoder_filters: filters per decoder stage.
        n_upsample_blocks: number of decoder stages.
        classes: number of output channels of the final conv.
        activation: name of the final activation layer.
        use_batchnorm: forwarded to the decoder/center conv blocks.
        center_dropout: dropout rate applied between encoder and decoder
            (0.0 disables it).

    Returns:
        keras Model mapping the backbone input to the segmentation map.
    """
    input_ = backbone.input
    x = backbone.output
    # extract skip connections (strings are looked up by name, ints by index)
    skips = ([backbone.get_layer(name=i).output if isinstance(i, str)
              else backbone.get_layer(index=i).output for i in skip_connection_layers])
    # Dropout between encoder/decoder
    if center_dropout:
        x = layers.Dropout(center_dropout)(x)
    # add center block if last encoder operation was maxpooling (for vgg models)
    if isinstance(backbone.layers[-1], layers.MaxPooling2D):
        x = Conv3x3BnReLU(512, use_batchnorm, name='center_block1')(x)
        x = Conv3x3BnReLU(512, use_batchnorm, name='center_block2')(x)
    # building decoder blocks; stage i consumes skip i when one is available
    for i in range(n_upsample_blocks):
        if i < len(skips):
            skip = skips[i]
        else:
            skip = None
        x = decoder_block(decoder_filters[i], stage=i, use_batchnorm=use_batchnorm)(x, skip)
    # model head (define number of output classes)
    x = layers.Conv2D(
        filters=classes,
        kernel_size=(3, 3),
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        name='final_conv',
    )(x)
    x = layers.Activation(activation, name=activation)(x)
    # create keras model instance
    model = models.Model(input_, x)
    return model
# ---------------------------------------------------------------------
# Unet Model
# ---------------------------------------------------------------------
def Unet(
        backbone_name='vgg16',
        input_shape=(None, None, 3),
        classes=1,
        activation='sigmoid',
        weights=None,
        encoder_weights='imagenet',
        encoder_freeze=False,
        encoder_features='default',
        decoder_block_type='upsampling',
        decoder_filters=(256, 128, 64, 32, 16),
        decoder_use_batchnorm=True,
        center_dropout=0.0,
        **kwargs
):
    """ Unet_ is a fully convolution neural network for image semantic segmentation

    Args:
        backbone_name: name of classification model (without last dense layers) used as feature
            extractor to build segmentation model.
        input_shape: shape of input data/image ``(H, W, C)``, in general
            case you do not need to set ``H`` and ``W`` shapes, just pass ``(None, None, C)`` to make your model be
            able to process images of any size, but ``H`` and ``W`` of input images should be divisible by factor ``32``.
        classes: a number of classes for output (output shape - ``(h, w, classes)``).
        activation: name of one of ``keras.activations`` for last model layer
            (e.g. ``sigmoid``, ``softmax``, ``linear``).
        weights: optional, path to model weights.
        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
        encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable. If a float, freezes
            just that fraction of the encoder layers (starting with the earliest layers)
        encoder_features: a list of layer numbers or names starting from top of the model.
            Each of these layers will be concatenated with corresponding decoder block. If ``default`` is used
            layer names are taken from ``DEFAULT_SKIP_CONNECTIONS``.
        decoder_block_type: one of blocks with following layers structure:

            - `upsampling`:  ``UpSampling2D`` -> ``Conv2D`` -> ``Conv2D``
            - `transpose`:   ``Transpose2D`` -> ``Conv2D``

        decoder_filters: list of numbers of ``Conv2D`` layer filters in decoder blocks
        decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
            is used.
        center_dropout: Dropout fraction to apply at the center block, between encoder and decoder. Default is 0.0 (none).

    Returns:
        ``keras.models.Model``: **Unet**

    .. _Unet:
        https://arxiv.org/pdf/1505.04597

    """
    global backend, layers, models, keras_utils
    # bind the module-level keras submodules used by all the helper blocks
    submodule_args = filter_keras_submodules(kwargs)
    backend, layers, models, keras_utils = get_submodules_from_kwargs(submodule_args)
    if decoder_block_type == 'upsampling':
        decoder_block = DecoderUpsamplingX2Block
    elif decoder_block_type == 'transpose':
        decoder_block = DecoderTransposeX2Block
    else:
        raise ValueError('Decoder block type should be in ("upsampling", "transpose"). '
                         'Got: {}'.format(decoder_block_type))
    backbone = Backbones.get_backbone(
        backbone_name,
        input_shape=input_shape,
        weights=encoder_weights,
        include_top=False,
        **kwargs,
    )
    if encoder_features == 'default':
        encoder_features = Backbones.get_feature_layers(backbone_name, n=4)
    model = build_unet(
        backbone=backbone,
        decoder_block=decoder_block,
        skip_connection_layers=encoder_features,
        decoder_filters=decoder_filters,
        classes=classes,
        activation=activation,
        n_upsample_blocks=len(decoder_filters),
        use_batchnorm=decoder_use_batchnorm,
        center_dropout=center_dropout,
    )
    # lock encoder weights for fine-tuning
    if encoder_freeze:
        # a float freezes that fraction of the encoder; True freezes all of it
        fraction = encoder_freeze if isinstance(encoder_freeze, float) else 1.0
        freeze_model(backbone, fraction=fraction, **kwargs)
    # loading model weights
    if weights is not None:
        model.load_weights(weights)
    return model
| 34.090909 | 122 | 0.607778 |
6bdc658add03d8cea96ca87f7fbefbb86dc859f2 | 9,968 | py | Python | Lib/code.py | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | 69 | 2015-01-16T13:12:55.000Z | 2022-02-14T12:55:27.000Z | Lib/code.py | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | 3 | 2019-07-19T18:02:02.000Z | 2021-04-25T06:35:42.000Z | Lib/code.py | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | 32 | 2015-02-06T12:10:32.000Z | 2019-06-18T03:21:36.000Z | """Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
def softspace(file, newvalue):
    """Set the stream's softspace flag to *newvalue* and return the old value.

    Streams without a (settable) softspace attribute are tolerated: the old
    value defaults to 0 and assignment failures are silently ignored.
    """
    previous = getattr(file, 'softspace', 0)
    try:
        file.softspace = newvalue
    except (AttributeError, TypeError):
        # "attribute-less object" or "read-only attributes"
        pass
    return previous
class InteractiveInterpreter:
    """Base class for InteractiveConsole.
    This class deals with parsing and interpreter state (the user's
    namespace); it doesn't deal with input buffering or prompting or
    input file naming (the filename is always passed in explicitly).
    """
    # NOTE(review): this is Python 2 source (print statement, 'exec ... in'
    # syntax); it will not compile under Python 3.
    def __init__(self, locals=None):
        """Constructor.
        The optional 'locals' argument specifies the dictionary in
        which code will be executed; it defaults to a newly created
        dictionary with key "__name__" set to "__console__" and key
        "__doc__" set to None.
        """
        if locals is None:
            locals = {"__name__": "__console__", "__doc__": None}
        self.locals = locals
        self.compile = CommandCompiler()
    def runsource(self, source, filename="<input>", symbol="single"):
        """Compile and run some source in the interpreter.
        Arguments are as for compile_command().
        One several things can happen:
        1) The input is incorrect; compile_command() raised an
        exception (SyntaxError or OverflowError).  A syntax traceback
        will be printed by calling the showsyntaxerror() method.
        2) The input is incomplete, and more input is required;
        compile_command() returned None.  Nothing happens.
        3) The input is complete; compile_command() returned a code
        object.  The code is executed by calling self.runcode() (which
        also handles run-time exceptions, except for SystemExit).
        The return value is True in case 2, False in the other cases (unless
        an exception is raised).  The return value can be used to
        decide whether to use sys.ps1 or sys.ps2 to prompt the next
        line.
        """
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1
            self.showsyntaxerror(filename)
            return False
        if code is None:
            # Case 2
            return True
        # Case 3
        self.runcode(code)
        return False
    def runcode(self, code):
        """Execute a code object.
        When an exception occurs, self.showtraceback() is called to
        display a traceback.  All exceptions are caught except
        SystemExit, which is reraised.
        A note about KeyboardInterrupt: this exception may occur
        elsewhere in this code, and may not always be caught.  The
        caller should be prepared to deal with it.
        """
        try:
            # Python 2 statement form of exec, run in the user namespace.
            exec code in self.locals
        except SystemExit:
            raise
        except:
            self.showtraceback()
        else:
            # Emit a pending newline if the last print left softspace set.
            if softspace(sys.stdout, 0):
                print
    def showsyntaxerror(self, filename=None):
        """Display the syntax error that just occurred.
        This doesn't display a stack trace because there isn't one.
        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).
        The output is written by self.write(), below.
        """
        type, value, sys.last_traceback = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                # Python 2 SyntaxError values unpack to (msg, details).
                msg, (dummy_filename, lineno, offset, line) = value
            except:
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        list = traceback.format_exception_only(type, value)
        map(self.write, list)
    def showtraceback(self):
        """Display the exception that just occurred.
        We remove the first stack item because it is our own code.
        The output is written by self.write(), below.
        """
        try:
            type, value, tb = sys.exc_info()
            sys.last_type = type
            sys.last_value = value
            sys.last_traceback = tb
            tblist = traceback.extract_tb(tb)
            # drop the frame for runcode() itself
            del tblist[:1]
            list = traceback.format_list(tblist)
            if list:
                list.insert(0, "Traceback (most recent call last):\n")
            list[len(list):] = traceback.format_exception_only(type, value)
        finally:
            # break reference cycles through the traceback object
            tblist = tb = None
        map(self.write, list)
    def write(self, data):
        """Write a string.
        The base implementation writes to sys.stderr; a subclass may
        replace this with a different implementation.
        """
        sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
    """Closely emulate the behavior of the interactive Python interpreter.

    This class builds on InteractiveInterpreter and adds prompting
    using the familiar sys.ps1 and sys.ps2, and input buffering.
    """

    def __init__(self, locals=None, filename="<console>"):
        """Constructor.

        The optional locals argument will be passed to the
        InteractiveInterpreter base class.

        The optional filename argument should specify the (file)name
        of the input stream; it will show up in tracebacks.
        """
        InteractiveInterpreter.__init__(self, locals)
        self.filename = filename
        self.resetbuffer()

    def resetbuffer(self):
        """Reset the input buffer."""
        self.buffer = []

    def interact(self, banner=None):
        """Closely emulate the interactive Python console.

        The optional banner argument specifies the banner to print
        before the first interaction; by default it prints a banner
        similar to the one printed by the real Python interpreter,
        followed by the current class name in parentheses (so as not
        to confuse this with the real interpreter -- since it's so
        close!).
        """
        # Ensure the prompt strings exist; the real interpreter only
        # creates sys.ps1/sys.ps2 when running interactively.
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = ">>> "
        try:
            sys.ps2
        except AttributeError:
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        else:
            self.write("%s\n" % str(banner))
        more = 0
        while 1:
            try:
                # Use the secondary prompt while a statement is incomplete.
                if more:
                    prompt = sys.ps2
                else:
                    prompt = sys.ps1
                try:
                    line = self.raw_input(prompt)
                except EOFError:
                    self.write("\n")
                    break
                else:
                    more = self.push(line)
            except KeyboardInterrupt:
                # Ctrl-C aborts the current statement but not the loop.
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                more = 0

    def push(self, line):
        """Push a line to the interpreter.

        The line should not have a trailing newline; it may have
        internal newlines.  The line is appended to a buffer and the
        interpreter's runsource() method is called with the
        concatenated contents of the buffer as source.  If this
        indicates that the command was executed or invalid, the buffer
        is reset; otherwise, the command is incomplete, and the buffer
        is left as it was after the line was appended.  The return
        value is 1 if more input is required, 0 if the line was dealt
        with in some way (this is the same as runsource()).
        """
        self.buffer.append(line)
        source = "\n".join(self.buffer)
        more = self.runsource(source, self.filename)
        if not more:
            self.resetbuffer()
        return more

    def raw_input(self, prompt=""):
        """Write a prompt and read a line.

        The returned line does not include the trailing newline.
        When the user enters the EOF key sequence, EOFError is raised.

        The base implementation uses the built-in function
        raw_input(); a subclass may replace this with a different
        implementation.
        """
        return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
    """Run a read-eval-print loop via InteractiveConsole.

    Backwards-compatible functional interface to the
    InteractiveConsole class.

    Arguments (all optional, all default to None):

    banner -- passed through to InteractiveConsole.interact()
    readfunc -- when given, installed as the console's raw_input()
    local -- namespace handed to InteractiveInterpreter.__init__()

    When readfunc is omitted, GNU readline line editing is enabled on
    a best-effort basis.
    """
    console = InteractiveConsole(local)
    if readfunc is None:
        # Best effort only: importing readline is enough to activate it.
        try:
            import readline  # noqa: F401
        except ImportError:
            pass
    else:
        console.raw_input = readfunc
    console.interact(banner)
if __name__ == '__main__':
    # Demo: run the REPL under the debugger so the prompt machinery
    # itself can be stepped through.
    import pdb
    pdb.run("interact()\n")
| 32.363636 | 87 | 0.607043 |
cf76beb2ad88dab5fe01827897783ef548087649 | 131,881 | py | Python | cinder/tests/test_vmware_vmdk.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_vmware_vmdk.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_vmware_vmdk.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver.
"""
from distutils import version as ver
import mock
import mox
from oslo_utils import units
from oslo_vmware import api
from oslo_vmware import exceptions
from oslo_vmware import image_transfer
import six
from cinder import exception as cinder_exceptions
from cinder.image import glance
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import volumeops
class FakeVim(object):
    """Stand-in for the Vim client object used by the API session."""

    @property
    def service_content(self):
        # Permissive stub for the retrieved service content.
        content = mox.MockAnything()
        return content

    @property
    def client(self):
        stub_client = mox.MockAnything()
        return stub_client

    def Login(self, session_manager, userName, password):
        # Pretend authentication always succeeds.
        session = mox.MockAnything()
        return session

    def Logout(self, session_manager):
        pass

    def TerminateSession(self, session_manager, sessionId):
        pass

    def SessionIsActive(self, session_manager, sessionID, userName):
        pass
class FakeTaskInfo(object):
    """Minimal stand-in for a vSphere TaskInfo object."""

    def __init__(self, state, result=None):
        self.result = result
        self.state = state

        # Error stub exposing only the attribute read by callers.
        class FakeError(object):
            def __init__(self):
                self.localizedMessage = None

        self.error = FakeError()
class FakeMor(object):
    """Fake managed object reference carrying a type and a value."""

    def __init__(self, type, val):
        self.value = val
        self._type = type
class FakeObject(object):
    """Dict-backed object supporting item get/set access."""

    def __init__(self):
        self._fields = {}

    def __getitem__(self, item):
        return self._fields[item]

    def __setitem__(self, key, value):
        self._fields[key] = value
class FakeManagedObjectReference(object):
    """Holds a list under the ManagedObjectReference attribute."""

    def __init__(self, lis=None):
        # A falsy argument (None or empty) yields a fresh empty list.
        self.ManagedObjectReference = [] if not lis else lis
class FakeDatastoreSummary(object):
    """Datastore summary stub with capacity bookkeeping."""

    def __init__(self, freeSpace, capacity, datastore=None, name=None):
        self.datastore = datastore
        self.name = name
        self.freeSpace = freeSpace
        self.capacity = capacity
class FakeSnapshotTree(object):
    """Snapshot-tree stub mirroring the vSphere snapshot hierarchy."""

    def __init__(self, tree=None, name=None,
                 snapshot=None, childSnapshotList=None):
        self.name = name
        self.snapshot = snapshot
        self.childSnapshotList = childSnapshotList
        # The whole tree is exposed as the root snapshot list.
        self.rootSnapshotList = tree
class FakeElem(object):
    """Property-collector element stub holding a propSet."""

    def __init__(self, prop_set=None):
        self.propSet = prop_set
class FakeProp(object):
    """Name/value property pair stub."""

    def __init__(self, name=None, val=None):
        self.val = val
        self.name = name
class FakeRetrieveResult(object):
    """Property-collector retrieve result stub."""

    def __init__(self, objects, token):
        self.token = token
        self.objects = objects
class FakeObj(object):
    """Thin wrapper exposing a single obj attribute."""

    def __init__(self, obj=None):
        self.obj = obj
# TODO(vbala) Split test methods handling multiple cases into multiple methods,
# each handling a specific case.
class VMwareEsxVmdkDriverTestCase(test.TestCase):
"""Test class for VMwareEsxVmdkDriver."""
IP = 'localhost'
PORT = 443
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'cinder-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
TMP_DIR = "/vmware-tmp"
VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver
def setUp(self):
    """Build a driver wired to mocked config/db and a fake API session."""
    super(VMwareEsxVmdkDriverTestCase, self).setUp()
    self._config = mox.MockObject(configuration.Configuration)
    self._config.append_config_values(mox.IgnoreArg())
    self._config.vmware_host_ip = self.IP
    self._config.vmware_host_username = self.USERNAME
    self._config.vmware_host_password = self.PASSWORD
    self._config.vmware_wsdl_location = None
    self._config.vmware_volume_folder = self.VOLUME_FOLDER
    self._config.vmware_api_retry_count = self.API_RETRY_COUNT
    self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
    self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
    self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
    self._config.vmware_tmp_dir = self.TMP_DIR
    self._db = mock.Mock()
    self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config,
                                            db=self._db)
    # NOTE(review): the trailing commas make these two values 1-tuples;
    # presumably harmless since the session is created with
    # create_session=False -- confirm against VMwareAPISession.
    api_retry_count = self._config.vmware_api_retry_count,
    task_poll_interval = self._config.vmware_task_poll_interval,
    self._session = api.VMwareAPISession(self.IP, self.USERNAME,
                                         self.PASSWORD, api_retry_count,
                                         task_poll_interval,
                                         create_session=False)
    self._volumeops = volumeops.VMwareVolumeOps(self._session,
                                                self.MAX_OBJECTS)
    self._vim = FakeVim()
def test_do_setup(self):
    """Test do_setup."""
    m = self.mox
    # Replace the lazily-created session property with the fake session.
    m.StubOutWithMock(self._driver.__class__, 'session')
    self._driver.session = self._session
    m.ReplayAll()
    self._driver.do_setup(mox.IgnoreArg())
    m.UnsetStubs()
    m.VerifyAll()
def test_check_for_setup_error(self):
    """Test check_for_setup_error."""
    # Expected to be a no-op; the test passes as long as it does not raise.
    self._driver.check_for_setup_error()
def test_get_volume_stats(self):
    """Test get_volume_stats."""
    stats = self._driver.get_volume_stats()
    self.assertEqual(stats['vendor_name'], 'VMware')
    self.assertEqual(stats['driver_version'], self._driver.VERSION)
    self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
    self.assertEqual(stats['reserved_percentage'], 0)
    # This driver does not report concrete capacity figures.
    self.assertEqual(stats['total_capacity_gb'], 'unknown')
    self.assertEqual(stats['free_capacity_gb'], 'unknown')
def test_create_volume(self):
    """Test create_volume."""
    driver = self._driver
    host = mock.sentinel.host
    rp = mock.sentinel.resource_pool
    folder = mock.sentinel.folder
    summary = mock.sentinel.summary
    driver._select_ds_for_volume = mock.MagicMock()
    driver._select_ds_for_volume.return_value = (host, rp, folder,
                                                 summary)
    # invoke the create_volume call
    volume = {'name': 'fake_volume'}
    driver.create_volume(volume)
    # verify calls made
    driver._select_ds_for_volume.assert_called_once_with(volume)
    # test create_volume call when _select_ds_for_volume fails;
    # the failure must surface to the caller.
    driver._select_ds_for_volume.side_effect = exceptions.VimException('')
    self.assertRaises(exceptions.VimFaultException, driver.create_volume,
                      volume)
    # Clear side effects.
    driver._select_ds_for_volume.side_effect = None
def test_delete_volume_without_backing(self):
    """Test delete_volume without backing."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    # No backing VM exists, so deletion must be a no-op (no
    # delete_backing expectation is recorded).
    self._volumeops.get_backing('hello_world').AndReturn(None)
    m.ReplayAll()
    volume = FakeObject()
    volume['name'] = 'hello_world'
    self._driver.delete_volume(volume)
    m.UnsetStubs()
    m.VerifyAll()
def test_delete_volume_with_backing(self):
    """Test delete_volume when a backing VM exists.

    The driver must look up the backing by volume name and ask
    volumeops to destroy it.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    backing = FakeMor('VirtualMachine', 'my_vm')
    # (Removed a dead `FakeMor('Task', 'my_task')` that was created
    # and immediately discarded.)
    m.StubOutWithMock(self._volumeops, 'get_backing')
    m.StubOutWithMock(self._volumeops, 'delete_backing')
    self._volumeops.get_backing('hello_world').AndReturn(backing)
    self._volumeops.delete_backing(backing)
    m.ReplayAll()
    volume = FakeObject()
    volume['name'] = 'hello_world'
    self._driver.delete_volume(volume)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_export(self):
    """Test create_export."""
    # Export handling is a no-op for this driver; must not raise.
    self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_ensure_export(self):
    """Test ensure_export."""
    # Export handling is a no-op for this driver; must not raise.
    self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_remove_export(self):
    """Test remove_export."""
    # Export handling is a no-op for this driver; must not raise.
    self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_terminate_connection(self):
    """Test terminate_connection."""
    # No-op for this driver; must accept the force keyword without error.
    self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
                                      force=mox.IgnoreArg())
def test_get_volume_group_folder(self):
    """Test _get_volume_group_folder."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    datacenter = FakeMor('Datacenter', 'my_dc')
    m.StubOutWithMock(self._volumeops, 'get_vmfolder')
    # The folder lookup must be delegated to volumeops with the datacenter.
    self._volumeops.get_vmfolder(datacenter)
    m.ReplayAll()
    self._driver._get_volume_group_folder(datacenter)
    m.UnsetStubs()
    m.VerifyAll()
def test_select_datastore_summary(self):
    """Test _select_datastore_summary."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    datastore1 = FakeMor('Datastore', 'my_ds_1')
    datastore2 = FakeMor('Datastore', 'my_ds_2')
    datastore3 = FakeMor('Datastore', 'my_ds_3')
    datastore4 = FakeMor('Datastore', 'my_ds_4')
    datastores = [datastore1, datastore2, datastore3, datastore4]
    m.StubOutWithMock(self._volumeops, 'get_summary')
    # Datastores with increasing free space (5, 25, 50, 75 of 100).
    summary1 = FakeDatastoreSummary(5, 100)
    summary2 = FakeDatastoreSummary(25, 100)
    summary3 = FakeDatastoreSummary(50, 100)
    summary4 = FakeDatastoreSummary(75, 100)
    self._volumeops.get_summary(
        datastore1).MultipleTimes().AndReturn(summary1)
    self._volumeops.get_summary(
        datastore2).MultipleTimes().AndReturn(summary2)
    self._volumeops.get_summary(
        datastore3).MultipleTimes().AndReturn(summary3)
    self._volumeops.get_summary(
        datastore4).MultipleTimes().AndReturn(summary4)
    m.StubOutWithMock(self._volumeops, 'get_connected_hosts')
    host1 = FakeMor('HostSystem', 'my_host_1')
    host2 = FakeMor('HostSystem', 'my_host_2')
    host3 = FakeMor('HostSystem', 'my_host_3')
    host4 = FakeMor('HostSystem', 'my_host_4')
    # Connectivity decreases: 4, 3, 2, 2 hosts respectively.
    self._volumeops.get_connected_hosts(
        datastore1).MultipleTimes().AndReturn([host1, host2, host3, host4])
    self._volumeops.get_connected_hosts(
        datastore2).MultipleTimes().AndReturn([host1, host2, host3])
    self._volumeops.get_connected_hosts(
        datastore3).MultipleTimes().AndReturn([host1, host2])
    self._volumeops.get_connected_hosts(
        datastore4).MultipleTimes().AndReturn([host1, host2])
    m.ReplayAll()
    # NOTE(review): expected picks presumably weigh host connectivity
    # against free space -- confirm against the driver's selection logic.
    summary = self._driver._select_datastore_summary(1, datastores)
    self.assertEqual(summary, summary1)
    summary = self._driver._select_datastore_summary(10, datastores)
    self.assertEqual(summary, summary2)
    summary = self._driver._select_datastore_summary(40, datastores)
    self.assertEqual(summary, summary4)
    # No datastore can accommodate a size of 100.
    self.assertRaises(exceptions.VimException,
                      self._driver._select_datastore_summary,
                      100, datastores)
    m.UnsetStubs()
    m.VerifyAll()
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
    """Test _get_folder_ds_summary."""
    volumeops = volumeops.return_value
    driver = self._driver
    volume = {'size': 10, 'volume_type_id': 'fake_type'}
    rp = mock.sentinel.resource_pool
    dss = mock.sentinel.datastores
    # patch method calls from _get_folder_ds_summary
    volumeops.get_dc.return_value = mock.sentinel.dc
    volumeops.get_vmfolder.return_value = mock.sentinel.folder
    driver._get_storage_profile = mock.MagicMock()
    driver._select_datastore_summary = mock.MagicMock()
    driver._select_datastore_summary.return_value = mock.sentinel.summary
    # call _get_folder_ds_summary
    (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
                                                                rp, dss)
    # verify returned values and calls made
    self.assertEqual(mock.sentinel.folder, folder,
                     "Folder returned is wrong.")
    self.assertEqual(mock.sentinel.summary, datastore_summary,
                     "Datastore summary returned is wrong.")
    volumeops.get_dc.assert_called_once_with(rp)
    volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
    driver._get_storage_profile.assert_called_once_with(volume)
    # The size handed to datastore selection is in bytes.
    size = volume['size'] * units.Gi
    driver._select_datastore_summary.assert_called_once_with(size, dss)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_disk_type(self, get_volume_type_extra_specs):
    """Test _get_disk_type."""
    # Test with no volume type: thin provisioning is the default.
    volume = {'volume_type_id': None}
    self.assertEqual(vmdk.THIN_VMDK_TYPE,
                     vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
    # Test with valid vmdk_type.
    volume_type_id = mock.sentinel.volume_type_id
    volume = {'volume_type_id': volume_type_id}
    get_volume_type_extra_specs.return_value = vmdk.THICK_VMDK_TYPE
    self.assertEqual(vmdk.THICK_VMDK_TYPE,
                     vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
    get_volume_type_extra_specs.assert_called_once_with(volume_type_id,
                                                        'vmware:vmdk_type')
    # Test with invalid vmdk_type.
    get_volume_type_extra_specs.return_value = 'sparse'
    self.assertRaises(vmdk_exceptions.InvalidDiskTypeException,
                      vmdk.VMwareEsxVmdkDriver._get_disk_type,
                      volume)
def test_create_snapshot_without_backing(self):
    """Test vmdk.create_snapshot without backing."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    snapshot['name'] = 'snap_name'
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'available'
    # get_backing returns None by default; no create_snapshot
    # expectation is recorded, so snapshotting must be skipped.
    self._volumeops.get_backing(snapshot['volume_name'])
    m.ReplayAll()
    self._driver.create_snapshot(snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_snapshot_with_backing(self):
    """Test vmdk.create_snapshot with backing."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    snapshot['name'] = 'snapshot_name'
    snapshot['display_description'] = 'snapshot_desc'
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'available'
    backing = FakeMor('VirtualMachine', 'my_back')
    self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
    m.StubOutWithMock(self._volumeops, 'create_snapshot')
    # A snapshot with the given name/description must be taken on
    # the backing VM.
    self._volumeops.create_snapshot(backing, snapshot['name'],
                                    snapshot['display_description'])
    m.ReplayAll()
    self._driver.create_snapshot(snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_snapshot_when_attached(self):
    """Test vmdk.create_snapshot when volume is attached."""
    snapshot = FakeObject()
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'in-use'
    # Snapshots of attached (in-use) volumes are rejected.
    self.assertRaises(cinder_exceptions.InvalidVolume,
                      self._driver.create_snapshot, snapshot)
def test_delete_snapshot_without_backing(self):
    """Test delete_snapshot without backing."""
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    snapshot['name'] = 'snap_name'
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'available'
    # get_backing returns None by default; deletion must be a no-op.
    self._volumeops.get_backing(snapshot['volume_name'])
    m.ReplayAll()
    self._driver.delete_snapshot(snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_delete_snapshot_with_backing(self):
    """Test delete_snapshot when a backing VM exists.

    The snapshot must be removed from the backing via volumeops.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    snapshot = FakeObject()
    # (Removed a redundant snapshot['name'] = 'snapshot_name' that was
    # immediately overwritten by the assignment below.)
    snapshot['volume_name'] = 'volume_name'
    snapshot['name'] = 'snap_name'
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'available'
    backing = FakeMor('VirtualMachine', 'my_back')
    self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
    m.StubOutWithMock(self._volumeops, 'delete_snapshot')
    self._volumeops.delete_snapshot(backing,
                                    snapshot['name'])
    m.ReplayAll()
    self._driver.delete_snapshot(snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_delete_snapshot_when_attached(self):
    """Test delete_snapshot when volume is attached."""
    snapshot = FakeObject()
    snapshot['volume'] = FakeObject()
    snapshot['volume']['status'] = 'in-use'
    # Deleting a snapshot of an attached volume is rejected.
    self.assertRaises(cinder_exceptions.InvalidVolume,
                      self._driver.delete_snapshot, snapshot)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
    """Test create_cloned_volume without a backing."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'name': 'mock_vol'}
    src_vref = {'name': 'src_snapshot_name'}
    driver._verify_volume_creation = mock.MagicMock()
    # Source volume has no backing: nothing to clone from.
    mock_vops.get_backing.return_value = None
    # invoke the create_cloned_volume api
    driver.create_cloned_volume(volume, src_vref)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
    """Test create_cloned_volume with a backing."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = mock.sentinel.volume
    fake_size = 1
    src_vref = {'name': 'src_snapshot_name', 'size': fake_size}
    backing = mock.sentinel.backing
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    src_vmdk = "[datastore] src_vm/src_vm.vmdk"
    mock_vops.get_vmdk_path.return_value = src_vmdk
    driver._create_backing_by_copying = mock.MagicMock()
    # invoke the create_cloned_volume api
    driver.create_cloned_volume(volume, src_vref)
    # verify calls: the clone is produced by copying the source vmdk.
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
    mock_vops.get_vmdk_path.assert_called_once_with(backing)
    driver._create_backing_by_copying.assert_called_once_with(volume,
                                                              src_vmdk,
                                                              fake_size)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
                                   _extend_virtual_disk):
    """Delegate to the shared _create_backing_by_copying test body."""
    # Decorators inject mocks bottom-up: volumeops, _create_backing,
    # then _extend_volumeops_virtual_disk.
    self._test_create_backing_by_copying(volumeops, create_backing,
                                         _extend_virtual_disk)
def _test_create_backing_by_copying(self, volumeops, create_backing,
                                    _extend_virtual_disk):
    """Test _create_backing_by_copying."""
    fake_volume = {'size': 2, 'name': 'fake_volume-0000000000001'}
    fake_size = 1
    fake_src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
    fake_backing = mock.sentinel.backing
    fake_vmdk_path = mock.sentinel.path
    # "[datastore] dest_vm/dest_vm.vmdk"
    fake_dc = mock.sentinel.datacenter
    create_backing.return_value = fake_backing
    volumeops.get_vmdk_path.return_value = fake_vmdk_path
    volumeops.get_dc.return_value = fake_dc
    # Test with fake_volume['size'] greater than fake_size: the copied
    # disk must additionally be extended to the volume size.
    self._driver._create_backing_by_copying(fake_volume,
                                            fake_src_vmdk_path,
                                            fake_size)
    create_backing.assert_called_once_with(fake_volume)
    volumeops.get_vmdk_path.assert_called_once_with(fake_backing)
    volumeops.get_dc.assert_called_once_with(fake_backing)
    volumeops.delete_vmdk_file.assert_called_once_with(fake_vmdk_path,
                                                       fake_dc)
    volumeops.copy_vmdk_file.assert_called_once_with(fake_dc,
                                                     fake_src_vmdk_path,
                                                     fake_vmdk_path)
    _extend_virtual_disk.assert_called_once_with(fake_volume['size'],
                                                 fake_vmdk_path,
                                                 fake_dc)
    # Reset all the mocks and test with fake_volume['size']
    # not greater than fake_size: no extension should happen.
    _extend_virtual_disk.reset_mock()
    fake_size = 2
    self._driver._create_backing_by_copying(fake_volume,
                                            fake_src_vmdk_path,
                                            fake_size)
    self.assertFalse(_extend_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
    """Test create_volume_from_snapshot without a backing."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
    driver._verify_volume_creation = mock.MagicMock()
    # The snapshot's volume has no backing: nothing to clone from.
    mock_vops.get_backing.return_value = None
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
    """Test create_volume_from_snapshot without a backing snapshot."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
    backing = mock.sentinel.backing
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    # The backing exists but has no snapshot of that name.
    mock_vops.get_snapshot.return_value = None
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
    mock_vops.get_snapshot.assert_called_once_with(backing,
                                                   'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
            'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
    """Test create_volume_from_snapshot."""
    mock_vops = mock_vops.return_value
    driver = self._driver
    volume = {'volume_type_id': None, 'name': 'mock_vol'}
    snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
                'volume_size': 1}
    fake_size = snapshot['volume_size']
    backing = mock.sentinel.backing
    snap_moref = mock.sentinel.snap_moref
    driver._verify_volume_creation = mock.MagicMock()
    mock_vops.get_backing.return_value = backing
    mock_vops.get_snapshot.return_value = snap_moref
    src_vmdk = "[datastore] src_vm/src_vm-001.vmdk"
    mock_vops.get_vmdk_path.return_value = src_vmdk
    driver._create_backing_by_copying = mock.MagicMock()
    # invoke the create_volume_from_snapshot api
    driver.create_volume_from_snapshot(volume, snapshot)
    # verify calls: the new volume is built by copying the snapshot's
    # vmdk at the snapshot's recorded size.
    driver._verify_volume_creation.assert_called_once_with(volume)
    mock_vops.get_backing.assert_called_once_with('mock_vol')
    mock_vops.get_snapshot.assert_called_once_with(backing,
                                                   'mock_snap')
    mock_vops.get_vmdk_path.assert_called_once_with(snap_moref)
    driver._create_backing_by_copying.assert_called_once_with(volume,
                                                              src_vmdk,
                                                              fake_size)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
                       _select_ds_for_volume):
    """Test extend_volume."""
    # Decorators inject mocks bottom-up; delegate to the shared body.
    self._test_extend_volume(volume_ops, _extend_virtual_disk,
                             _select_ds_for_volume)
def _test_extend_volume(self, volume_ops, _extend_virtual_disk,
                        _select_ds_for_volume):
    """Shared implementation for the extend_volume tests."""
    fake_name = u'volume-00000001'
    new_size = '21'
    fake_size = '20'
    fake_vol = {'project_id': 'testprjid', 'name': fake_name,
                'size': fake_size,
                'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
    fake_host = mock.sentinel.host
    fake_rp = mock.sentinel.rp
    fake_folder = mock.sentinel.folder
    fake_summary = mock.Mock(spec=object)
    fake_summary.datastore = mock.sentinel.datastore
    fake_summary.name = 'fake_name'
    fake_backing = mock.sentinel.backing
    volume_ops.get_backing.return_value = fake_backing
    # If there is enough space in the datastore, where the volume is
    # located, then the rest of this method will not be called.
    self._driver.extend_volume(fake_vol, new_size)
    _extend_virtual_disk.assert_called_with(fake_name, new_size)
    self.assertFalse(_select_ds_for_volume.called)
    self.assertFalse(volume_ops.get_backing.called)
    self.assertFalse(volume_ops.relocate_backing.called)
    self.assertFalse(volume_ops.move_backing_to_folder.called)
    # If there is not enough space in the datastore, where the volume is
    # located, then the rest of this method will be called. The first time
    # _extend_virtual_disk is called, VimFaultException is raised. The
    # second time it is called, there is no exception.
    _extend_virtual_disk.reset_mock()
    _extend_virtual_disk.side_effect = [exceptions.
                                        VimFaultException([],
                                                          'Error'), None]
    # When _select_ds_for_volume raises no exception.
    _select_ds_for_volume.return_value = (fake_host, fake_rp,
                                          fake_folder, fake_summary)
    self._driver.extend_volume(fake_vol, new_size)
    _select_ds_for_volume.assert_called_with(new_size)
    volume_ops.get_backing.assert_called_with(fake_name)
    # The backing is relocated to the newly selected datastore before
    # the extend is retried.
    volume_ops.relocate_backing.assert_called_with(fake_backing,
                                                   fake_summary.datastore,
                                                   fake_rp,
                                                   fake_host)
    _extend_virtual_disk.assert_called_with(fake_name, new_size)
    volume_ops.move_backing_to_folder.assert_called_with(fake_backing,
                                                         fake_folder)
    # If get_backing raises error_util.VimException,
    # this exception will be caught for volume extend.
    _extend_virtual_disk.reset_mock()
    _extend_virtual_disk.side_effect = [exceptions.
                                        VimFaultException([],
                                                          'Error'), None]
    volume_ops.get_backing.side_effect = exceptions.VimException('Error')
    self.assertRaises(exceptions.VimException, self._driver.extend_volume,
                      fake_vol, new_size)
    # If _select_ds_for_volume raised an exception, the rest code will
    # not be called.
    _extend_virtual_disk.reset_mock()
    volume_ops.get_backing.reset_mock()
    volume_ops.relocate_backing.reset_mock()
    volume_ops.move_backing_to_folder.reset_mock()
    _extend_virtual_disk.side_effect = [exceptions.
                                        VimFaultException([],
                                                          'Error'), None]
    _select_ds_for_volume.side_effect = exceptions.VimException('Error')
    self.assertRaises(exceptions.VimException, self._driver.extend_volume,
                      fake_vol, new_size)
    _extend_virtual_disk.assert_called_once_with(fake_name, new_size)
    self.assertFalse(volume_ops.get_backing.called)
    self.assertFalse(volume_ops.relocate_backing.called)
    self.assertFalse(volume_ops.move_backing_to_folder.called)
def test_copy_image_to_volume_non_vmdk(self):
    """Test copy_image_to_volume for a non-vmdk disk format."""
    fake_context = mock.sentinel.context
    fake_image_id = 'image-123456789'
    fake_image_meta = {'disk_format': 'novmdk'}
    image_service = mock.Mock()
    image_service.show.return_value = fake_image_meta
    fake_volume = {'name': 'fake_name', 'size': 1}
    # Only vmdk images are acceptable; anything else must be rejected.
    self.assertRaises(cinder_exceptions.ImageUnacceptable,
                      self._driver.copy_image_to_volume,
                      fake_context, fake_volume,
                      image_service, fake_image_id)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
                   '_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
    'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
def test_copy_image_to_volume_non_stream_optimized(
        self, create_backing, get_ds_name_folder_path, get_disk_type,
        create_disk_from_sparse_image, create_disk_from_preallocated_image,
        vops, select_ds_for_volume, generate_uuid, extend_disk):
    """Delegate to the shared non-stream-optimized copy test body."""
    # Decorators inject mocks bottom-up, matching the parameter order.
    self._test_copy_image_to_volume_non_stream_optimized(
        create_backing,
        get_ds_name_folder_path,
        get_disk_type,
        create_disk_from_sparse_image,
        create_disk_from_preallocated_image,
        vops,
        select_ds_for_volume,
        generate_uuid,
        extend_disk)
def _test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
image_size_in_bytes = 2 * units.Gi
adapter_type = 'lsiLogic'
image_meta = {'disk_format': 'vmdk',
'size': image_size_in_bytes,
'properties': {'vmware_disktype': 'sparse',
'vmwware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
image_service.show.return_value = image_meta
backing = mock.Mock()
def create_backing_mock(volume, create_params):
self.assertTrue(create_params[vmdk.CREATE_PARAM_DISK_LESS])
return backing
create_backing.side_effect = create_backing_mock
ds_name = mock.Mock()
folder_path = mock.Mock()
get_ds_name_folder_path.return_value = (ds_name, folder_path)
summary = mock.Mock()
select_ds_for_volume.return_value = (mock.sentinel.host,
mock.sentinel.rp,
mock.sentinel.folder,
summary)
uuid = "6b77b25a-9136-470e-899e-3c930e570d8e"
generate_uuid.return_value = uuid
host = mock.Mock()
dc_ref = mock.Mock()
vops.get_host.return_value = host
vops.get_dc.return_value = dc_ref
disk_type = vmdk.EAGER_ZEROED_THICK_VMDK_TYPE
get_disk_type.return_value = disk_type
path = mock.Mock()
create_disk_from_sparse_image.return_value = path
create_disk_from_preallocated_image.return_value = path
volume_size = 2
vops.get_disk_size.return_value = volume_size * units.Gi
context = mock.Mock()
volume = {'name': 'volume_name',
'id': 'volume_id',
'size': volume_size}
image_id = mock.Mock()
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True,
vmdk.CREATE_PARAM_BACKING_NAME: uuid}
create_backing.assert_called_once_with(volume,
create_params=create_params)
create_disk_from_sparse_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, uuid)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
select_ds_for_volume.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
vops.get_disk_size.return_value = 1 * units.Gi
create_backing.reset_mock()
vops.attach_disk_to_backing.reset_mock()
vops.delete_backing.reset_mock()
image_meta['properties']['vmware_disktype'] = 'preallocated'
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
del create_params[vmdk.CREATE_PARAM_BACKING_NAME]
create_backing.assert_called_once_with(volume,
create_params=create_params)
create_disk_from_preallocated_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, volume['name'], adapter_type)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
extend_disk.assert_called_once_with(volume['name'], volume['size'])
extend_disk.reset_mock()
create_disk_from_preallocated_image.side_effect = (
exceptions.VimException("Error"))
self.assertRaises(exceptions.VimException,
self._driver.copy_image_to_volume,
context, volume, image_service, image_id)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
self._test_create_virtual_disk_from_preallocated_image(
vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk)
def _test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.sentinel.dest_dc_ref
dest_ds_name = "nfs"
dest_folder_path = "A/B/"
dest_disk_name = "disk-1"
adapter_type = "ide"
dc_ref = mock.sentinel.dc_ref
ds_name = "local-0"
folder_path = "cinder_temp"
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
path = mock.Mock()
dest_path = mock.Mock()
flat_extent_path.side_effect = [path, dest_path]
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, path.get_flat_extent_file_path())
copy_temp_virtual_disk.assert_called_once_with(dc_ref, path,
dest_dc_ref, dest_path)
self.assertEqual(dest_path, ret)
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
self._test_create_virtual_disk_from_preallocated_image_with_no_copy(
vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk)
def _test_create_virtual_disk_from_preallocated_image_with_no_copy(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.Mock(value=mock.sentinel.dest_dc_ref)
dest_ds_name = "nfs"
dest_folder_path = "A/B/"
dest_disk_name = "disk-1"
adapter_type = "ide"
dc_ref = mock.Mock(value=mock.sentinel.dest_dc_ref)
ds_name = dest_ds_name
folder_path = "cinder_temp"
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
path = mock.Mock()
flat_extent_path.return_value = path
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, path.get_flat_extent_file_path())
self.assertFalse(copy_temp_virtual_disk.called)
self.assertEqual(path, ret)
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image_with_copy_error(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
self._test_create_virtual_disk_from_preallocated_image_with_copy_error(
vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk)
def _test_create_virtual_disk_from_preallocated_image_with_copy_error(
self, vops, copy_image, flat_extent_path, get_temp_image_folder,
copy_temp_virtual_disk):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dest_dc_ref = mock.sentinel.dest_dc_ref
dest_ds_name = "nfs"
dest_folder_path = "A/B/"
dest_disk_name = "disk-1"
adapter_type = "ide"
dc_ref = mock.sentinel.dc_ref
ds_name = "local-0"
folder_path = "cinder_temp"
get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
path = mock.Mock()
dest_path = mock.Mock()
flat_extent_path.side_effect = [path, dest_path]
copy_image.side_effect = exceptions.VimException("error")
self.assertRaises(
exceptions.VimException,
self._driver._create_virtual_disk_from_preallocated_image,
context, image_service, image_id, image_size_in_bytes, dest_dc_ref,
dest_ds_name, dest_folder_path, dest_disk_name, adapter_type)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, path.get_flat_extent_file_path())
vops.delete_file.assert_called_once_with(
path.get_descriptor_ds_file_path(), dc_ref)
self.assertFalse(copy_temp_virtual_disk.called)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
def test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
self._test_create_virtual_disk_from_sparse_image(
copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
def _test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.Mock()
ds_name = "nfs"
folder_path = "A/B/"
disk_name = "disk-1"
src_path = mock.Mock()
sparse_path.return_value = src_path
dest_path = mock.Mock()
flat_extent_path.return_value = dest_path
ret = self._driver._create_virtual_disk_from_sparse_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_descriptor_file_path())
copy_temp_virtual_disk.assert_called_once_with(
dc_ref, src_path, dc_ref, dest_path)
self.assertEqual(dest_path, ret)
@mock.patch.object(image_transfer, 'download_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self,
volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
download_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
download_image)
    def _test_copy_image_to_volume_stream_optimized(self, volumeops,
                                                    session,
                                                    get_profile_id,
                                                    _select_ds_for_volume,
                                                    _extend_virtual_disk,
                                                    download_image):
        """Verify copy_image_to_volume for a streamOptimized vmdk image.

        Covers datastore-selection failure, the normal download path with a
        subsequent disk extend, the no-extend path, and cleanup of the
        backing when the image download fails.
        """
        fake_context = mock.Mock()
        fake_backing = mock.sentinel.backing
        fake_image_id = 'image-id'
        size = 5 * units.Gi
        size_gb = float(size) / units.Gi
        # Volume is larger than the image, so the disk must be extended
        # after the download.
        fake_volume_size = 1 + size_gb
        adapter_type = 'ide'
        fake_image_meta = {'disk_format': 'vmdk', 'size': size,
                           'container_format': 'bare',
                           'properties': {'vmware_disktype': 'streamOptimized',
                                          'vmware_adaptertype': adapter_type}}
        image_service = mock.Mock(glance.GlanceImageService)
        fake_host = mock.sentinel.host
        fake_rp = mock.sentinel.rp
        fake_folder = mock.sentinel.folder
        fake_summary = mock.sentinel.summary
        fake_summary.name = "datastore-1"
        fake_vm_create_spec = mock.sentinel.spec
        fake_disk_type = 'thin'
        vol_name = 'fake_volume name'
        vol_id = '12345'
        fake_volume = {'name': vol_name,
                       'id': vol_id,
                       'size': fake_volume_size,
                       'volume_type_id': None}
        cf = session.vim.client.factory
        vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
        vm_import_spec.configSpec = fake_vm_create_spec
        timeout = self._config.vmware_image_transfer_timeout_secs
        image_service.show.return_value = fake_image_meta
        volumeops.get_create_spec.return_value = fake_vm_create_spec
        volumeops.get_backing.return_value = fake_backing
        # If _select_ds_for_volume raises an exception, get_create_spec
        # will not be called.
        _select_ds_for_volume.side_effect = exceptions.VimException('Error')
        self.assertRaises(cinder_exceptions.VolumeBackendAPIException,
                          self._driver.copy_image_to_volume,
                          fake_context, fake_volume,
                          image_service, fake_image_id)
        self.assertFalse(volumeops.get_create_spec.called)
        # If the volume size is greater than the backing's disk size,
        # _extend_vmdk_virtual_disk will be called.
        _select_ds_for_volume.side_effect = None
        _select_ds_for_volume.return_value = (fake_host, fake_rp,
                                              fake_folder, fake_summary)
        profile_id = 'profile-1'
        get_profile_id.return_value = profile_id
        volumeops.get_disk_size.return_value = size
        self._driver.copy_image_to_volume(fake_context, fake_volume,
                                          image_service, fake_image_id)
        image_service.show.assert_called_with(fake_context, fake_image_id)
        _select_ds_for_volume.assert_called_with(fake_volume)
        get_profile_id.assert_called_once_with(fake_volume)
        # The VM is created with no disk (size 0); the disk content comes
        # from the downloaded stream-optimized image.
        volumeops.get_create_spec.assert_called_with(fake_volume['name'],
                                                     0,
                                                     fake_disk_type,
                                                     fake_summary.name,
                                                     profile_id,
                                                     adapter_type)
        self.assertTrue(download_image.called)
        download_image.assert_called_with(fake_context, timeout,
                                          image_service,
                                          fake_image_id,
                                          session=session,
                                          host=self.IP,
                                          port=self.PORT,
                                          resource_pool=fake_rp,
                                          vm_folder=fake_folder,
                                          vm_import_spec=vm_import_spec,
                                          image_size=size)
        _extend_virtual_disk.assert_called_once_with(fake_volume['name'],
                                                     fake_volume_size)
        # If the volume size is not greater than the backing's disk size,
        # _extend_vmdk_virtual_disk will not be called.
        volumeops.get_disk_size.return_value = fake_volume_size * units.Gi
        _extend_virtual_disk.reset_mock()
        self._driver.copy_image_to_volume(fake_context, fake_volume,
                                          image_service, fake_image_id)
        self.assertFalse(_extend_virtual_disk.called)
        # If fetch_stream_optimized_image raises an exception,
        # get_backing and delete_backing will be called to clean up.
        download_image.side_effect = exceptions.VimException('error')
        self.assertRaises(exceptions.VimException,
                          self._driver.copy_image_to_volume,
                          fake_context, fake_volume,
                          image_service, fake_image_id)
        volumeops.get_backing.assert_called_with(fake_volume['name'])
        volumeops.delete_backing.assert_called_with(fake_backing)
        self.assertFalse(_extend_virtual_disk.called)
def test_copy_volume_to_image_non_vmdk(self):
"""Test copy_volume_to_image for a non-vmdk disk format."""
m = self.mox
image_meta = FakeObject()
image_meta['disk_format'] = 'novmdk'
volume = FakeObject()
volume['name'] = 'vol-name'
volume['volume_attachment'] = None
m.ReplayAll()
self.assertRaises(cinder_exceptions.ImageUnacceptable,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_when_attached(self):
"""Test copy_volume_to_image when volume is attached."""
m = self.mox
volume = FakeObject()
volume['volume_attachment'] = [mock.sentinel.volume_attachment]
m.ReplayAll()
self.assertRaises(cinder_exceptions.InvalidVolume,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
    def test_copy_volume_to_image_vmdk(self):
        """Test copy_volume_to_image for a valid vmdk disk format.

        Stubs the driver's session and volumeops, records the expected
        get_backing/get_vmdk_path/upload_image calls with mox, and verifies
        that the driver uploads the volume's vmdk to the image service.
        """
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'session')
        self._driver.session = self._session
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        image_id = 'image-id-1'
        image_meta = FakeObject()
        image_meta['disk_format'] = 'vmdk'
        image_meta['id'] = image_id
        image_meta['name'] = image_id
        image_meta['is_public'] = True
        image_service = FakeObject()
        vol_name = 'volume-123456789'
        project_id = 'project-owner-id-123'
        volume = FakeObject()
        volume['name'] = vol_name
        size_gb = 5
        size = size_gb * units.Gi
        volume['size'] = size_gb
        volume['project_id'] = project_id
        # Copy is only allowed for unattached volumes.
        volume['volume_attachment'] = None
        # volumeops.get_backing
        backing = FakeMor("VirtualMachine", "my_vm")
        m.StubOutWithMock(self._volumeops, 'get_backing')
        self._volumeops.get_backing(vol_name).AndReturn(backing)
        # volumeops.get_vmdk_path
        datastore_name = 'datastore1'
        file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
        vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
        m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
        self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
        # vmware_images.upload_image: expect the full keyword set that the
        # driver passes through to oslo.vmware's image transfer.
        timeout = self._config.vmware_image_transfer_timeout_secs
        host_ip = self.IP
        m.StubOutWithMock(image_transfer, 'upload_image')
        image_transfer.upload_image(mox.IgnoreArg(),
                                    timeout,
                                    image_service,
                                    image_id,
                                    project_id,
                                    session=self._session,
                                    host=host_ip,
                                    port=self.PORT,
                                    vm=backing,
                                    vmdk_file_path=vmdk_file_path,
                                    vmdk_size=size,
                                    image_name=image_id,
                                    image_version=1,
                                    is_public=True)
        m.ReplayAll()
        self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
                                          image_service, image_meta)
        m.UnsetStubs()
        m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
def test_in_use(self):
# Test with in-use volume.
vol = {'size': 1, 'status': 'in-use', 'name': 'vol-1',
'volume_type_id': 'def'}
vol['volume_attachment'] = [mock.sentinel.volume_attachment]
self.assertTrue(self._driver._in_use(vol))
# Test with available volume.
vol['status'] = 'available'
vol['volume_attachment'] = None
self.assertFalse(self._driver._in_use(vol))
vol['volume_attachment'] = []
self.assertFalse(self._driver._in_use(vol))
def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, genereate_uuid,
delete_temp_backing):
self._driver._storage_policy_enabled = True
context = mock.sentinel.context
diff = mock.sentinel.diff
host = mock.sentinel.host
new_type = {'id': 'abc'}
# Test with in-use volume.
vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
'volume_type_id': 'def'}
vol['volume_attachment'] = [mock.sentinel.volume_attachment]
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no backing.
vops.get_backing.return_value = None
vol['volume_attachment'] = None
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, no profile change and
# compliant datastore.
ds_value = mock.sentinel.datastore_value
datastore = mock.Mock(value=ds_value)
vops.get_datastore.return_value = datastore
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
None,
None]
ds_sel.is_datastore_compliant.return_value = True
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, profile change and
# compliant datastore.
new_profile = mock.sentinel.new_profile
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
ds_sel.is_datastore_compliant.return_value = True
profile_id = mock.sentinel.profile_id
ds_sel.get_profile_id.return_value = profile_id
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change and a backing with
# snapshots. Also test the no candidate datastore case.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
ds_sel.select_datastore.return_value = ()
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: new_profile,
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
# Modify the previous case with a candidate datastore which is
# different than the backing's current datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
host = mock.sentinel.host
rp = mock.sentinel.rp
candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
summary = mock.Mock(datastore=candidate_ds)
ds_sel.select_datastore.return_value = (host, rp, summary)
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Modify the previous case with no profile change.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
'gold-1']
ds_sel.select_datastore.reset_mock()
vops.relocate_backing.reset_mock()
vops.move_backing_to_folder.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: 'gold-1',
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
self.assertFalse(vops.change_backing_profile.called)
# Test with disk type conversion, profile change, backing with
# no snapshots and candidate datastore which is same as the backing
# datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = False
summary.datastore = datastore
uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
genereate_uuid.return_value = uuid
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.clone_backing.assert_called_once_with(
vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, vmdk.THIN_VMDK_TYPE, host, rp)
delete_temp_backing.assert_called_once_with(backing)
vops.change_backing_profile.assert_called_once_with(clone,
profile_id)
# Modify the previous case with exception during clone.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.clone_backing.side_effect = exceptions.VimException('error')
vops.rename_backing.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertRaises(
exceptions.VimException, self._driver.retype, context, vol,
new_type, diff, host)
exp_rename_calls = [mock.call(backing, uuid),
mock.call(backing, vol['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
self.assertFalse(vops.change_backing_profile.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
def _test_extend_vmdk_virtual_disk(self, volume_ops):
fake_backing = mock.sentinel.backing
fake_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
fake_name = 'fake_name'
fake_size = 7
# If the backing is None, get_vmdk_path and get_dc
# will not be called
volume_ops.get_backing.return_value = None
volume_ops.get_vmdk_path.return_value = fake_vmdk_path
volume_ops.get_dc.return_value = fake_dc
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_backing.assert_called_once_with(fake_name)
self.assertFalse(volume_ops.get_vmdk_path.called)
self.assertFalse(volume_ops.get_dc.called)
self.assertFalse(volume_ops.extend_virtual_disk.called)
# Reset the mock and set the backing with a fake,
# all the mocks should be called.
volume_ops.get_backing.reset_mock()
volume_ops.get_backing.return_value = fake_backing
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_vmdk_path.assert_called_once_with(fake_backing)
volume_ops.get_dc.assert_called_once_with(fake_backing)
volume_ops.extend_virtual_disk.assert_called_once_with(fake_size,
fake_vmdk_path,
fake_dc)
# Test the exceptional case for extend_virtual_disk
volume_ops.extend_virtual_disk.side_effect = exceptions.VimException(
'VimException raised.')
self.assertRaises(exceptions.VimException,
self._driver._extend_vmdk_virtual_disk,
fake_name, fake_size)
@mock.patch.object(image_transfer, 'copy_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk)
def _test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
self._db.volume_get.return_value = volume
vops.get_backing.return_value = None
backing = mock.sentinel.backing
create_backing.return_value = backing
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
context = mock.sentinel.context
backup = {'id': 2, 'volume_id': 1}
backup_service = mock.Mock()
self._driver.backup_volume(context, backup, backup_service)
create_backing.assert_called_once_with(volume)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
self.assertEqual(mock.call(tmp_file_path, "wb"),
file_open.call_args_list[0])
copy_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, port=self.PORT, vm=backing, vmdk_file_path=vmdk_path,
vmdk_size=volume['size'] * units.Gi)
self.assertEqual(mock.call(tmp_file_path, "rb"),
file_open.call_args_list[1])
backup_service.backup.assert_called_once_with(backup, tmp_file)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
    def _test_restore_backup(
            self, vops, generate_uuid, temporary_file, file_open,
            restore_backing, extend_volume):
        """Verify restore_backup for snapshotted, same-size and larger volumes.

        Covers: rejection when the backing has snapshots, restore without a
        volume extend (backup size equals volume size), and restore followed
        by extend_volume (volume larger than the backup).
        """
        volume = {'name': 'vol-1', 'id': 1, 'size': 1}
        backup = {'id': 2, 'size': 1}
        context = mock.sentinel.context
        backup_service = mock.Mock()
        # A backing with snapshots cannot be overwritten by a restore.
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        vops.snapshot_exists.return_value = True
        self.assertRaises(
            cinder_exceptions.InvalidVolume, self._driver.restore_backup,
            context, backup, volume, backup_service)
        uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
        generate_uuid.return_value = uuid
        tmp_file_path = mock.sentinel.tmp_file_path
        temporary_file_ret = mock.Mock()
        temporary_file.return_value = temporary_file_ret
        # _temporary_file and file_open are used as context managers, so the
        # mocks need explicit __enter__/__exit__.
        temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
        temporary_file_ret.__exit__ = mock.Mock(return_value=None)
        tmp_file = mock.sentinel.tmp_file
        file_open_ret = mock.Mock()
        file_open.return_value = file_open_ret
        file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
        file_open_ret.__exit__ = mock.Mock(return_value=None)
        # No snapshots: restore proceeds; sizes match, so no extend.
        vops.snapshot_exists.return_value = False
        self._driver.restore_backup(context, backup, volume, backup_service)
        temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
        file_open.assert_called_once_with(tmp_file_path, "wb")
        backup_service.restore.assert_called_once_with(
            backup, volume['id'], tmp_file)
        restore_backing.assert_called_once_with(
            context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
        self.assertFalse(extend_volume.called)
        temporary_file.reset_mock()
        file_open.reset_mock()
        backup_service.reset_mock()
        restore_backing.reset_mock()
        # Volume larger than the backup: restore, then extend to volume size.
        volume = {'name': 'vol-1', 'id': 1, 'size': 2}
        self._driver.restore_backup(context, backup, volume, backup_service)
        temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
        file_open.assert_called_once_with(tmp_file_path, "wb")
        backup_service.restore.assert_called_once_with(
            backup, volume['id'], tmp_file)
        restore_backing.assert_called_once_with(
            context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
        extend_volume.assert_called_once_with(volume, volume['size'])
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
    def _test_restore_backing(
            self, generate_uuid, create_backing, select_ds, get_disk_type,
            vops, delete_temp_backing):
        """Verify _restore_backing with and without an existing backing.

        The restore creates a temporary backing from the stream-optimized
        file and clones it to the volume name; an existing backing is first
        renamed aside and restored if the clone/rename fails.
        """
        src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
        generate_uuid.return_value = src_uuid
        src = mock.sentinel.src
        create_backing.return_value = src
        summary = mock.Mock()
        summary.datastore = mock.sentinel.datastore
        select_ds.return_value = (mock.sentinel.host, mock.sentinel.rp,
                                  mock.ANY, summary)
        disk_type = vmdk.THIN_VMDK_TYPE
        get_disk_type.return_value = disk_type
        context = mock.sentinel.context
        volume = {'name': 'vol-1', 'id': 1, 'size': 1}
        # Case 1: no existing backing -- clone directly to the volume name
        # and delete the temporary source backing.
        backing = None
        tmp_file_path = mock.sentinel.tmp_file_path
        backup_size = units.Gi
        self._driver._restore_backing(
            context, volume, backing, tmp_file_path, backup_size)
        create_backing.assert_called_once_with(
            context, src_uuid, volume, tmp_file_path, backup_size)
        vops.clone_backing.assert_called_once_with(
            volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
        delete_temp_backing.assert_called_once_with(src)
        create_backing.reset_mock()
        vops.clone_backing.reset_mock()
        delete_temp_backing.reset_mock()
        # Case 2: existing backing -- clone under a temporary name, rename
        # the old backing aside, swap names, then delete old and source.
        dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
        tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
        generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
        dest = mock.sentinel.dest
        vops.clone_backing.return_value = dest
        backing = mock.sentinel.backing
        self._driver._restore_backing(
            context, volume, backing, tmp_file_path, backup_size)
        create_backing.assert_called_once_with(
            context, src_uuid, volume, tmp_file_path, backup_size)
        vops.clone_backing.assert_called_once_with(
            dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
            summary.datastore, disk_type, mock.sentinel.host, mock.sentinel.rp)
        exp_rename_calls = [mock.call(backing, tmp_uuid),
                            mock.call(dest, volume['name'])]
        self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
        exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
        self.assertEqual(exp_delete_temp_backing_calls,
                         delete_temp_backing.call_args_list)
        delete_temp_backing.reset_mock()
        vops.rename_backing.reset_mock()

        # Case 3: renaming the clone to the volume name fails -- the old
        # backing is renamed back and the clone is deleted.
        def vops_rename(backing, new_name):
            if backing == dest and new_name == volume['name']:
                raise exceptions.VimException("error")
        vops.rename_backing.side_effect = vops_rename
        generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
        self.assertRaises(
            exceptions.VimException, self._driver._restore_backing, context,
            volume, backing, tmp_file_path, backup_size)
        exp_rename_calls = [mock.call(backing, tmp_uuid),
                            mock.call(dest, volume['name']),
                            mock.call(backing, volume['name'])]
        self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
        exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
        self.assertEqual(exp_delete_temp_backing_calls,
                         delete_temp_backing.call_args_list)
    @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
    @mock.patch.object(image_transfer, 'download_stream_optimized_data')
    @mock.patch('cinder.openstack.common.fileutils.file_open')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch(
        'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    def test_create_backing_from_stream_optimized_file(
            self, select_ds, session, get_storage_profile_id, get_disk_type,
            vops, file_open, download_data, delete_temp_backing):
        """Delegate to the shared stream-optimized backing creation check."""
        # Mock decorators are applied bottom-up, so select_ds corresponds to
        # the innermost patch and delete_temp_backing to the outermost.
        self._test_create_backing_from_stream_optimized_file(
            select_ds, session, get_storage_profile_id, get_disk_type, vops,
            file_open, download_data, delete_temp_backing)
    def _test_create_backing_from_stream_optimized_file(
            self, select_ds, session, get_storage_profile_id, get_disk_type,
            vops, file_open, download_data, delete_temp_backing):
        """Exercise _create_backing_from_stream_optimized_file.

        Verifies that the backing is imported via a stream-optimized data
        transfer from the temporary file, and that the partially created
        backing is deleted when the transfer fails.
        """
        # Datastore selection provides the resource pool, folder and
        # datastore summary used for the import.
        rp = mock.sentinel.rp
        folder = mock.sentinel.folder
        summary = mock.Mock()
        summary.name = mock.sentinel.name
        select_ds.return_value = (mock.ANY, rp, folder, summary)
        import_spec = mock.Mock()
        session.vim.client.factory.create.return_value = import_spec
        profile_id = 'profile-1'
        get_storage_profile_id.return_value = profile_id
        disk_type = vmdk.THIN_VMDK_TYPE
        get_disk_type.return_value = disk_type
        create_spec = mock.Mock()
        vops.get_create_spec.return_value = create_spec
        # file_open is used as a context manager yielding the temp file.
        tmp_file = mock.sentinel.tmp_file
        file_open_ret = mock.Mock()
        file_open.return_value = file_open_ret
        file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
        file_open_ret.__exit__ = mock.Mock(return_value=None)
        vm_ref = mock.sentinel.vm_ref
        download_data.return_value = vm_ref
        context = mock.sentinel.context
        name = 'vm-1'
        volume = {'name': 'vol-1', 'id': 1, 'size': 1}
        tmp_file_path = mock.sentinel.tmp_file_path
        file_size_bytes = units.Gi
        ret = self._driver._create_backing_from_stream_optimized_file(
            context, name, volume, tmp_file_path, file_size_bytes)
        # The VM moref produced by the download is passed through.
        self.assertEqual(vm_ref, ret)
        vops.get_create_spec.assert_called_once_with(
            name, 0, disk_type, summary.name, profile_id)
        file_open.assert_called_once_with(tmp_file_path, "rb")
        download_data.assert_called_once_with(
            context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
            host=self.IP, port=self.PORT, resource_pool=rp, vm_folder=folder,
            vm_import_spec=import_spec, image_size=file_size_bytes)
        # On a transfer error, the backing created so far is cleaned up.
        download_data.side_effect = exceptions.VimException("error")
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        self.assertRaises(
            exceptions.VimException,
            self._driver._create_backing_from_stream_optimized_file,
            context, name, volume, tmp_file_path, file_size_bytes)
        delete_temp_backing.assert_called_once_with(backing)
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
    """Test class for VMwareVcVmdkDriver.

    Inherits the ESX driver tests and runs them (plus VC-specific tests)
    against the vCenter driver created in setUp().
    """
    # Driver class and default vCenter version used by the inherited tests.
    VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
    DEFAULT_VC_VERSION = '5.5'
    def setUp(self):
        """Create a VMwareVcVmdkDriver wired to the fake config and db."""
        super(VMwareVcVmdkDriverTestCase, self).setUp()
        self._config.vmware_host_version = self.DEFAULT_VC_VERSION
        self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
                                               db=self._db)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
self.assertEqual(ver.LooseVersion('6.0.1'), version)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_vc_version')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
        """do_setup on a pre-PBM vCenter leaves storage policies disabled."""
        session_obj = mock.Mock(name='session')
        session.return_value = session_obj
        # vCenter 5.0 has no PBM service.
        get_vc_version.return_value = ver.LooseVersion('5.0')
        self._driver.do_setup(mock.ANY)
        self.assertFalse(self._driver._storage_policy_enabled)
        get_vc_version.assert_called_once_with()
        # volumeops and the datastore selector share the driver's session.
        self.assertEqual(session_obj, self._driver.volumeops._session)
        self.assertEqual(session_obj, self._driver.ds_sel._session)
@mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
get_pbm_wsdl_location):
vc_version = ver.LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = None
self.assertRaises(exceptions.VMwareDriverException,
self._driver.do_setup,
mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
get_pbm_wsdl_location.assert_called_once_with(
six.text_type(vc_version))
    @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_vc_version')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location):
        """do_setup enables storage policies when a PBM WSDL is found."""
        session_obj = mock.Mock(name='session')
        session.return_value = session_obj
        vc_version = ver.LooseVersion('5.5')
        get_vc_version.return_value = vc_version
        get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
        self._driver.do_setup(mock.ANY)
        self.assertTrue(self._driver._storage_policy_enabled)
        get_vc_version.assert_called_once_with()
        get_pbm_wsdl_location.assert_called_once_with(
            six.text_type(vc_version))
        # volumeops and the datastore selector share the driver's session.
        self.assertEqual(session_obj, self._driver.volumeops._session)
        self.assertEqual(session_obj, self._driver.ds_sel._session)
    @mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_backing_by_copying(self, volumeops, create_backing,
                                       extend_virtual_disk):
        """Delegate to the shared _test_create_backing_by_copying check."""
        self._test_create_backing_by_copying(volumeops, create_backing,
                                             extend_virtual_disk)
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    def test_select_ds_for_volume(self, get_volume_group_folder, vops, ds_sel,
                                  get_storage_profile):
        """_select_ds_for_volume returns (host, rp, folder, summary)."""
        profile = mock.sentinel.profile
        get_storage_profile.return_value = profile
        host_ref = mock.sentinel.host_ref
        rp = mock.sentinel.rp
        summary = mock.sentinel.summary
        ds_sel.select_datastore.return_value = (host_ref, rp, summary)
        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc
        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        host = mock.sentinel.host
        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
        ret = self._driver._select_ds_for_volume(vol, host)
        self.assertEqual((host_ref, rp, folder, summary), ret)
        # The selector request carries the size and storage profile; with a
        # host supplied, selection is restricted to that host.
        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
                   hub.DatastoreSelector.PROFILE_NAME: profile}
        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=[host])
        vops.get_dc.assert_called_once_with(rp)
        get_volume_group_folder.assert_called_once_with(dc)
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'ds_sel')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
    def test_select_ds_for_volume_with_no_host(
            self, get_volume_group_folder, vops, ds_sel, get_storage_profile):
        """Without a host argument, the selector is called with hosts=None."""
        profile = mock.sentinel.profile
        get_storage_profile.return_value = profile
        host_ref = mock.sentinel.host_ref
        rp = mock.sentinel.rp
        summary = mock.sentinel.summary
        ds_sel.select_datastore.return_value = (host_ref, rp, summary)
        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc
        folder = mock.sentinel.folder
        get_volume_group_folder.return_value = folder
        vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
               'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
        ret = self._driver._select_ds_for_volume(vol)
        self.assertEqual((host_ref, rp, folder, summary), ret)
        # hosts=None means the selector may pick any connected host.
        exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
                   hub.DatastoreSelector.PROFILE_NAME: profile}
        ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
        vops.get_dc.assert_called_once_with(rp)
        get_volume_group_folder.assert_called_once_with(dc)
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_select_ds_for_volume_with_no_best_candidate(
self, ds_sel, get_storage_profile):
profile = mock.sentinel.profile
get_storage_profile.return_value = profile
ds_sel.select_datastore.return_value = ()
vol = {'id': 'c1037b23-c5e9-4446-815f-3e097cbf5bb0', 'size': 1,
'name': 'vol-c1037b23-c5e9-4446-815f-3e097cbf5bb0'}
self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
self._driver._select_ds_for_volume, vol)
exp_req = {hub.DatastoreSelector.SIZE_BYTES: units.Gi,
hub.DatastoreSelector.PROFILE_NAME: profile}
ds_sel.select_datastore.assert_called_once_with(exp_req, hosts=None)
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    def test_initialize_connection_with_instance_and_backing(
            self, relocate_backing, vops):
        """An existing backing is relocated to the instance's host."""
        instance = mock.sentinel.instance
        connector = {'instance': instance}
        backing = mock.Mock(value=mock.sentinel.backing_value)
        vops.get_backing.return_value = backing
        host = mock.sentinel.host
        vops.get_host.return_value = host
        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)
        relocate_backing.assert_called_once_with(volume, backing, host)
        # Connection info identifies the vmdk backing and the volume.
        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'],
                         conn_info['data']['volume_id'])
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    def test_initialize_connection_with_instance_and_no_backing(
            self, create_backing, relocate_backing, vops):
        """With no backing, one is created on the instance's host."""
        instance = mock.sentinel.instance
        connector = {'instance': instance}
        vops.get_backing.return_value = None
        host = mock.sentinel.host
        vops.get_host.return_value = host
        backing = mock.Mock(value=mock.sentinel.backing_value)
        create_backing.return_value = backing
        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)
        # A fresh backing never needs relocation.
        create_backing.assert_called_once_with(volume, host)
        self.assertFalse(relocate_backing.called)
        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'],
                         conn_info['data']['volume_id'])
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, '_relocate_backing')
    @mock.patch.object(VMDK_DRIVER, '_create_backing')
    def test_initialize_connection_with_no_instance_and_no_backing(
            self, create_backing, relocate_backing, vops):
        """With no instance in the connector, a backing is still created."""
        vops.get_backing.return_value = None
        host = mock.sentinel.host
        vops.get_host.return_value = host
        backing = mock.Mock(value=mock.sentinel.backing_value)
        create_backing.return_value = backing
        connector = {}
        volume = {'name': 'vol-1', 'id': 1}
        conn_info = self._driver.initialize_connection(volume, connector)
        # No instance -> no host hint is passed to _create_backing.
        create_backing.assert_called_once_with(volume)
        self.assertFalse(relocate_backing.called)
        self.assertEqual('vmdk', conn_info['driver_volume_type'])
        self.assertEqual(backing.value, conn_info['data']['volume'])
        self.assertEqual(volume['id'],
                         conn_info['data']['volume_id'])
    def test_get_volume_group_folder(self):
        """Test _get_volume_group_folder.

        Uses mox record/replay to verify that the volume group folder is
        created under the datacenter's vm folder.
        """
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        datacenter = FakeMor('Datacenter', 'my_dc')
        # Record the expected calls before ReplayAll().
        m.StubOutWithMock(self._volumeops, 'get_vmfolder')
        self._volumeops.get_vmfolder(datacenter)
        m.StubOutWithMock(self._volumeops, 'create_folder')
        self._volumeops.create_folder(mox.IgnoreArg(),
                                      self._config.vmware_volume_folder)
        m.ReplayAll()
        self._driver._get_volume_group_folder(datacenter)
        m.UnsetStubs()
        m.VerifyAll()
    @mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_clone_backing_linked(self, volume_ops, _extend_vmdk_virtual_disk):
        """Test _clone_backing with clone type - linked.

        A linked clone is created in place (no datastore selection), and the
        disk is extended only when the volume outgrows the source snapshot.
        """
        fake_size = 3
        fake_volume = {'volume_type_id': None, 'name': 'fake_name',
                       'size': fake_size}
        fake_snapshot = {'volume_name': 'volume_name',
                         'name': 'snapshot_name',
                         'volume_size': 2}
        fake_type = volumeops.LINKED_CLONE_TYPE
        fake_backing = mock.sentinel.backing
        self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                    volumeops.LINKED_CLONE_TYPE,
                                    fake_snapshot['volume_size'])
        # Linked clones pass no datastore/host/resource pool.
        volume_ops.clone_backing.assert_called_with(fake_volume['name'],
                                                    fake_backing,
                                                    fake_snapshot,
                                                    fake_type,
                                                    None,
                                                    host=None,
                                                    resource_pool=None)
        # If the volume size is greater than the original snapshot size,
        # _extend_vmdk_virtual_disk will be called.
        _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
                                                     fake_volume['size'])
        # If the volume size is not greater than the original snapshot size,
        # _extend_vmdk_virtual_disk will not be called.
        fake_size = 2
        fake_volume['size'] = fake_size
        _extend_vmdk_virtual_disk.reset_mock()
        self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                    volumeops.LINKED_CLONE_TYPE,
                                    fake_snapshot['volume_size'])
        self.assertFalse(_extend_vmdk_virtual_disk.called)
    @mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_clone_backing_full(self, volume_ops, _select_ds_for_volume,
                                _extend_vmdk_virtual_disk):
        """Test _clone_backing with clone type - full.

        A full clone selects a datastore/host/resource pool for the new
        volume, and the disk is extended only when the volume outgrows the
        source snapshot.
        """
        fake_host = mock.sentinel.host
        fake_backing = mock.sentinel.backing
        fake_folder = mock.sentinel.folder
        fake_datastore = mock.sentinel.datastore
        fake_resource_pool = mock.sentinel.resourcePool
        fake_summary = mock.Mock(spec=object)
        fake_summary.datastore = fake_datastore
        fake_size = 3
        fake_volume = {'volume_type_id': None, 'name': 'fake_name',
                       'size': fake_size}
        fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
                         'volume_size': 2}
        _select_ds_for_volume.return_value = (fake_host,
                                              fake_resource_pool,
                                              fake_folder, fake_summary)
        self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                    volumeops.FULL_CLONE_TYPE,
                                    fake_snapshot['volume_size'])
        _select_ds_for_volume.assert_called_with(fake_volume)
        volume_ops.clone_backing.assert_called_with(fake_volume['name'],
                                                    fake_backing,
                                                    fake_snapshot,
                                                    volumeops.FULL_CLONE_TYPE,
                                                    fake_datastore,
                                                    host=fake_host,
                                                    resource_pool=
                                                    fake_resource_pool)
        # If the volume size is greater than the original snapshot size,
        # _extend_vmdk_virtual_disk will be called.
        _extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
                                                     fake_volume['size'])
        # If the volume size is not greater than the original snapshot size,
        # _extend_vmdk_virtual_disk will not be called.
        fake_size = 2
        fake_volume['size'] = fake_size
        _extend_vmdk_virtual_disk.reset_mock()
        self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
                                    volumeops.FULL_CLONE_TYPE,
                                    fake_snapshot['volume_size'])
        self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
        """Test create_volume_from_snapshot without a backing snapshot.

        The backing exists but the named snapshot does not, so only the
        lookups are performed.
        """
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol'}
        snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
        backing = mock.sentinel.backing
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        mock_vops.get_snapshot.return_value = None
        # invoke the create_volume_from_snapshot api
        driver.create_volume_from_snapshot(volume, snapshot)
        # verify calls
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('mock_vol')
        mock_vops.get_snapshot.assert_called_once_with(backing,
                                                       'mock_snap')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    def test_create_volume_from_snapshot(self, mock_vops):
        """Test create_volume_from_snapshot.

        With a backing and a backing snapshot present, the volume is cloned
        from the snapshot using the default (full) clone type.
        """
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol'}
        snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
                    'volume_size': 2}
        backing = mock.sentinel.backing
        snap_moref = mock.sentinel.snap_moref
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        mock_vops.get_snapshot.return_value = snap_moref
        driver._clone_backing = mock.MagicMock()
        # invoke the create_volume_from_snapshot api
        driver.create_volume_from_snapshot(volume, snapshot)
        # verify calls
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('mock_vol')
        mock_vops.get_snapshot.assert_called_once_with(backing,
                                                       'mock_snap')
        default_clone_type = volumeops.FULL_CLONE_TYPE
        driver._clone_backing.assert_called_once_with(volume,
                                                      backing,
                                                      snap_moref,
                                                      default_clone_type,
                                                      snapshot['volume_size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    def test_create_cloned_volume_with_backing(self, mock_vops):
        """Test create_cloned_volume with clone type - full."""
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol'}
        src_vref = {'name': 'src_snapshot_name', 'size': 1}
        backing = mock.sentinel.backing
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        default_clone_type = volumeops.FULL_CLONE_TYPE
        driver._clone_backing = mock.MagicMock()
        # invoke the create_cloned_volume api
        driver.create_cloned_volume(volume, src_vref)
        # verify calls; a full clone needs no snapshot (None).
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
        driver._clone_backing.assert_called_once_with(volume,
                                                      backing,
                                                      None,
                                                      default_clone_type,
                                                      src_vref['size'])
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_clone_type')
    def test_create_linked_cloned_volume_with_backing(self, get_clone_type,
                                                      mock_vops):
        """Test create_cloned_volume with clone type - linked.

        A linked clone first snapshots the source backing and then clones
        from that snapshot.
        """
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
        src_vref = {'name': 'src_snapshot_name', 'status': 'available',
                    'size': 1}
        backing = mock.sentinel.backing
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        linked_clone = volumeops.LINKED_CLONE_TYPE
        get_clone_type.return_value = linked_clone
        driver._clone_backing = mock.MagicMock()
        mock_vops.create_snapshot = mock.MagicMock()
        mock_vops.create_snapshot.return_value = mock.sentinel.snapshot
        # invoke the create_cloned_volume api
        driver.create_cloned_volume(volume, src_vref)
        # verify calls
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
        get_clone_type.assert_called_once_with(volume)
        # The intermediate snapshot is named after the new volume's id.
        name = 'snapshot-%s' % volume['id']
        mock_vops.create_snapshot.assert_called_once_with(backing, name, None)
        driver._clone_backing.assert_called_once_with(volume,
                                                      backing,
                                                      mock.sentinel.snapshot,
                                                      linked_clone,
                                                      src_vref['size'])
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                '_get_clone_type')
    def test_create_linked_cloned_volume_when_attached(self, get_clone_type,
                                                       mock_vops):
        """Test create_cloned_volume linked clone when volume is attached.

        Linked-cloning an in-use source must raise InvalidVolume.
        """
        mock_vops = mock_vops.return_value
        driver = self._driver
        volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
        src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'}
        backing = mock.sentinel.backing
        driver._verify_volume_creation = mock.MagicMock()
        mock_vops.get_backing.return_value = backing
        linked_clone = volumeops.LINKED_CLONE_TYPE
        get_clone_type.return_value = linked_clone
        # invoke the create_cloned_volume api
        self.assertRaises(cinder_exceptions.InvalidVolume,
                          driver.create_cloned_volume,
                          volume,
                          src_vref)
        # verify calls
        driver._verify_volume_creation.assert_called_once_with(volume)
        mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
        get_clone_type.assert_called_once_with(volume)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_storage_profile(self, get_volume_type_extra_specs):
"""Test vmdk _get_storage_profile."""
# volume with no type id returns None
volume = FakeObject()
volume['volume_type_id'] = None
sp = self._driver._get_storage_profile(volume)
self.assertEqual(None, sp, "Without a volume_type_id no storage "
"profile should be returned.")
# profile associated with the volume type should be returned
fake_id = 'fake_volume_id'
volume['volume_type_id'] = fake_id
get_volume_type_extra_specs.return_value = 'fake_profile'
profile = self._driver._get_storage_profile(volume)
self.assertEqual('fake_profile', profile)
spec_key = 'vmware:storage_profile'
get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
# None should be returned when no storage profile is
# associated with the volume type
get_volume_type_extra_specs.return_value = False
profile = self._driver._get_storage_profile(volume)
self.assertIsNone(profile)
    @mock.patch('oslo_vmware.pbm.convert_datastores_to_hubs')
    @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
    @mock.patch('oslo_vmware.pbm.filter_hubs_by_profile')
    @mock.patch('oslo_vmware.pbm.filter_datastores_by_hubs')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    def test_filter_ds_by_profile(self, session, filter_dss, filter_hubs,
                                  get_profile_id, dss_to_hubs):
        """Test vmdk _filter_ds_by_profile() method.

        An unknown profile name raises; a known profile filters the
        datastores via the PBM hub APIs.
        """
        session = session.return_value
        # Test with no profile id
        datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
        profile = 'fake_profile'
        get_profile_id.return_value = None
        self.assertRaises(exceptions.VimException,
                          self._driver._filter_ds_by_profile,
                          datastores, profile)
        get_profile_id.assert_called_once_with(session, profile)
        # Test with a fake profile id
        profileId = 'fake_profile_id'
        filtered_dss = [mock.sentinel.ds1]
        # patch method calls from _filter_ds_by_profile
        get_profile_id.return_value = profileId
        pbm_cf = mock.sentinel.pbm_cf
        session.pbm.client.factory = pbm_cf
        hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
        dss_to_hubs.return_value = hubs
        filter_hubs.return_value = mock.sentinel.hubs
        filter_dss.return_value = filtered_dss
        # call _filter_ds_by_profile with a fake profile
        actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
        # verify return value and called methods
        self.assertEqual(filtered_dss, actual_dss,
                         "Wrong filtered datastores returned.")
        dss_to_hubs.assert_called_once_with(pbm_cf, datastores)
        filter_hubs.assert_called_once_with(session, hubs, profileId)
        filter_dss.assert_called_once_with(mock.sentinel.hubs, datastores)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'session', new_callable=mock.PropertyMock)
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
                'volumeops', new_callable=mock.PropertyMock)
    def test_get_folder_ds_summary(self, volumeops, session):
        """Test _get_folder_ds_summary.

        With storage policies enabled, the datastores are filtered by the
        volume's profile before the summary is selected.
        """
        volumeops = volumeops.return_value
        driver = self._driver
        driver._storage_policy_enabled = True
        volume = {'size': 10, 'volume_type_id': 'fake_type'}
        rp = mock.sentinel.resource_pool
        dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
        filtered_dss = [mock.sentinel.datastore1]
        profile = mock.sentinel.profile
        def filter_ds(datastores, storage_profile):
            # Stand-in for _filter_ds_by_profile: always keep datastore1.
            return filtered_dss
        # patch method calls from _get_folder_ds_summary
        volumeops.get_dc.return_value = mock.sentinel.dc
        volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
        volumeops.create_folder.return_value = mock.sentinel.folder
        driver._get_storage_profile = mock.MagicMock()
        driver._get_storage_profile.return_value = profile
        driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
        driver._select_datastore_summary = mock.MagicMock()
        driver._select_datastore_summary.return_value = mock.sentinel.summary
        # call _get_folder_ds_summary
        (folder, datastore_summary) = driver._get_folder_ds_summary(volume,
                                                                    rp, dss)
        # verify returned values and calls made
        self.assertEqual(mock.sentinel.folder, folder,
                         "Folder returned is wrong.")
        self.assertEqual(mock.sentinel.summary, datastore_summary,
                         "Datastore summary returned is wrong.")
        volumeops.get_dc.assert_called_once_with(rp)
        volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
        volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
                                                        self.VOLUME_FOLDER)
        driver._get_storage_profile.assert_called_once_with(volume)
        driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
        size = volume['size'] * units.Gi
        driver._select_datastore_summary.assert_called_once_with(size,
                                                                 filtered_dss)
        # Clear side effects.
        driver._filter_ds_by_profile.side_effect = None
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_extend_vmdk_virtual_disk(self, volume_ops):
        """Test vmdk._extend_vmdk_virtual_disk (shared helper)."""
        self._test_extend_vmdk_virtual_disk(volume_ops)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
def test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
self._test_copy_image_to_volume_non_stream_optimized(
create_backing,
get_ds_name_folder_path,
get_disk_type,
create_disk_from_sparse_image,
create_disk_from_preallocated_image,
vops,
select_ds_for_volume,
generate_uuid,
extend_disk)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image(
            self, vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk):
        """Delegate to the shared preallocated-image creation check."""
        self._test_create_virtual_disk_from_preallocated_image(
            vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy(
            self, vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk):
        """Delegate to the shared no-disk-copy variant of the check."""
        self._test_create_virtual_disk_from_preallocated_image_with_no_copy(
            vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk)
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image_with_copy_error(
            self, vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk):
        """Delegate to the shared copy-error variant of the check."""
        self._test_create_virtual_disk_from_preallocated_image_with_copy_error(
            vops, copy_image, flat_extent_path, get_temp_image_folder,
            copy_temp_virtual_disk)
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.'
        'MonolithicSparseVirtualDiskPath')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    def test_create_virtual_disk_from_sparse_image(
            self, copy_image, copy_temp_virtual_disk, flat_extent_path,
            sparse_path):
        """Delegate to the shared sparse-image creation check."""
        self._test_create_virtual_disk_from_sparse_image(
            copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
    @mock.patch.object(image_transfer, 'download_stream_optimized_image')
    @mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_copy_image_to_volume_stream_optimized(self, volumeops,
                                                   session,
                                                   get_profile_id,
                                                   _select_ds_for_volume,
                                                   _extend_virtual_disk,
                                                   download_image):
        """Test copy_image_to_volume.

        Test with an acceptable vmdk disk format and streamOptimized disk
        type; delegates to the shared stream-optimized copy check.
        """
        self._test_copy_image_to_volume_stream_optimized(volumeops,
                                                         session,
                                                         get_profile_id,
                                                         _select_ds_for_volume,
                                                         _extend_virtual_disk,
                                                         download_image)
def test_copy_image_to_volume_with_ova_container(self):
image_service = mock.Mock(glance.GlanceImageService)
image_size = 2 * units.Gi
adapter_type = 'ide'
image_meta = {'disk_format': 'vmdk', 'size': image_size,
'container_format': 'ova',
'properties': {'vmware_disktype': 'streamOptimized',
'vmware_adaptertype': adapter_type}}
image_service.show.return_value = image_meta
context = mock.sentinel.context
vol_name = 'volume-51e47214-8e3c-475d-b44b-aea6cd3eef53'
vol_id = '51e47214-8e3c-475d-b44b-aea6cd3eef53'
display_name = 'foo'
volume_size = 4
volume = {'name': vol_name,
'id': vol_id,
'display_name': display_name,
'size': volume_size,
'volume_type_id': None}
image_id = 'image-id'
self.assertRaises(
cinder_exceptions.ImageUnacceptable,
self._driver.copy_image_to_volume, context, volume, image_service,
image_id)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
@mock.patch.object(image_transfer, 'copy_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, copy_disk)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(image_transfer, 'download_stream_optimized_data')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, download_data, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, download_data, delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_with_params(self, vops, select_ds_for_volume):
host = mock.sentinel.host
resource_pool = mock.sentinel.resource_pool
folder = mock.sentinel.folder
summary = mock.sentinel.summary
select_ds_for_volume.return_value = (host, resource_pool, folder,
summary)
volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1}
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._driver._create_backing(volume, host, create_params)
vops.create_backing_disk_less.assert_called_once_with('vol-1',
folder,
resource_pool,
host,
summary.name,
None)
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with('vol-1',
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'ide')
vops.create_backing.reset_mock()
backing_name = "temp-vol"
create_params = {vmdk.CREATE_PARAM_BACKING_NAME: backing_name}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with(backing_name,
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'lsiLogic')
@mock.patch('cinder.openstack.common.fileutils.ensure_tree')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close')
def test_temporary_file(
self, close, mkstemp, delete_if_exists, ensure_tree):
fd = mock.sentinel.fd
tmp = mock.sentinel.tmp
mkstemp.return_value = (fd, tmp)
prefix = ".vmdk"
suffix = "test"
with self._driver._temporary_file(prefix=prefix,
suffix=suffix) as tmp_file:
self.assertEqual(tmp, tmp_file)
ensure_tree.assert_called_once_with(self.TMP_DIR)
mkstemp.assert_called_once_with(dir=self.TMP_DIR,
prefix=prefix,
suffix=suffix)
close.assert_called_once_with(fd)
delete_if_exists.assert_called_once_with(tmp)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing_nop(self, ds_sel, vops):
volume = {'name': 'vol-1', 'size': 1}
datastore = mock.sentinel.datastore
vops.get_datastore.return_value = datastore
profile = mock.sentinel.profile
vops.get_profile.return_value = profile
vops.is_datastore_accessible.return_value = True
ds_sel.is_datastore_compliant.return_value = True
backing = mock.sentinel.backing
host = mock.sentinel.host
self._driver._relocate_backing(volume, backing, host)
vops.is_datastore_accessible.assert_called_once_with(datastore, host)
ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
profile)
self.assertFalse(vops.relocate_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing_with_no_datastore(
self, ds_sel, vops):
volume = {'name': 'vol-1', 'size': 1}
profile = mock.sentinel.profile
vops.get_profile.return_value = profile
vops.is_datastore_accessible.return_value = True
ds_sel.is_datastore_compliant.return_value = False
ds_sel.select_datastore.return_value = []
backing = mock.sentinel.backing
host = mock.sentinel.host
self.assertRaises(vmdk_exceptions.NoValidDatastoreException,
self._driver._relocate_backing,
volume,
backing,
host)
ds_sel.select_datastore.assert_called_once_with(
{hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host])
self.assertFalse(vops.relocate_backing.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_relocate_backing(
self, ds_sel, get_volume_group_folder, vops):
volume = {'name': 'vol-1', 'size': 1}
vops.is_datastore_accessible.return_value = False
ds_sel.is_datastore_compliant.return_value = True
backing = mock.sentinel.backing
host = mock.sentinel.host
rp = mock.sentinel.rp
datastore = mock.sentinel.datastore
summary = mock.Mock(datastore=datastore)
ds_sel.select_datastore.return_value = (host, rp, summary)
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
self._driver._relocate_backing(volume, backing, host)
vops.relocate_backing.assert_called_once_with(backing,
datastore,
rp,
host)
vops.move_backing_to_folder.assert_called_once_with(backing,
folder)
class ImageDiskTypeTest(test.TestCase):
"""Unit tests for ImageDiskType."""
def test_is_valid(self):
self.assertTrue(vmdk.ImageDiskType.is_valid("thin"))
self.assertTrue(vmdk.ImageDiskType.is_valid("preallocated"))
self.assertTrue(vmdk.ImageDiskType.is_valid("streamOptimized"))
self.assertTrue(vmdk.ImageDiskType.is_valid("sparse"))
self.assertFalse(vmdk.ImageDiskType.is_valid("thick"))
def test_validate(self):
vmdk.ImageDiskType.validate("thin")
vmdk.ImageDiskType.validate("preallocated")
vmdk.ImageDiskType.validate("streamOptimized")
vmdk.ImageDiskType.validate("sparse")
self.assertRaises(cinder_exceptions.ImageUnacceptable,
vmdk.ImageDiskType.validate,
"thick")
| 46.160658 | 79 | 0.633988 |
84d9dd396156f2c7d2b6b9402c6d03729e7f2a63 | 1,214 | py | Python | Codes/python/py1/exsaula3 (2)/ex6.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | Codes/python/py1/exsaula3 (2)/ex6.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | Codes/python/py1/exsaula3 (2)/ex6.py | Gaazedo/portfolio | 4b09b6fffc6947375a20fee1c5523a12cbcbe970 | [
"MIT"
] | null | null | null | import random
def contem(v, qtd, e):
for i in range(qtd):
if v[i] == e:
return True
return False
def gerar(v1, v2):
qtd_v1 = 0
qtd_v2 = 0
n = (len(v1) + len(v2)) * 5
while (qtd_v1 < len(v1)):
e = random.randint(1, n)
if not contem(v1, qtd_v1, e) and not contem(v2, qtd_v2, e):
v1[qtd_v1] = e
qtd_v1 += 1
while (qtd_v2 < len(v2)):
e = random.randint(1, n)
if not contem(v1, qtd_v1, e) and not contem(v2, qtd_v2, e):
v2[qtd_v2] = e
qtd_v2 += 1
def intercalar_diferentes(v1, v2, vf):
if v1<v2:
maior,menor=v1,v2
elif len(v1)<len(v2):
maior,menor=v2,v1
else:
menor=v1
x=0
for i in range (len(menor)):
vf[x]=v1[i]
x+=1
vf[x]=v2[i]
x+=1
if len(v1)==len(v2):
return vf
else:
aux1,aux2= len(vf)-(len(maior)-len(menor)),len(maior)-(len(maior)-len(menor))
for k in range(aux1,len(vf)):
vf[k]=maior[aux2]
aux2+=1
return vf
n1 = int(input('Número de elementos em v1: '))
n2 = int(input('Número de elementos em v2: '))
v1 = [0] * n1
v2 = [0] * n2
vf = [0] * (len(v1) + len(v2))
gerar(v1, v2)
print('v1:', v1)
print('v2:', v2)
intercalar_diferentes(v1,v2,vf)
print('vf:',vf)
| 19.580645 | 81 | 0.558484 |
e0f532152f049941c23edc3f8751b666ecd68277 | 45,226 | py | Python | lib/sqlalchemy/ext/automap.py | josteinl/sqlalchemy | 9e7c068d669b209713da62da5748579f92d98129 | [
"MIT"
] | null | null | null | lib/sqlalchemy/ext/automap.py | josteinl/sqlalchemy | 9e7c068d669b209713da62da5748579f92d98129 | [
"MIT"
] | null | null | null | lib/sqlalchemy/ext/automap.py | josteinl/sqlalchemy | 9e7c068d669b209713da62da5748579f92d98129 | [
"MIT"
] | null | null | null | # ext/automap.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system
which automatically generates mapped classes and relationships from a database
schema, typically though not necessarily one which is reflected.
.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`.
It is hoped that the :class:`.AutomapBase` system provides a quick
and modernized solution to the problem that the very famous
`SQLSoup <https://sqlsoup.readthedocs.io/en/latest/>`_
also tries to solve, that of generating a quick and rudimentary object
model from an existing database on the fly. By addressing the issue strictly
at the mapper configuration level, and integrating fully with existing
Declarative class techniques, :class:`.AutomapBase` seeks to provide
a well-integrated approach to the issue of expediently auto-generating ad-hoc
mappings.
Basic Use
=========
The simplest usage is to reflect an existing database into a new model.
We create a new :class:`.AutomapBase` class in a similar manner as to how
we create a declarative base class, using :func:`.automap_base`.
We then call :meth:`.AutomapBase.prepare` on the resulting base class,
asking it to reflect the schema and produce mappings::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
Base = automap_base()
# engine, suppose it has two tables 'user' and 'address' set up
engine = create_engine("sqlite:///mydatabase.db")
# reflect the tables
Base.prepare(engine, reflect=True)
# mapped classes are now created with names by default
# matching that of the table name.
User = Base.classes.user
Address = Base.classes.address
session = Session(engine)
# rudimentary relationships are produced
session.add(Address(email_address="foo@bar.com", user=User(name="foo")))
session.commit()
# collection-based relationships are by default named
# "<classname>_collection"
u1 = session.query(User).first()
print (u1.address_collection)
Above, calling :meth:`.AutomapBase.prepare` while passing along the
:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the
:meth:`_schema.MetaData.reflect`
method will be called on this declarative base
classes' :class:`_schema.MetaData` collection; then, each **viable**
:class:`_schema.Table` within the :class:`_schema.MetaData`
will get a new mapped class
generated automatically. The :class:`_schema.ForeignKeyConstraint`
objects which
link the various tables together will be used to produce new, bidirectional
:func:`_orm.relationship` objects between classes.
The classes and relationships
follow along a default naming scheme that we can customize. At this point,
our basic mapping consisting of related ``User`` and ``Address`` classes is
ready to use in the traditional way.
.. note:: By **viable**, we mean that for a table to be mapped, it must
specify a primary key. Additionally, if the table is detected as being
a pure association table between two other tables, it will not be directly
mapped and will instead be configured as a many-to-many table between
the mappings for the two referring tables.
Generating Mappings from an Existing MetaData
=============================================
We can pass a pre-declared :class:`_schema.MetaData` object to
:func:`.automap_base`.
This object can be constructed in any way, including programmatically, from
a serialized file, or from itself being reflected using
:meth:`_schema.MetaData.reflect`.
Below we illustrate a combination of reflection and
explicit table declaration::
from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey
from sqlalchemy.ext.automap import automap_base
engine = create_engine("sqlite:///mydatabase.db")
# produce our own MetaData object
metadata = MetaData()
# we can reflect it ourselves from a database, using options
# such as 'only' to limit what tables we look at...
metadata.reflect(engine, only=['user', 'address'])
# ... or just define our own Table objects with it (or combine both)
Table('user_order', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('user.id'))
)
# we can then produce a set of mappings from this MetaData.
Base = automap_base(metadata=metadata)
# calling prepare() just sets up mapped classes and relationships.
Base.prepare()
# mapped classes are ready
User, Address, Order = Base.classes.user, Base.classes.address,\
Base.classes.user_order
Specifying Classes Explicitly
=============================
The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
explicitly, in a way similar to that of the :class:`.DeferredReflection` class.
Classes that extend from :class:`.AutomapBase` act like regular declarative
classes, but are not immediately mapped after their construction, and are
instead mapped when we call :meth:`.AutomapBase.prepare`. The
:meth:`.AutomapBase.prepare` method will make use of the classes we've
established based on the table name we use. If our schema contains tables
``user`` and ``address``, we can define one or both of the classes to be used::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
# pre-declare User for the 'user' table
class User(Base):
__tablename__ = 'user'
# override schema elements like Columns
user_name = Column('name', String)
# override relationships too, if desired.
# we must use the same name that automap would use for the
# relationship, and also must refer to the class name that automap will
# generate for "address"
address_collection = relationship("address", collection_class=set)
# reflect
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True)
# we still have Address generated from the tablename "address",
# but User is the same as Base.classes.User now
Address = Base.classes.address
u1 = session.query(User).first()
print (u1.address_collection)
# the backref is still there:
a1 = session.query(Address).first()
print (a1.user)
Above, one of the more intricate details is that we illustrated overriding
one of the :func:`_orm.relationship` objects that automap would have created.
To do this, we needed to make sure the names match up with what automap
would normally generate, in that the relationship name would be
``User.address_collection`` and the name of the class referred to, from
automap's perspective, is called ``address``, even though we are referring to
it as ``Address`` within our usage of this class.
Overriding Naming Schemes
=========================
:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and
relationship names based on a schema, which means it has decision points in how
these names are determined. These three decision points are provided using
functions which can be passed to the :meth:`.AutomapBase.prepare` method, and
are known as :func:`.classname_for_table`,
:func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship`. Any or all of these
functions are provided as in the example below, where we use a "camel case"
scheme for class names and a "pluralizer" for collection names using the
`Inflect <https://pypi.org/project/inflect>`_ package::
import re
import inflect
def camelize_classname(base, tablename, table):
"Produce a 'camelized' class name, e.g. "
"'words_and_underscores' -> 'WordsAndUnderscores'"
return str(tablename[0].upper() + \
re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))
_pluralizer = inflect.engine()
def pluralize_collection(base, local_cls, referred_cls, constraint):
"Produce an 'uncamelized', 'pluralized' class name, e.g. "
"'SomeTerm' -> 'some_terms'"
referred_name = referred_cls.__name__
uncamelized = re.sub(r'[A-Z]',
lambda m: "_%s" % m.group(0).lower(),
referred_name)[1:]
pluralized = _pluralizer.plural(uncamelized)
return pluralized
from sqlalchemy.ext.automap import automap_base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
classname_for_table=camelize_classname,
name_for_collection_relationship=pluralize_collection
)
From the above mapping, we would now have classes ``User`` and ``Address``,
where the collection from ``User`` to ``Address`` is called
``User.addresses``::
User, Address = Base.classes.User, Base.classes.Address
u1 = User(addresses=[Address(email="foo@bar.com")])
Relationship Detection
======================
The vast majority of what automap accomplishes is the generation of
:func:`_orm.relationship` structures based on foreign keys. The mechanism
by which this works for many-to-one and one-to-many relationships is as
follows:
1. A given :class:`_schema.Table`, known to be mapped to a particular class,
is examined for :class:`_schema.ForeignKeyConstraint` objects.
2. From each :class:`_schema.ForeignKeyConstraint`, the remote
:class:`_schema.Table`
object present is matched up to the class to which it is to be mapped,
if any, else it is skipped.
3. As the :class:`_schema.ForeignKeyConstraint`
we are examining corresponds to a
reference from the immediate mapped class, the relationship will be set up
as a many-to-one referring to the referred class; a corresponding
one-to-many backref will be created on the referred class referring
to this class.
4. If any of the columns that are part of the
:class:`_schema.ForeignKeyConstraint`
are not nullable (e.g. ``nullable=False``), a
:paramref:`_orm.relationship.cascade` keyword argument
of ``all, delete-orphan`` will be added to the keyword arguments to
be passed to the relationship or backref. If the
:class:`_schema.ForeignKeyConstraint` reports that
:paramref:`_schema.ForeignKeyConstraint.ondelete`
is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable
set of columns, the option :paramref:`_orm.relationship.passive_deletes`
flag is set to ``True`` in the set of relationship keyword arguments.
Note that not all backends support reflection of ON DELETE.
.. versionadded:: 1.0.0 - automap will detect non-nullable foreign key
constraints when producing a one-to-many relationship and establish
a default cascade of ``all, delete-orphan`` if so; additionally,
if the constraint specifies
:paramref:`_schema.ForeignKeyConstraint.ondelete`
of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns,
the ``passive_deletes=True`` option is also added.
5. The names of the relationships are determined using the
:paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and
:paramref:`.AutomapBase.prepare.name_for_collection_relationship`
callable functions. It is important to note that the default relationship
naming derives the name from the **the actual class name**. If you've
given a particular class an explicit name by declaring it, or specified an
alternate class naming scheme, that's the name from which the relationship
name will be derived.
6. The classes are inspected for an existing mapped property matching these
names. If one is detected on one side, but none on the other side,
:class:`.AutomapBase` attempts to create a relationship on the missing side,
then uses the :paramref:`_orm.relationship.back_populates`
parameter in order to
point the new relationship to the other side.
7. In the usual case where no relationship is on either side,
:meth:`.AutomapBase.prepare` produces a :func:`_orm.relationship` on the
"many-to-one" side and matches it to the other using the
:paramref:`_orm.relationship.backref` parameter.
8. Production of the :func:`_orm.relationship` and optionally the
:func:`.backref`
is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship`
function, which can be supplied by the end-user in order to augment
the arguments passed to :func:`_orm.relationship` or :func:`.backref` or to
make use of custom implementations of these functions.
Custom Relationship Arguments
-----------------------------
The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used
to add parameters to relationships. For most cases, we can make use of the
existing :func:`.automap.generate_relationship` function to return
the object, after augmenting the given keyword dictionary with our own
arguments.
Below is an illustration of how to send
:paramref:`_orm.relationship.cascade` and
:paramref:`_orm.relationship.passive_deletes`
options along to all one-to-many relationships::
from sqlalchemy.ext.automap import generate_relationship
def _gen_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw):
if direction is interfaces.ONETOMANY:
kw['cascade'] = 'all, delete-orphan'
kw['passive_deletes'] = True
# make use of the built-in function to actually return
# the result.
return generate_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw)
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
generate_relationship=_gen_relationship)
Many-to-Many relationships
--------------------------
:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g.
those which contain a ``secondary`` argument. The process for producing these
is as follows:
1. A given :class:`_schema.Table` is examined for
:class:`_schema.ForeignKeyConstraint`
objects, before any mapped class has been assigned to it.
2. If the table contains two and exactly two
:class:`_schema.ForeignKeyConstraint`
objects, and all columns within this table are members of these two
:class:`_schema.ForeignKeyConstraint` objects, the table is assumed to be a
"secondary" table, and will **not be mapped directly**.
3. The two (or one, for self-referential) external tables to which the
:class:`_schema.Table`
refers to are matched to the classes to which they will be
mapped, if any.
4. If mapped classes for both sides are located, a many-to-many bi-directional
:func:`_orm.relationship` / :func:`.backref`
pair is created between the two
classes.
5. The override logic for many-to-many works the same as that of one-to-many/
many-to-one; the :func:`.generate_relationship` function is called upon
to generate the structures and existing attributes will be maintained.
Relationships with Inheritance
------------------------------
:mod:`.sqlalchemy.ext.automap` will not generate any relationships between
two classes that are in an inheritance relationship. That is, with two
classes given as follows::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on': type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity':'engineer',
}
The foreign key from ``Engineer`` to ``Employee`` is used not for a
relationship, but to establish joined inheritance between the two classes.
Note that this means automap will not generate *any* relationships
for foreign keys that link from a subclass to a superclass. If a mapping
has actual relationships from subclass to superclass as well, those
need to be explicit. Below, as we have two separate foreign keys
from ``Engineer`` to ``Employee``, we need to set up both the relationship
we want as well as the ``inherit_condition``, as these are not things
SQLAlchemy can guess::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on':type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
favorite_employee_id = Column(Integer, ForeignKey('employee.id'))
favorite_employee = relationship(Employee,
foreign_keys=favorite_employee_id)
__mapper_args__ = {
'polymorphic_identity':'engineer',
'inherit_condition': id == Employee.id
}
Handling Simple Naming Conflicts
--------------------------------
In the case of naming conflicts during mapping, override any of
:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship` as needed. For example, if
automap is attempting to name a many-to-one relationship the same as an
existing column, an alternate convention can be conditionally selected. Given
a schema:
.. sourcecode:: sql
CREATE TABLE table_a (
id INTEGER PRIMARY KEY
);
CREATE TABLE table_b (
id INTEGER PRIMARY KEY,
table_a INTEGER,
FOREIGN KEY(table_a) REFERENCES table_a(id)
);
The above schema will first automap the ``table_a`` table as a class named
``table_a``; it will then automap a relationship onto the class for ``table_b``
with the same name as this related class, e.g. ``table_a``. This
relationship name conflicts with the mapping column ``table_b.table_a``,
and will emit an error on mapping.
We can resolve this conflict by using an underscore as follows::
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower()
local_table = local_cls.__table__
if name in local_table.columns:
newname = name + "_"
warnings.warn(
"Already detected name %s present. using %s" %
(name, newname))
return newname
return name
Base.prepare(engine, reflect=True,
name_for_scalar_relationship=name_for_scalar_relationship)
Alternatively, we can change the name on the column side. The columns
that are mapped can be modified using the technique described at
:ref:`mapper_column_distinct_names`, by assigning the column explicitly
to a new name::
Base = automap_base()
class TableB(Base):
__tablename__ = 'table_b'
_table_a = Column('table_a', ForeignKey('table_a.id'))
Base.prepare(engine, reflect=True)
Using Automap with Explicit Declarations
========================================
As noted previously, automap has no dependency on reflection, and can make
use of any collection of :class:`_schema.Table` objects within a
:class:`_schema.MetaData`
collection. From this, it follows that automap can also be used
generate missing relationships given an otherwise complete model that fully
defines table metadata::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import Column, Integer, String, ForeignKey
Base = automap_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
email = Column(String)
user_id = Column(ForeignKey('user.id'))
# produce relationships
Base.prepare()
# mapping is complete, with "address_collection" and
# "user" relationships
a1 = Address(email='u1')
a2 = Address(email='u2')
u1 = User(address_collection=[a1, a2])
assert a1.user is u1
Above, given mostly complete ``User`` and ``Address`` mappings, the
:class:`_schema.ForeignKey` which we defined on ``Address.user_id`` allowed a
bidirectional relationship pair ``Address.user`` and
``User.address_collection`` to be generated on the mapped classes.
Note that when subclassing :class:`.AutomapBase`,
the :meth:`.AutomapBase.prepare` method is required; if not called, the classes
we've declared are in an un-mapped state.
.. _automap_intercepting_columns:
Intercepting Column Definitions
===============================
The :class:`_schema.MetaData` and :class:`_schema.Table` objects support an
event hook :meth:`_events.DDLEvents.column_reflect` that may be used to intercept
the information reflected about a database column before the :class:`_schema.Column`
object is constructed. For example if we wanted to map columns using a
naming convention such as ``"attr_<columnname>"``, the event could
be applied as::
@event.listens_for(Base.metadata, "column_reflect")
def column_reflect(inspector, table, column_info):
# set column.key = "attr_<lower_case_name>"
column_info['key'] = "attr_%s" % column_info['name'].lower()
# run reflection
Base.prepare(engine, reflect=True)
.. versionadded:: 1.4.0b2 the :meth:`_events.DDLEvents.column_reflect` event
may be applied to a :class:`_schema.MetaData` object.
.. seealso::
:meth:`_events.DDLEvents.column_reflect`
:ref:`mapper_automated_reflection_schemes` - in the ORM mapping documentation
""" # noqa
from .. import util
from ..orm import backref
from ..orm import declarative_base as _declarative_base
from ..orm import exc as orm_exc
from ..orm import interfaces
from ..orm import relationship
from ..orm.decl_base import _DeferredMapperConfig
from ..orm.mapper import _CONFIGURE_MUTEX
from ..schema import ForeignKeyConstraint
from ..sql import and_
def classname_for_table(base, tablename, table):
    """Return the class name that should be used, given the name
    of a table.

    The default implementation is::

        return str(tablename)

    Alternate implementations can be specified using the
    :paramref:`.AutomapBase.prepare.classname_for_table`
    parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.

    :param tablename: string name of the :class:`_schema.Table`.

    :param table: the :class:`_schema.Table` object itself.

    :return: a string class name.

    .. note::

        In Python 2, the string used for the class name **must** be a
        non-Unicode object, e.g. a ``str()`` object.  The ``.name`` attribute
        of :class:`_schema.Table` is typically a Python unicode subclass,
        so the ``str()`` function should be applied to this name, after
        accounting for any non-ASCII characters.

    """
    # str() normalizes quoted_name / unicode subclasses into a plain string.
    return str(tablename)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
    """Return the attribute name that should be used to refer from one
    class to another, for a scalar object reference.

    The default implementation is::

        return referred_cls.__name__.lower()

    Alternate implementations can be specified using the
    :paramref:`.AutomapBase.prepare.name_for_scalar_relationship`
    parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.

    :param local_cls: the class to be mapped on the local side.

    :param referred_cls: the class to be mapped on the referring side.

    :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being
     inspected to produce this relationship.

    """
    return referred_cls.__name__.lower()
def name_for_collection_relationship(
    base, local_cls, referred_cls, constraint
):
    """Return the attribute name that should be used to refer from one
    class to another, for a collection reference.

    The default implementation is::

        return referred_cls.__name__.lower() + "_collection"

    Alternate implementations
    can be specified using the
    :paramref:`.AutomapBase.prepare.name_for_collection_relationship`
    parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.

    :param local_cls: the class to be mapped on the local side.

    :param referred_cls: the class to be mapped on the referring side.

    :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being
     inspected to produce this relationship.

    """
    return referred_cls.__name__.lower() + "_collection"
def generate_relationship(
    base, direction, return_fn, attrname, local_cls, referred_cls, **kw
):
    r"""Generate a :func:`_orm.relationship` or :func:`.backref`
    on behalf of two mapped classes.

    An alternate implementation of this function can be specified using the
    :paramref:`.AutomapBase.prepare.generate_relationship` parameter.

    The default implementation of this function is as follows::

        if return_fn is backref:
            return return_fn(attrname, **kw)
        elif return_fn is relationship:
            return return_fn(referred_cls, **kw)
        else:
            raise TypeError("Unknown relationship function: %s" % return_fn)

    :param base: the :class:`.AutomapBase` class doing the prepare.

    :param direction: indicate the "direction" of the relationship; this will
     be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`.

    :param return_fn: the function that is used by default to create the
     relationship.  This will be either :func:`_orm.relationship` or
     :func:`.backref`.  The :func:`.backref` function's result will be used to
     produce a new :func:`_orm.relationship` in a second step, so it is
     critical that user-defined implementations correctly differentiate
     between the two functions, if a custom relationship function is being
     used.

    :param attrname: the attribute name to which this relationship is being
     assigned.  If the value of :paramref:`.generate_relationship.return_fn`
     is the :func:`.backref` function, then this name is the name that is
     being assigned to the backref.

    :param local_cls: the "local" class to which this relationship or backref
     will be locally present.

    :param referred_cls: the "referred" class to which the relationship or
     backref refers to.

    :param \**kw: all additional keyword arguments are passed along to the
     function.

    :return: a :func:`_orm.relationship` or :func:`.backref` construct,
     as dictated by the :paramref:`.generate_relationship.return_fn`
     parameter.

    """
    # Dispatch on function identity: a backref is built from the attribute
    # name, while a relationship is built from the target class.
    if return_fn is backref:
        return return_fn(attrname, **kw)
    elif return_fn is relationship:
        return return_fn(referred_cls, **kw)
    else:
        raise TypeError("Unknown relationship function: %s" % return_fn)
class AutomapBase:
    """Base class for an "automap" schema.

    The :class:`.AutomapBase` class can be compared to the "declarative base"
    class that is produced by the :func:`.declarative.declarative_base`
    function.  In practice, the :class:`.AutomapBase` class is always used
    as a mixin along with an actual declarative base.

    A new subclassable :class:`.AutomapBase` is typically instantiated
    using the :func:`.automap_base` function.

    .. seealso::

        :ref:`automap_toplevel`

    """

    __abstract__ = True

    classes = None
    """An instance of :class:`.util.Properties` containing classes.

    This object behaves much like the ``.c`` collection on a table.  Classes
    are present under the name they were given, e.g.::

        Base = automap_base()
        Base.prepare(engine=some_engine, reflect=True)

        User, Address = Base.classes.User, Base.classes.Address

    """

    @classmethod
    @util.deprecated_params(
        engine=(
            "2.0",
            "The :paramref:`_automap.AutomapBase.prepare.engine` parameter "
            "is deprecated and will be removed in a future release. "
            "Please use the "
            ":paramref:`_automap.AutomapBase.prepare.autoload_with` "
            "parameter.",
        ),
        reflect=(
            "2.0",
            "The :paramref:`_automap.AutomapBase.prepare.reflect` "
            "parameter is deprecated and will be removed in a future "
            "release.  Reflection is enabled when "
            ":paramref:`_automap.AutomapBase.prepare.autoload_with` "
            "is passed.",
        ),
    )
    def prepare(
        cls,
        autoload_with=None,
        engine=None,
        reflect=False,
        schema=None,
        classname_for_table=None,
        collection_class=None,
        name_for_scalar_relationship=None,
        name_for_collection_relationship=None,
        generate_relationship=None,
        reflection_options=util.EMPTY_DICT,
    ):
        """Extract mapped classes and relationships from the
        :class:`_schema.MetaData` and perform mappings.

        :param engine: an :class:`_engine.Engine` or
         :class:`_engine.Connection` with which
         to perform schema reflection, if specified.
         If the :paramref:`.AutomapBase.prepare.reflect` argument is False,
         this object is not used.

        :param reflect: if True, the :meth:`_schema.MetaData.reflect`
         method is called
         on the :class:`_schema.MetaData` associated with this
         :class:`.AutomapBase`.
         The :class:`_engine.Engine` passed via
         :paramref:`.AutomapBase.prepare.engine` will be used to perform the
         reflection if present; else, the :class:`_schema.MetaData`
         should already be
         bound to some engine else the operation will fail.

        :param classname_for_table: callable function which will be used to
         produce new class names, given a table name.  Defaults to
         :func:`.classname_for_table`.

        :param name_for_scalar_relationship: callable function which will be
         used to produce relationship names for scalar relationships.
         Defaults to :func:`.name_for_scalar_relationship`.

        :param name_for_collection_relationship: callable function which will
         be used to produce relationship names for collection-oriented
         relationships.  Defaults to
         :func:`.name_for_collection_relationship`.

        :param generate_relationship: callable function which will be used to
         actually generate :func:`_orm.relationship` and :func:`.backref`
         constructs.  Defaults to :func:`.generate_relationship`.

        :param collection_class: the Python collection class that will be
         used when a new :func:`_orm.relationship`
         object is created that represents a
         collection.  Defaults to ``list``.

        :param schema: When present in conjunction with the
         :paramref:`.AutomapBase.prepare.reflect` flag, is passed to
         :meth:`_schema.MetaData.reflect`
         to indicate the primary schema where tables
         should be reflected from.  When omitted, the default schema in use
         by the database connection is used.

         .. versionadded:: 1.1

        :param reflection_options: When present, this dictionary of options
         will be passed to :meth:`_schema.MetaData.reflect`
         to supply general reflection-specific options like ``only`` and/or
         dialect-specific options like ``oracle_resolve_synonyms``.

         .. versionadded:: 1.4

        """
        # Resolve the default naming / generation hooks from module globals
        # so that monkeypatched module-level functions are honored.
        glbls = globals()
        if classname_for_table is None:
            classname_for_table = glbls["classname_for_table"]
        if name_for_scalar_relationship is None:
            name_for_scalar_relationship = glbls[
                "name_for_scalar_relationship"
            ]
        if name_for_collection_relationship is None:
            name_for_collection_relationship = glbls[
                "name_for_collection_relationship"
            ]
        if generate_relationship is None:
            generate_relationship = glbls["generate_relationship"]
        if collection_class is None:
            collection_class = list

        # Passing autoload_with implies reflection; the legacy ``engine``
        # parameter is folded into autoload_with.
        if autoload_with:
            reflect = True

        if engine:
            autoload_with = engine

        if reflect:
            opts = dict(
                schema=schema,
                extend_existing=True,
                autoload_replace=False,
            )
            if reflection_options:
                opts.update(reflection_options)
            cls.metadata.reflect(autoload_with, **opts)

        # Serialize the whole configure step against concurrent mapper
        # configuration.
        with _CONFIGURE_MUTEX:
            table_to_map_config = dict(
                (m.local_table, m)
                for m in _DeferredMapperConfig.classes_for_base(
                    cls, sort=False
                )
            )

            many_to_many = []

            for table in cls.metadata.tables.values():
                lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table)
                if lcl_m2m is not None:
                    # pure association table: handled after classes exist
                    many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table))
                elif not table.primary_key:
                    # tables without a primary key cannot be mapped
                    continue
                elif table not in table_to_map_config:
                    # generate a new mapped class for this table
                    mapped_cls = type(
                        classname_for_table(cls, table.name, table),
                        (cls,),
                        {"__table__": table},
                    )
                    map_config = _DeferredMapperConfig.config_for_cls(
                        mapped_cls
                    )
                    cls.classes[map_config.cls.__name__] = mapped_cls
                    table_to_map_config[table] = map_config

            for map_config in table_to_map_config.values():
                _relationships_for_fks(
                    cls,
                    map_config,
                    table_to_map_config,
                    collection_class,
                    name_for_scalar_relationship,
                    name_for_collection_relationship,
                    generate_relationship,
                )

            for lcl_m2m, rem_m2m, m2m_const, table in many_to_many:
                _m2m_relationship(
                    cls,
                    lcl_m2m,
                    rem_m2m,
                    m2m_const,
                    table,
                    table_to_map_config,
                    collection_class,
                    name_for_scalar_relationship,
                    name_for_collection_relationship,
                    generate_relationship,
                )

            for map_config in _DeferredMapperConfig.classes_for_base(cls):
                map_config.map()

    _sa_decl_prepare = True
    """Indicate that the mapping of classes should be deferred.

    The presence of this attribute name indicates to declarative
    that the call to mapper() should not occur immediately; instead,
    information about the table and attributes to be mapped are gathered
    into an internal structure called _DeferredMapperConfig.  These
    objects can be collected later using classes_for_base(), additional
    mapping decisions can be made, and then the map() method will actually
    apply the mapping.

    The only real reason this deferral of the whole
    thing is needed is to support primary key columns that aren't reflected
    yet when the class is declared; everything else can theoretically be
    added to the mapper later.  However, the _DeferredMapperConfig is a
    nice interface in any case which exists at that not usually exposed point
    at which declarative has the class and the Table but hasn't called
    mapper() yet.

    """

    @classmethod
    def _sa_raise_deferred_config(cls):
        raise orm_exc.UnmappedClassError(
            cls,
            msg="Class %s is a subclass of AutomapBase. "
            "Mappings are not produced until the .prepare() "
            "method is called on the class hierarchy."
            % orm_exc._safe_cls_name(cls),
        )
def automap_base(declarative_base=None, **kw):
    r"""Produce a declarative automap base.

    This function produces a new base class that is a product of the
    :class:`.AutomapBase` class as well a declarative base produced by
    :func:`.declarative.declarative_base`.

    All parameters other than ``declarative_base`` are keyword arguments
    that are passed directly to the :func:`.declarative.declarative_base`
    function.

    :param declarative_base: an existing class produced by
     :func:`.declarative.declarative_base`.  When this is passed, the
     function no longer invokes :func:`.declarative.declarative_base`
     itself, and all other keyword arguments are ignored.

    :param \**kw: keyword arguments are passed along to
     :func:`.declarative.declarative_base`.

    """
    if declarative_base is None:
        Base = _declarative_base(**kw)
    else:
        Base = declarative_base

    # Mix AutomapBase into the declarative base; each automap base gets its
    # own fresh ``classes`` collection.
    return type(
        Base.__name__,
        (AutomapBase, Base),
        {"__abstract__": True, "classes": util.Properties({})},
    )
def _is_many_to_many(automap_base, table):
    """Detect whether ``table`` is a pure many-to-many association table.

    A table qualifies when it has exactly two ForeignKeyConstraints and
    every one of its columns participates in one of them.

    :return: ``(left_table, right_table, [constraint, constraint])`` on a
     match, else ``(None, None, None)``.
    """
    fk_constraints = [
        const
        for const in table.constraints
        if isinstance(const, ForeignKeyConstraint)
    ]
    if len(fk_constraints) != 2:
        return None, None, None

    # flatten the parent (local) columns of both constraints
    cols = sum(
        [
            [fk.parent for fk in fk_constraint.elements]
            for fk_constraint in fk_constraints
        ],
        [],
    )

    # any extra non-FK column disqualifies the table as a pure secondary
    if set(cols) != set(table.c):
        return None, None, None

    return (
        fk_constraints[0].elements[0].column.table,
        fk_constraints[1].elements[0].column.table,
        fk_constraints,
    )
def _relationships_for_fks(
    automap_base,
    map_config,
    table_to_map_config,
    collection_class,
    name_for_scalar_relationship,
    name_for_collection_relationship,
    generate_relationship,
):
    """Generate many-to-one + one-to-many relationship pairs for each
    ForeignKeyConstraint on the table mapped by ``map_config``.
    """
    local_table = map_config.local_table
    local_cls = map_config.cls  # derived from a weakref, may be None

    if local_table is None or local_cls is None:
        return
    for constraint in local_table.constraints:
        if isinstance(constraint, ForeignKeyConstraint):
            fks = constraint.elements
            referred_table = fks[0].column.table
            referred_cfg = table_to_map_config.get(referred_table, None)
            if referred_cfg is None:
                continue
            referred_cls = referred_cfg.cls

            # skip inheritance-style FKs from a subclass to its superclass
            if local_cls is not referred_cls and issubclass(
                local_cls, referred_cls
            ):
                continue

            relationship_name = name_for_scalar_relationship(
                automap_base, local_cls, referred_cls, constraint
            )
            backref_name = name_for_collection_relationship(
                automap_base, referred_cls, local_cls, constraint
            )

            o2m_kws = {}

            # "nullable" means at least one FK column allows NULL
            nullable = False not in {fk.parent.nullable for fk in fks}
            if not nullable:
                # NOT NULL FK: child rows cannot exist without a parent
                o2m_kws["cascade"] = "all, delete-orphan"

                if (
                    constraint.ondelete
                    and constraint.ondelete.lower() == "cascade"
                ):
                    o2m_kws["passive_deletes"] = True
            else:
                if (
                    constraint.ondelete
                    and constraint.ondelete.lower() == "set null"
                ):
                    o2m_kws["passive_deletes"] = True

            create_backref = backref_name not in referred_cfg.properties

            if relationship_name not in map_config.properties:
                if create_backref:
                    backref_obj = generate_relationship(
                        automap_base,
                        interfaces.ONETOMANY,
                        backref,
                        backref_name,
                        referred_cls,
                        local_cls,
                        collection_class=collection_class,
                        **o2m_kws,
                    )
                else:
                    backref_obj = None
                rel = generate_relationship(
                    automap_base,
                    interfaces.MANYTOONE,
                    relationship,
                    relationship_name,
                    local_cls,
                    referred_cls,
                    foreign_keys=[fk.parent for fk in constraint.elements],
                    backref=backref_obj,
                    remote_side=[fk.column for fk in constraint.elements],
                )
                if rel is not None:
                    map_config.properties[relationship_name] = rel

                    if not create_backref:
                        # link to the pre-existing reverse relationship
                        referred_cfg.properties[
                            backref_name
                        ].back_populates = relationship_name
            elif create_backref:
                # scalar side already exists; create only the collection side
                rel = generate_relationship(
                    automap_base,
                    interfaces.ONETOMANY,
                    relationship,
                    backref_name,
                    referred_cls,
                    local_cls,
                    foreign_keys=[fk.parent for fk in constraint.elements],
                    back_populates=relationship_name,
                    collection_class=collection_class,
                    **o2m_kws,
                )
                if rel is not None:
                    referred_cfg.properties[backref_name] = rel
                    map_config.properties[
                        relationship_name
                    ].back_populates = backref_name
def _m2m_relationship(
    automap_base,
    lcl_m2m,
    rem_m2m,
    m2m_const,
    table,
    table_to_map_config,
    collection_class,
    name_for_scalar_relationship,
    name_for_collection_relationship,
    generate_relationship,
):
    """Generate a many-to-many relationship pair across the association
    ``table``, linking the classes mapped to ``lcl_m2m`` and ``rem_m2m``.
    """
    map_config = table_to_map_config.get(lcl_m2m, None)
    referred_cfg = table_to_map_config.get(rem_m2m, None)
    if map_config is None or referred_cfg is None:
        return

    local_cls = map_config.cls
    referred_cls = referred_cfg.cls

    relationship_name = name_for_collection_relationship(
        automap_base, local_cls, referred_cls, m2m_const[0]
    )
    backref_name = name_for_collection_relationship(
        automap_base, referred_cls, local_cls, m2m_const[1]
    )

    create_backref = backref_name not in referred_cfg.properties

    # if the secondary table is itself mapped, silence overlap warnings
    if table in table_to_map_config:
        overlaps = "__*"
    else:
        overlaps = None

    if relationship_name not in map_config.properties:
        if create_backref:
            backref_obj = generate_relationship(
                automap_base,
                interfaces.MANYTOMANY,
                backref,
                backref_name,
                referred_cls,
                local_cls,
                collection_class=collection_class,
                overlaps=overlaps,
            )
        else:
            backref_obj = None

        rel = generate_relationship(
            automap_base,
            interfaces.MANYTOMANY,
            relationship,
            relationship_name,
            local_cls,
            referred_cls,
            overlaps=overlaps,
            secondary=table,
            primaryjoin=and_(
                fk.column == fk.parent for fk in m2m_const[0].elements
            ),
            secondaryjoin=and_(
                fk.column == fk.parent for fk in m2m_const[1].elements
            ),
            backref=backref_obj,
            collection_class=collection_class,
        )
        if rel is not None:
            map_config.properties[relationship_name] = rel

            if not create_backref:
                referred_cfg.properties[
                    backref_name
                ].back_populates = relationship_name
    elif create_backref:
        # forward side exists; build only the reverse collection, with the
        # join conditions swapped relative to the forward direction
        rel = generate_relationship(
            automap_base,
            interfaces.MANYTOMANY,
            relationship,
            backref_name,
            referred_cls,
            local_cls,
            overlaps=overlaps,
            secondary=table,
            primaryjoin=and_(
                fk.column == fk.parent for fk in m2m_const[1].elements
            ),
            secondaryjoin=and_(
                fk.column == fk.parent for fk in m2m_const[0].elements
            ),
            back_populates=relationship_name,
            collection_class=collection_class,
        )
        if rel is not None:
            referred_cfg.properties[backref_name] = rel
            map_config.properties[
                relationship_name
            ].back_populates = backref_name
| 36.88907 | 84 | 0.663004 |
c376e76dbdcf98412097511fdc5487daecfc3483 | 2,026 | py | Python | ifttt_bme280.py | KeitetsuWorks/ifttt_bme280 | b4d27a444d09a1a60eca8b0322eff41396ea9970 | [
"MIT"
] | null | null | null | ifttt_bme280.py | KeitetsuWorks/ifttt_bme280 | b4d27a444d09a1a60eca8b0322eff41396ea9970 | [
"MIT"
] | null | null | null | ifttt_bme280.py | KeitetsuWorks/ifttt_bme280 | b4d27a444d09a1a60eca8b0322eff41396ea9970 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## @file ifttt_bme280.py
## @brief Script to send BME280 data to IFTTT server
## @author Keitetsu
## @date 2018/12/02
## @copyright Copyright (c) 2018 Keitetsu
## @par License
## This software is released under the MIT License.
##
import json
import requests
import smbus2
import bme280
import time
def post_BME280Data_to_IFTTT(data):
    """Post one BME280 reading to an IFTTT Webhooks event.

    Reads the Webhooks key and event name from ``ifttt_bme280.json`` in the
    working directory and sends temperature / humidity / pressure as
    ``value1`` / ``value2`` / ``value3``.

    :param data: a bme280 compensated reading exposing ``temperature``,
        ``humidity`` and ``pressure`` attributes.
    :return: True if the IFTTT server answered HTTP 200, False otherwise.
    """
    with open('ifttt_bme280.json', 'r') as f:
        dict_configs = json.load(f)

    str_key = dict_configs['ifttt']['key']
    str_event = dict_configs['ifttt']['event']
    str_url = 'https://maker.ifttt.com/trigger/' \
        + str_event \
        + '/with/key/' \
        + str_key

    # format readings to two decimal places (temperature keeps its sign)
    str_temperature = '{0:-.2f}'.format(data.temperature)
    str_humidity = '{0:.2f}'.format(data.humidity)
    str_pressure = '{0:.2f}'.format(data.pressure)

    dict_json_data = {
        "value1": str_temperature,
        "value2": str_humidity,
        "value3": str_pressure
    }
    json_data = json.dumps(dict_json_data)
    dict_headers = {'Content-Type': 'application/json'}

    # IFTTT Webhooks
    print("begin request")
    # use the Session as a context manager so the connection pool is
    # released even if the POST raises (original leaked the session)
    with requests.Session() as ifttt_session:
        ifttt_response = ifttt_session.post(
            str_url, data=json_data, headers=dict_headers)
    print("response status code: %d" % (ifttt_response.status_code))
    if ifttt_response.status_code == 200:
        print("response text: %s" % (ifttt_response.text))
        ret_val = True
    else:
        ret_val = False
    print("end request")

    return ret_val
if __name__ == '__main__':
    # I2C bus number and address of the BME280 sensor
    port = 1
    address = 0x76
    bus = smbus2.SMBus(port)

    calibration_params = bme280.load_calibration_params(bus, address)

    # the sample method will take a single reading and return a
    # compensated_reading object
    data = bme280.sample(bus, address, calibration_params)

    # retry the POST up to 5 times, waiting 2 seconds between attempts
    for i in range(0, 5):
        result = post_BME280Data_to_IFTTT(data)
        if result:  # idiomatic truthiness instead of "== True"
            break
        time.sleep(2)
| 26.311688 | 90 | 0.633761 |
b71622eb2aef9f4cb99810b841bd3e3859fba695 | 483 | py | Python | api/migrations/0003_container_prev_art.py | maravger/edgy-controller | 140bf952fcc43bbba344f169c782132d500e1343 | [
"MIT"
] | 1 | 2021-01-10T22:16:10.000Z | 2021-01-10T22:16:10.000Z | api/migrations/0003_container_prev_art.py | maravger/edgy-controller | 140bf952fcc43bbba344f169c782132d500e1343 | [
"MIT"
] | 8 | 2018-05-10T13:27:47.000Z | 2022-01-13T00:42:56.000Z | api/migrations/0003_container_prev_art.py | maravger/edgy-controller | 140bf952fcc43bbba344f169c782132d500e1343 | [
"MIT"
] | 2 | 2018-03-26T15:08:19.000Z | 2019-05-13T18:03:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-24 14:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.11): adds the decimal field
    # ``prev_art`` to the ``container`` model with a default of 0.0.

    # must be applied after the previous api-app migration
    dependencies = [
        ('api', '0002_auto_20180424_1428'),
    ]

    operations = [
        migrations.AddField(
            model_name='container',
            name='prev_art',
            # up to 6 digits total, 3 after the decimal point
            field=models.DecimalField(decimal_places=3, default=0.0, max_digits=6),
        ),
    ]
| 23 | 83 | 0.627329 |
b7c181c137e3cd0ed9d11de8bcc904ee3a943755 | 139 | py | Python | filepart/split.py | UnknownPlayer78/filepart | 0ebb3a4428b1d415185b13be24034f688ac6185b | [
"MIT"
] | 1 | 2020-01-18T14:13:43.000Z | 2020-01-18T14:13:43.000Z | filepart/split.py | UnknownPlayer78/filepart | 0ebb3a4428b1d415185b13be24034f688ac6185b | [
"MIT"
] | null | null | null | filepart/split.py | UnknownPlayer78/filepart | 0ebb3a4428b1d415185b13be24034f688ac6185b | [
"MIT"
class Splitter:
    """Skeleton for a file-splitting utility; splitting is not implemented."""

    def __init__(self, file, parts, output="./"):
        # Echo the constructor arguments; no state is retained yet.
        print(file, parts, output)

    def split(self):
        # Not implemented yet; returns None like the original no-op.
        return None
ab39ff63d5bbe250f778c31a4e286c2515c48934 | 1,683 | py | Python | spyne/test/interop/test_msgpackrpc_client_http.py | edustaff/spyne | 27f2061325d29a55803fb47b1b37978ab21ea240 | [
"BSD-3-Clause"
] | 786 | 2015-01-04T10:46:28.000Z | 2022-03-31T19:24:35.000Z | spyne/test/interop/test_msgpackrpc_client_http.py | edustaff/spyne | 27f2061325d29a55803fb47b1b37978ab21ea240 | [
"BSD-3-Clause"
] | 248 | 2015-01-01T21:52:47.000Z | 2022-03-09T08:55:04.000Z | spyne/test/interop/test_msgpackrpc_client_http.py | edustaff/spyne | 27f2061325d29a55803fb47b1b37978ab21ea240 | [
"BSD-3-Clause"
] | 210 | 2015-01-10T14:20:31.000Z | 2022-03-09T08:38:43.000Z | #!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import unittest
from spyne.client.http import HttpClient
from spyne.test.interop._test_soap_client_base import SpyneClientTestBase
from spyne.test.interop.server.msgpackrpc_http_basic import msgpackrpc_application, port
from spyne.util.etreeconv import root_dict_to_etree
class TestSpyneHttpClient(SpyneClientTestBase, unittest.TestCase):
    # Interop tests driving the MessagePack-RPC-over-HTTP test server with
    # spyne's HttpClient; inherits the actual test cases from
    # SpyneClientTestBase.

    def setUp(self):
        # spins up / connects to the 'msgpack_rpc_http' interop server
        SpyneClientTestBase.setUp(self, 'msgpack_rpc_http')

        self.client = HttpClient('http://localhost:%d/' % port[0],
                                            msgpackrpc_application)
        self.ns = "spyne.test.interop.server"

    # Header round-trip tests from the base class do not apply to the
    # MessagePackRpc protocol, so they are skipped here.
    @unittest.skip("MessagePackRpc does not support header")
    def test_echo_in_header(self):
        pass

    @unittest.skip("MessagePackRpc does not support header")
    def test_send_out_header(self):
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 36.586957 | 88 | 0.733214 |
59d20cc5b8d7daa3488a2f9023ebed0ef2a79c69 | 20,101 | py | Python | src/lib/opts.py | nrupatunga/CenterNet | 53806f68295062868953081063cd2f82e92f3d4a | [
"MIT"
] | null | null | null | src/lib/opts.py | nrupatunga/CenterNet | 53806f68295062868953081063cd2f82e92f3d4a | [
"MIT"
] | null | null | null | src/lib/opts.py | nrupatunga/CenterNet | 53806f68295062868953081063cd2f82e92f3d4a | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
class opts(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
# basic experiment setting
self.parser.add_argument('task', default='ctdet',
help='ctdet | ddd | multi_pose | exdet')
self.parser.add_argument('--dataset', default='coco',
help='coco | kitti | coco_hp | pascal')
self.parser.add_argument('--exp_id', default='default')
self.parser.add_argument('--test', action='store_true')
self.parser.add_argument('--debug', type=int, default=0,
help='level of visualization.'
'1: only show the final detection results'
'2: show the network output features'
'3: use matplot to display' # useful when lunching training with ipython notebook
'4: save all visualizations to disk')
self.parser.add_argument('--demo', default='',
help='path to image/ image folders/ video. '
'or "webcam"')
self.parser.add_argument('--load_model', default='',
help='path to pretrained model')
self.parser.add_argument('--resume', action='store_true',
help='resume an experiment. '
'Reloaded the optimizer parameter and '
'set load_model to model_last.pth '
'in the exp dir if load_model is empty.')
# system
self.parser.add_argument('--gpus', default='0',
help='-1 for CPU, use comma for multiple gpus')
self.parser.add_argument('--num_workers', type=int, default=4,
help='dataloader threads. 0 for single-thread.')
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
help='disable when the input size is not fixed.')
self.parser.add_argument('--seed', type=int, default=317,
help='random seed') # from CornerNet
# log
self.parser.add_argument('--print_iter', type=int, default=0,
help='disable progress bar and print to screen.')
self.parser.add_argument('--hide_data_time', action='store_true',
help='not display time during training.')
self.parser.add_argument('--save_all', action='store_true',
help='save model to disk every 5 epochs.')
self.parser.add_argument('--metric', default='loss',
help='main metric to save best model')
self.parser.add_argument('--vis_thresh', type=float, default=0.3,
help='visualization threshold.')
self.parser.add_argument('--debugger_theme', default='white',
choices=['white', 'black'])
# model
self.parser.add_argument('--arch', default='dla_34',
help='model architecture. Currently tested'
'res_18 | res_101 | resdcn_18 | resdcn_101 |'
'dlav0_34 | dla_34 | hourglass')
self.parser.add_argument('--head_conv', type=int, default=-1,
help='conv layer channels for output head'
'0 for no conv layer'
'-1 for default setting: '
'64 for resnets and 256 for dla.')
self.parser.add_argument('--down_ratio', type=int, default=4,
help='output stride. Currently only supports 4.')
# input
self.parser.add_argument('--input_res', type=int, default=-1,
help='input height and width. -1 for default from '
'dataset. Will be overriden by input_h | input_w')
self.parser.add_argument('--input_h', type=int, default=-1,
help='input height. -1 for default from dataset.')
self.parser.add_argument('--input_w', type=int, default=-1,
help='input width. -1 for default from dataset.')
# train
self.parser.add_argument('--lr', type=float, default=1.25e-4,
help='learning rate for batch size 32.')
self.parser.add_argument('--lr_step', type=str, default='90,120',
help='drop learning rate by 10.')
self.parser.add_argument('--num_epochs', type=int, default=140,
help='total training epochs.')
self.parser.add_argument('--batch_size', type=int, default=32,
help='batch size')
self.parser.add_argument('--master_batch_size', type=int, default=-1,
help='batch size on the master gpu.')
self.parser.add_argument('--num_iters', type=int, default=-1,
help='default: #samples / batch_size.')
self.parser.add_argument('--val_intervals', type=int, default=5,
help='number of epochs to run validation.')
self.parser.add_argument('--trainval', action='store_true',
help='include validation in training and '
'test on test set')
# test
self.parser.add_argument('--flip_test', action='store_true',
help='flip data augmentation.')
self.parser.add_argument('--test_scales', type=str, default='1',
help='multi scale test augmentation.')
self.parser.add_argument('--nms', action='store_true',
help='run nms in testing.')
self.parser.add_argument('--K', type=int, default=100,
help='max number of output objects.')
self.parser.add_argument('--not_prefetch_test', action='store_true',
help='not use parallal data pre-processing.')
self.parser.add_argument('--fix_res', action='store_true',
help='fix testing resolution or keep '
'the original resolution')
self.parser.add_argument('--keep_res', action='store_true',
help='keep the original resolution'
' during validation.')
# dataset
self.parser.add_argument('--not_rand_crop', action='store_true',
help='not use the random crop data augmentation'
'from CornerNet.')
self.parser.add_argument('--shift', type=float, default=0.1,
help='when not using random crop'
'apply shift augmentation.')
self.parser.add_argument('--scale', type=float, default=0.4,
help='when not using random crop'
'apply scale augmentation.')
self.parser.add_argument('--rotate', type=float, default=0,
help='when not using random crop'
'apply rotation augmentation.')
self.parser.add_argument('--flip', type=float, default=0.5,
help='probability of applying flip augmentation.')
self.parser.add_argument('--no_color_aug', action='store_true',
help='not use the color augmenation '
'from CornerNet')
# multi_pose
self.parser.add_argument('--aug_rot', type=float, default=0,
help='probability of applying '
'rotation augmentation.')
# ddd
self.parser.add_argument('--aug_ddd', type=float, default=0.5,
help='probability of applying crop augmentation.')
self.parser.add_argument('--rect_mask', action='store_true',
help='for ignored object, apply mask on the '
'rectangular region or just center point.')
self.parser.add_argument('--kitti_split', default='3dop',
help='different validation split for kitti: '
'3dop | subcnn')
# loss
self.parser.add_argument('--mse_loss', action='store_true',
help='use mse loss or focal loss to train '
'keypoint heatmaps.')
# ctdet
self.parser.add_argument('--reg_loss', default='l1',
help='regression loss: sl1 | l1 | l2')
self.parser.add_argument('--hm_weight', type=float, default=1,
help='loss weight for keypoint heatmaps.')
self.parser.add_argument('--off_weight', type=float, default=1,
help='loss weight for keypoint local offsets.')
self.parser.add_argument('--wh_weight', type=float, default=0.1,
help='loss weight for bounding box size.')
# multi_pose
self.parser.add_argument('--hp_weight', type=float, default=1,
help='loss weight for human pose offset.')
self.parser.add_argument('--hm_hp_weight', type=float, default=1,
help='loss weight for human keypoint heatmap.')
# ddd
self.parser.add_argument('--dep_weight', type=float, default=1,
help='loss weight for depth.')
self.parser.add_argument('--dim_weight', type=float, default=1,
help='loss weight for 3d bounding box size.')
self.parser.add_argument('--rot_weight', type=float, default=1,
help='loss weight for orientation.')
self.parser.add_argument('--peak_thresh', type=float, default=0.2)
# task
# ctdet
self.parser.add_argument('--norm_wh', action='store_true',
help='L1(\hat(y) / y, 1) or L1(\hat(y), y)')
self.parser.add_argument('--dense_wh', action='store_true',
help='apply weighted regression near center or '
'just apply regression on center point.')
self.parser.add_argument('--cat_spec_wh', action='store_true',
help='category specific bounding box size.')
self.parser.add_argument('--not_reg_offset', action='store_true',
help='not regress local offset.')
# exdet
self.parser.add_argument('--agnostic_ex', action='store_true',
help='use category agnostic extreme points.')
self.parser.add_argument('--scores_thresh', type=float, default=0.1,
help='threshold for extreme point heatmap.')
self.parser.add_argument('--center_thresh', type=float, default=0.1,
help='threshold for centermap.')
self.parser.add_argument('--aggr_weight', type=float, default=0.0,
help='edge aggregation weight.')
# multi_pose
self.parser.add_argument('--dense_hp', action='store_true',
help='apply weighted pose regression near center '
'or just apply regression on center point.')
self.parser.add_argument('--not_hm_hp', action='store_true',
help='not estimate human joint heatmap, '
'directly use the joint offset from center.')
self.parser.add_argument('--not_reg_hp_offset', action='store_true',
help='not regress local offset for '
'human joint heatmaps.')
self.parser.add_argument('--not_reg_bbox', action='store_true',
help='not regression bounding box size.')
# ground truth validation
self.parser.add_argument('--eval_oracle_hm', action='store_true',
help='use ground center heatmap.')
self.parser.add_argument('--eval_oracle_wh', action='store_true',
help='use ground truth bounding box size.')
self.parser.add_argument('--eval_oracle_offset', action='store_true',
help='use ground truth local heatmap offset.')
self.parser.add_argument('--eval_oracle_kps', action='store_true',
help='use ground truth human pose offset.')
self.parser.add_argument('--eval_oracle_hmhp', action='store_true',
help='use ground truth human joint heatmaps.')
self.parser.add_argument('--eval_oracle_hp_offset', action='store_true',
help='use ground truth human joint local offset.')
self.parser.add_argument('--eval_oracle_dep', action='store_true',
help='use ground truth depth.')
def parse(self, args=''):
if args == '':
opt = self.parser.parse_args()
else:
opt = self.parser.parse_args(args)
opt.gpus_str = opt.gpus
opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
opt.gpus = [i for i in range(
len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
opt.test_scales = [float(i) for i in opt.test_scales.split(',')]
opt.fix_res = not opt.keep_res
print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')
opt.reg_offset = not opt.not_reg_offset
opt.reg_bbox = not opt.not_reg_bbox
opt.hm_hp = not opt.not_hm_hp
opt.reg_hp_offset = (not opt.not_reg_hp_offset) and opt.hm_hp
if opt.head_conv == -1: # init default head_conv
opt.head_conv = 256 if 'dla' in opt.arch else 64
opt.pad = 127 if 'hourglass' in opt.arch else 31
opt.num_stacks = 2 if opt.arch == 'hourglass' else 1
if opt.trainval:
opt.val_intervals = 100000000
if opt.debug > 0:
opt.num_workers = 0
opt.batch_size = 1
opt.gpus = [opt.gpus[0]]
opt.master_batch_size = -1
if opt.master_batch_size == -1:
opt.master_batch_size = opt.batch_size // len(opt.gpus)
rest_batch_size = (opt.batch_size - opt.master_batch_size)
opt.chunk_sizes = [opt.master_batch_size]
for i in range(len(opt.gpus) - 1):
slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
if i < rest_batch_size % (len(opt.gpus) - 1):
slave_chunk_size += 1
opt.chunk_sizes.append(slave_chunk_size)
print('training chunk_sizes:', opt.chunk_sizes)
opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
opt.data_dir = os.path.join(opt.root_dir, 'data')
opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
opt.debug_dir = os.path.join(opt.save_dir, 'debug')
print('The output will be saved to ', opt.save_dir)
if opt.resume and opt.load_model == '':
model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \
else opt.save_dir
opt.load_model = os.path.join(model_path, 'model_last.pth')
return opt
def update_dataset_info_and_set_heads(self, opt, dataset):
input_h, input_w = dataset.default_resolution
opt.mean, opt.std = dataset.mean, dataset.std
opt.num_classes = dataset.num_classes
# input_h(w): opt.input_h overrides opt.input_res overrides dataset default
input_h = opt.input_res if opt.input_res > 0 else input_h
input_w = opt.input_res if opt.input_res > 0 else input_w
opt.input_h = opt.input_h if opt.input_h > 0 else input_h
opt.input_w = opt.input_w if opt.input_w > 0 else input_w
opt.output_h = opt.input_h // opt.down_ratio
opt.output_w = opt.input_w // opt.down_ratio
opt.input_res = max(opt.input_h, opt.input_w)
opt.output_res = max(opt.output_h, opt.output_w)
if opt.task == 'exdet':
# assert opt.dataset in ['coco']
num_hm = 1 if opt.agnostic_ex else opt.num_classes
opt.heads = {'hm_t': num_hm, 'hm_l': num_hm,
'hm_b': num_hm, 'hm_r': num_hm,
'hm_c': opt.num_classes}
if opt.reg_offset:
opt.heads.update(
{'reg_t': 2, 'reg_l': 2, 'reg_b': 2, 'reg_r': 2})
elif opt.task == 'ddd':
# assert opt.dataset in ['gta', 'kitti', 'viper']
opt.heads = {'hm': opt.num_classes, 'dep': 1, 'rot': 8, 'dim': 3}
if opt.reg_bbox:
opt.heads.update(
{'wh': 2})
if opt.reg_offset:
opt.heads.update({'reg': 2})
elif opt.task == 'ctdet':
# assert opt.dataset in ['pascal', 'coco']
opt.heads = {'hm': opt.num_classes,
'wh': 2 if not opt.cat_spec_wh else 2 * opt.num_classes}
if opt.reg_offset:
opt.heads.update({'reg': 2})
elif opt.task == 'multi_pose':
# assert opt.dataset in ['coco_hp']
opt.flip_idx = dataset.flip_idx
opt.heads = {'hm': opt.num_classes, 'wh': 2, 'hps': 34}
if opt.reg_offset:
opt.heads.update({'reg': 2})
if opt.hm_hp:
opt.heads.update({'hm_hp': 17})
if opt.reg_hp_offset:
opt.heads.update({'hp_offset': 2})
else:
assert 0, 'task not defined!'
print('heads', opt.heads)
return opt
def init(self, args=''):
default_dataset_info = {
'ctdet': {'default_resolution': [512, 512], 'num_classes': 80,
'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278],
'dataset': 'coco'},
'exdet': {'default_resolution': [512, 512], 'num_classes': 80,
'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278],
'dataset': 'coco'},
'multi_pose': {
'default_resolution': [512, 512], 'num_classes': 1,
'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278],
'dataset': 'coco_hp', 'num_joints': 17,
'flip_idx': [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]},
'ddd': {'default_resolution': [384, 1280], 'num_classes': 3,
'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],
'dataset': 'kitti'},
}
class Struct:
def __init__(self, entries):
for k, v in entries.items():
self.__setattr__(k, v)
opt = self.parse(args)
dataset = Struct(default_dataset_info[opt.task])
opt.dataset = dataset.dataset
opt = self.update_dataset_info_and_set_heads(opt, dataset)
return opt
| 55.071233 | 120 | 0.518631 |
6cf6bd8bcffd0df7b1b160f84783f1c11483d17b | 5,446 | py | Python | mezzanine/conf/forms.py | duboviy/mezzanine | cf6c79cd8f1c817416cc210c22232fa6ecc7dd82 | [
"BSD-2-Clause"
] | 3 | 2019-05-14T13:43:26.000Z | 2021-11-09T11:27:16.000Z | mezzanine/conf/forms.py | duboviy/mezzanine | cf6c79cd8f1c817416cc210c22232fa6ecc7dd82 | [
"BSD-2-Clause"
] | 9 | 2020-03-24T16:20:31.000Z | 2022-03-11T23:32:38.000Z | mezzanine/conf/forms.py | duboviy/mezzanine | cf6c79cd8f1c817416cc210c22232fa6ecc7dd82 | [
"BSD-2-Clause"
] | 2 | 2019-11-12T14:09:39.000Z | 2019-11-12T14:11:16.000Z | from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import activate, get_language, ugettext_lazy as _
from django.template.defaultfilters import urlize
from mezzanine.conf import settings, registry
from mezzanine.conf.models import Setting
if settings.USE_MODELTRANSLATION:
from collections import OrderedDict
from modeltranslation.utils import build_localized_fieldname
# Map a setting's registered Python type to the Django form field class used
# to edit it; any type not listed here falls back to forms.CharField
# (see SettingsForm._init_field).
FIELD_TYPES = {
    bool: forms.BooleanField,
    int: forms.IntegerField,
    float: forms.FloatField,
}
class SettingsForm(forms.Form):
    """
    Form for settings - creates a field for each setting in
    ``mezzanine.conf`` that is marked as editable.

    When ``USE_MODELTRANSLATION`` is enabled, translatable settings get one
    field per configured language, named ``<name>_modeltranslation_<code>``.
    """

    def __init__(self, *args, **kwargs):
        super(SettingsForm, self).__init__(*args, **kwargs)
        # Create a form field for each editable setting, based on its type.
        active_language = get_language()
        for name in sorted(registry.keys()):
            setting = registry[name]
            if setting["editable"]:
                field_class = FIELD_TYPES.get(setting["type"], forms.CharField)
                if settings.USE_MODELTRANSLATION and setting["translatable"]:
                    for code in OrderedDict(settings.LANGUAGES):
                        try:
                            activate(code)
                        except Exception:
                            # NOTE(review): presumably guards against invalid
                            # language codes; narrowed from a bare except so
                            # KeyboardInterrupt/SystemExit are not swallowed.
                            pass
                        else:
                            self._init_field(setting, field_class, name, code,
                                             active_language,)
                else:
                    self._init_field(setting, field_class, name)
        # Restore the language that was active before field construction.
        activate(active_language)

    def _init_field(self, setting, field_class, name, code=None,
                    active_language=None,):
        """
        Initialize a field whether it is built with a custom name for a
        specific translation language or not.

        ``code`` is the language code of the translation being edited (or
        None for an untranslated field); ``active_language`` is the language
        that was active when the form was constructed.
        """
        initial = getattr(settings, name)
        if (code and code != active_language and
                settings.USE_MODELTRANSLATION and setting["translatable"]):
            # For a non-active language, read the stored value directly from
            # the DB, since ``settings`` reflects the active language only.
            try:
                initial = Setting.objects.get(name=name).value
            except Setting.DoesNotExist:
                pass
        kwargs = {
            "label": setting["label"] + ":",
            # Only numeric settings are required: an empty string is a valid
            # value for text settings but not for int/float ones.
            "required": setting["type"] in (int, float),
            "initial": initial,
            "help_text": self.format_help(setting["description"]),
        }
        if setting["choices"]:
            field_class = forms.ChoiceField
            kwargs["choices"] = setting["choices"]
        field_instance = field_class(**kwargs)
        code_name = ('_modeltranslation_' + code if code else '')
        self.fields[name + code_name] = field_instance
        css_class = field_class.__name__.lower()
        field_instance.widget.attrs["class"] = css_class
        if code:
            field_instance.widget.attrs["class"] += " modeltranslation"

    def __iter__(self):
        """
        Calculate and apply a group heading to each field and order by
        the heading.
        """
        fields = list(super(SettingsForm, self).__iter__())

        def group(field):
            # Group name is the prefix of the setting name before the first
            # underscore, title-cased (eg "SITE_TITLE" -> "Site").
            return field.name.split("_", 1)[0].title()

        misc = _("Miscellaneous")
        groups = defaultdict(int)
        for field in fields:
            groups[group(field)] += 1
        for (i, field) in enumerate(fields):
            setattr(fields[i], "group", group(field))
            if groups[fields[i].group] == 1:
                # Singleton groups are collected under "Miscellaneous".
                fields[i].group = misc
        # "Miscellaneous" sorts last; other groups alphabetically.
        return iter(sorted(fields, key=lambda x: (x.group == misc, x.group)))

    def save(self):
        """
        Save each of the settings to the DB.
        """
        active_language = get_language()
        for (name, value) in self.cleaned_data.items():
            if name not in registry:
                # Per-language field: recover the setting name and the
                # language code from "<name>_modeltranslation_<code>".
                name, code = name.rsplit('_modeltranslation_', 1)
            else:
                code = None
            setting_obj, created = Setting.objects.get_or_create(name=name)
            if settings.USE_MODELTRANSLATION:
                if registry[name]["translatable"]:
                    # Activate the field's language so the assignment below
                    # targets the matching translated column; always restore
                    # the original language afterwards.
                    try:
                        activate(code)
                    except Exception:
                        # Narrowed from a bare except - see __init__.
                        pass
                    finally:
                        setting_obj.value = value
                        activate(active_language)
                else:
                    # Duplicate the value of the setting for every language
                    for code in OrderedDict(settings.LANGUAGES):
                        setattr(setting_obj,
                                build_localized_fieldname('value', code),
                                value)
            else:
                setting_obj.value = value
            setting_obj.save()

    def format_help(self, description):
        """
        Format the setting's description into HTML.

        Text wrapped in ``double backticks`` or *asterisks* is rendered
        bold, URLs become links, and newlines become ``<br>`` tags.
        """
        if description is None:
            description = ""
        for bold in ("``", "*"):
            parts = []
            # Odd-indexed chunks fell between a pair of markers: bold them.
            for i, s in enumerate(description.split(bold)):
                parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
            description = "".join(parts)
        description = urlize(description, autoescape=False)
        return mark_safe(description.replace("\n", "<br>"))
| 37.558621 | 79 | 0.557657 |
0605a016075ad9e31c2542854fe316da9272399c | 709 | py | Python | SearchFunctions/virustotal.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | SearchFunctions/virustotal.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | SearchFunctions/virustotal.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | from Extensions.functions import *
import requests
class VirusTotalSearch:
    """Collect sub-domain information for a word via the VirusTotal web UI."""

    def __init__(self, word):
        # Domain/word to query; accumulated response text starts out empty.
        self.word = word
        self.results = ''
        self.total_results = ''

    def do_search_virustotal(self):
        """Query VirusTotal for sub-domains of ``self.word`` and store the
        raw response text in the shared ``result`` dict (imported from
        Extensions.functions) under 'result_virustotal'."""
        print('\nSearching VirusTotal...')
        endpoint = ('https://www.virustotal.com/ui/domains/'
                    f'{self.word}/subdomains?relationships=resolutions'
                    '&cursor=STMwCi4%3D&limit=40')
        resp = requests.get(endpoint, headers={'User-Agent': get_user_agent()})
        self.results = resp.content.decode('UTF-8')
        self.total_results += self.results
        result['result_virustotal'] = self.total_results
        print('OK - VirusTotal!')
79dac6716cb26b7e840ea05ce70b8e46d804effc | 1,370 | py | Python | python/paddle/distributed/fleet/base/runtime_factory.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/distributed/fleet/base/runtime_factory.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/distributed/fleet/base/runtime_factory.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..runtime.collective_runtime import CollectiveRuntime
from ..runtime.parameter_server_runtime import ParameterServerRuntime
from ...ps.the_one_ps import TheOnePSRuntime
__all__ = []
class RuntimeFactory(object):
    """Factory selecting the runtime implementation for a fleet context."""

    def __init__(self):
        pass

    def _create_runtime(self, context):
        """Build and return the runtime matching the role in ``context``.

        Collective roles get a CollectiveRuntime; otherwise a parameter
        server runtime is built when the a-sync ``k_steps`` is non-negative
        (implicitly returns None when neither case applies).
        """
        if context["role_maker"]._is_collective:
            runtime = CollectiveRuntime()
            runtime._set_basic_info(context)
            return runtime

        # Non-collective from here on; only the k_steps check remains.
        k_steps = context["valid_strategy"].a_sync_configs["k_steps"]
        if k_steps >= 0:
            runtime = TheOnePSRuntime()
            runtime._set_basic_info(context)
            return runtime
| 36.052632 | 74 | 0.729197 |
32eb7d910fa0fd865aaca51af7eaf41889871f79 | 2,256 | py | Python | visualization.py | sonomarina/Stock-sentiment-analysis | 2b3ddd88c73916ea06d39ef5d455f9220c69fdf7 | [
"Unlicense"
] | null | null | null | visualization.py | sonomarina/Stock-sentiment-analysis | 2b3ddd88c73916ea06d39ef5d455f9220c69fdf7 | [
"Unlicense"
] | null | null | null | visualization.py | sonomarina/Stock-sentiment-analysis | 2b3ddd88c73916ea06d39ef5d455f9220c69fdf7 | [
"Unlicense"
] | null | null | null | # =============================================================================
# Import OHLCV data and perform basic visualizations
# Author : Mayank Rasu
# Please report bug/issues in the Q&A section
# =============================================================================
# Import necesary libraries
import pandas as pd
import pandas_datareader.data as pdr
import datetime
import matplotlib.pyplot as plt
# Download historical data for required stocks
tickers = ["MSFT", "AMZN", "AAPL", "CSCO", "IBM", "FB"]

close_prices = pd.DataFrame()  # dataframe to store close price of each ticker
attempt = 0  # initializing passthrough variable
drop = []  # tickers whose close price was successfully extracted

# Retry failed downloads for up to 6 passes; tickers fetched successfully
# are removed from the work list between passes.
while len(tickers) != 0 and attempt <= 5:
    tickers = [j for j in tickers if j not in drop]
    for i in range(len(tickers)):
        try:
            temp = pdr.get_data_yahoo(tickers[i],
                                      datetime.date.today() - datetime.timedelta(3650),
                                      datetime.date.today())
            temp.dropna(inplace=True)
            close_prices[tickers[i]] = temp["Adj Close"]
            drop.append(tickers[i])
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts the
            # loop; any fetch/parse failure is retried on the next pass.
            print(tickers[i], " :failed to fetch data...retrying")
            continue
    attempt += 1

# Handling NaN Values
# Replaces NaN values with the next valid value along the column.
close_prices.fillna(method='bfill', axis=0, inplace=True)
daily_return = close_prices.pct_change()  # daily return for each stock

# Data vizualization
close_prices.plot()  # all stocks superimposed on the same chart
cp_standardized = (close_prices - close_prices.mean()) / close_prices.std()  # Standardization
cp_standardized.plot()  # standardized stocks superimposed on the same chart
close_prices.plot(subplots=True, layout=(3, 2),
                  title="Tech Stock Price Evolution", grid=True)  # subplots

# Pyplot demo
fig, ax = plt.subplots()
plt.style.available
plt.style.use('ggplot')
ax.set(title="Daily return on tech stocks", xlabel="Tech Stocks",
       ylabel="Daily Returns")
plt.bar(daily_return.columns, daily_return.mean())
a829c687f1208a0f6888e659c43a1c70eb8af998 | 2,044 | py | Python | html_parsing/get_video_list__xcadr.com.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | html_parsing/get_video_list__xcadr.com.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | html_parsing/get_video_list__xcadr.com.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def get_video_list(url):
    """Scrape a playlist page and return its videos as a list of dicts.

    Each dict carries: 'title', 'url', 'duration' (in seconds),
    'rating' (integer percentage) and 'url_thumb'.
    """
    import requests
    rs = requests.get(url)

    from bs4 import BeautifulSoup
    root = BeautifulSoup(rs.content, 'lxml')

    import re
    # Durations look like "6м:32с" (Cyrillic unit letters); "." matches the
    # unit character.  Raw string fixes the invalid "\d" escape warning
    # (W605) without changing the pattern's meaning.
    pattern_get_duration = re.compile(r'(\d+).:(\d+).')

    items = []
    for item in root.select('#playlist_view_playlist_view_items > .item'):
        url = item.a['href']
        title = item.a['title']
        url_thumb = item.select_one('img')['src']

        # Example: "60%", "80%"
        rating = item.select_one('.rating').text
        rating = int(rating.replace('%', ''))

        # Example: "0м:31с", "6м:32с"
        duration = item.select_one('.duration').text
        minutes, seconds = map(int, pattern_get_duration.findall(duration)[0])
        duration = minutes * 60 + seconds

        items.append({
            'title': title,
            'url': url,
            'duration': duration,
            'rating': rating,
            'url_thumb': url_thumb
        })

    return items
if __name__ == '__main__':
    # Smoke test: fetch a few playlists and print them sorted several ways.
    items = get_video_list('http://xcadr.com/collection/seks-v-poze-naezdnicy/')
    print('Total:', len(items))

    items = get_video_list('http://xcadr.com/collection/sceny-bdsm-v-filmah/')
    print('Total:', len(items))
    print()

    items = get_video_list('http://xcadr.com/collection/luchshie-sceny-v-bane/')
    print('Total:', len(items))

    def print_items(video_list):
        # One numbered line per video with its main attributes.
        for num, video in enumerate(video_list, 1):
            print('{0:2}. "{title}" ({duration} secs, rating: {rating}): {url} [{url_thumb}]'.format(num, **video))

    by_duration = sorted(items, key=lambda v: v['duration'], reverse=True)

    print('Sorted by duration (top 5):')
    print_items(by_duration[:5])
    print()

    print('Sorted by duration:')
    print_items(by_duration)
    print()

    print('Sorted by rating:')
    print_items(sorted(items, key=lambda v: v['rating'], reverse=True))
    print()
3e1795a2e00c7d3c1ac50842486f291421933de3 | 179 | py | Python | scatter_plot.py | AbhigyanRanjan0505/vwh4qtc893uo | fcc0ff2d5e23ac2322e4c5afe7b6ed8c54c62a84 | [
"MIT"
] | null | null | null | scatter_plot.py | AbhigyanRanjan0505/vwh4qtc893uo | fcc0ff2d5e23ac2322e4c5afe7b6ed8c54c62a84 | [
"MIT"
] | null | null | null | scatter_plot.py | AbhigyanRanjan0505/vwh4qtc893uo | fcc0ff2d5e23ac2322e4c5afe7b6ed8c54c62a84 | [
"MIT"
] | null | null | null | import pandas as pd
import plotly.express as px
# Load the case counts and render an interactive scatter chart,
# coloured by country.
df = pd.read_csv("covid.csv")
fig = px.scatter(df, x="Date", y="Cases",
                 color="Country", size_max=60)
fig.show()
| 22.375 | 58 | 0.631285 |
58236057f47d5ddb7aa7f77d27b96998051159c1 | 9,423 | py | Python | src/lib/past/builtins/noniterators.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 17 | 2017-08-04T15:41:05.000Z | 2020-10-16T18:02:41.000Z | src/lib/past/builtins/noniterators.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-08-04T23:37:37.000Z | 2017-08-04T23:38:34.000Z | src/lib/past/builtins/noniterators.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-12-07T16:30:59.000Z | 2019-06-16T02:48:28.000Z | """
This module is designed to be used as follows::
from past.builtins.noniterators import filter, map, range, reduce, zip
And then, for example::
assert isinstance(range(5), list)
The list-producing functions this brings in are::
- ``filter``
- ``map``
- ``range``
- ``reduce``
- ``zip``
"""
from __future__ import division, absolute_import, print_function
from itertools import chain, starmap
import itertools # since zip_longest doesn't exist on Py2
from past.types import basestring
from past.utils import PY3
def flatmap(f, items):
    """Apply ``f`` to each element of ``items`` and chain the resulting
    iterables into a single flat iterator."""
    return chain.from_iterable(f(item) for item in items)
if PY3:
import builtins
# list-producing versions of the major Python iterating functions
def oldfilter(*args):
"""
filter(function or None, sequence) -> list, tuple, or string
Return those items of sequence for which function(item) is true.
If function is None, return the items that are true. If sequence
is a tuple or string, return the same type, else return a list.
"""
mytype = type(args[1])
if isinstance(args[1], basestring):
return mytype().join(builtins.filter(*args))
elif isinstance(args[1], (tuple, list)):
return mytype(builtins.filter(*args))
else:
# Fall back to list. Is this the right thing to do?
return list(builtins.filter(*args))
# This is surprisingly difficult to get right. For example, the
# solutions here fail with the test cases in the docstring below:
# http://stackoverflow.com/questions/8072755/
def oldmap(func, *iterables):
"""
map(function, sequence[, sequence, ...]) -> list
Return a list of the results of applying the function to the
items of the argument sequence(s). If more than one sequence is
given, the function is called with an argument list consisting of
the corresponding item of each sequence, substituting None for
missing values when not all sequences have the same length. If
the function is None, return a list of the items of the sequence
(or a list of tuples if more than one sequence).
Test cases:
>>> oldmap(None, 'hello world')
['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd']
>>> oldmap(None, range(4))
[0, 1, 2, 3]
More test cases are in past.tests.test_builtins.
"""
zipped = itertools.zip_longest(*iterables)
l = list(zipped)
if len(l) == 0:
return []
if func is None:
result = l
else:
result = list(starmap(func, l))
# Inspect to see whether it's a simple sequence of tuples
try:
if max([len(item) for item in result]) == 1:
return list(chain.from_iterable(result))
# return list(flatmap(func, result))
except TypeError as e:
# Simple objects like ints have no len()
pass
return result
############################
### For reference, the source code for Py2.7 map function:
# static PyObject *
# builtin_map(PyObject *self, PyObject *args)
# {
# typedef struct {
# PyObject *it; /* the iterator object */
# int saw_StopIteration; /* bool: did the iterator end? */
# } sequence;
#
# PyObject *func, *result;
# sequence *seqs = NULL, *sqp;
# Py_ssize_t n, len;
# register int i, j;
#
# n = PyTuple_Size(args);
# if (n < 2) {
# PyErr_SetString(PyExc_TypeError,
# "map() requires at least two args");
# return NULL;
# }
#
# func = PyTuple_GetItem(args, 0);
# n--;
#
# if (func == Py_None) {
# if (PyErr_WarnPy3k("map(None, ...) not supported in 3.x; "
# "use list(...)", 1) < 0)
# return NULL;
# if (n == 1) {
# /* map(None, S) is the same as list(S). */
# return PySequence_List(PyTuple_GetItem(args, 1));
# }
# }
#
# /* Get space for sequence descriptors. Must NULL out the iterator
# * pointers so that jumping to Fail_2 later doesn't see trash.
# */
# if ((seqs = PyMem_NEW(sequence, n)) == NULL) {
# PyErr_NoMemory();
# return NULL;
# }
# for (i = 0; i < n; ++i) {
# seqs[i].it = (PyObject*)NULL;
# seqs[i].saw_StopIteration = 0;
# }
#
# /* Do a first pass to obtain iterators for the arguments, and set len
# * to the largest of their lengths.
# */
# len = 0;
# for (i = 0, sqp = seqs; i < n; ++i, ++sqp) {
# PyObject *curseq;
# Py_ssize_t curlen;
#
# /* Get iterator. */
# curseq = PyTuple_GetItem(args, i+1);
# sqp->it = PyObject_GetIter(curseq);
# if (sqp->it == NULL) {
# static char errmsg[] =
# "argument %d to map() must support iteration";
# char errbuf[sizeof(errmsg) + 25];
# PyOS_snprintf(errbuf, sizeof(errbuf), errmsg, i+2);
# PyErr_SetString(PyExc_TypeError, errbuf);
# goto Fail_2;
# }
#
# /* Update len. */
# curlen = _PyObject_LengthHint(curseq, 8);
# if (curlen > len)
# len = curlen;
# }
#
# /* Get space for the result list. */
# if ((result = (PyObject *) PyList_New(len)) == NULL)
# goto Fail_2;
#
# /* Iterate over the sequences until all have stopped. */
# for (i = 0; ; ++i) {
# PyObject *alist, *item=NULL, *value;
# int numactive = 0;
#
# if (func == Py_None && n == 1)
# alist = NULL;
# else if ((alist = PyTuple_New(n)) == NULL)
# goto Fail_1;
#
# for (j = 0, sqp = seqs; j < n; ++j, ++sqp) {
# if (sqp->saw_StopIteration) {
# Py_INCREF(Py_None);
# item = Py_None;
# }
# else {
# item = PyIter_Next(sqp->it);
# if (item)
# ++numactive;
# else {
# if (PyErr_Occurred()) {
# Py_XDECREF(alist);
# goto Fail_1;
# }
# Py_INCREF(Py_None);
# item = Py_None;
# sqp->saw_StopIteration = 1;
# }
# }
# if (alist)
# PyTuple_SET_ITEM(alist, j, item);
# else
# break;
# }
#
# if (!alist)
# alist = item;
#
# if (numactive == 0) {
# Py_DECREF(alist);
# break;
# }
#
# if (func == Py_None)
# value = alist;
# else {
# value = PyEval_CallObject(func, alist);
# Py_DECREF(alist);
# if (value == NULL)
# goto Fail_1;
# }
# if (i >= len) {
# int status = PyList_Append(result, value);
# Py_DECREF(value);
# if (status < 0)
# goto Fail_1;
# }
# else if (PyList_SetItem(result, i, value) < 0)
# goto Fail_1;
# }
#
# if (i < len && PyList_SetSlice(result, i, len, NULL) < 0)
# goto Fail_1;
#
# goto Succeed;
#
# Fail_1:
# Py_DECREF(result);
# Fail_2:
# result = NULL;
# Succeed:
# assert(seqs);
# for (i = 0; i < n; ++i)
# Py_XDECREF(seqs[i].it);
# PyMem_DEL(seqs);
# return result;
# }
def oldrange(*args, **kwargs):
return list(builtins.range(*args, **kwargs))
def oldzip(*args, **kwargs):
return list(builtins.zip(*args, **kwargs))
    # Export the list-producing wrappers under the classic Python 2 names.
    filter = oldfilter
    map = oldmap
    range = oldrange
    from functools import reduce
    zip = oldzip
    __all__ = ['filter', 'map', 'range', 'reduce', 'zip']

else:
    import __builtin__
    # Python 2-builtin ranges produce lists
    # On Python 2 the builtins already return lists, so re-export them as-is.
    filter = __builtin__.filter
    map = __builtin__.map
    range = __builtin__.range
    reduce = __builtin__.reduce
    zip = __builtin__.zip
    __all__ = []
| 33.653571 | 83 | 0.448583 |
f4bc9f37ecfb32b5ab94788f97e628c120dcf0f3 | 3,022 | py | Python | test/model/seq2seq/test_forking_sequence_splitter.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | 1 | 2020-01-19T13:27:51.000Z | 2020-01-19T13:27:51.000Z | test/model/seq2seq/test_forking_sequence_splitter.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | null | null | null | test/model/seq2seq/test_forking_sequence_splitter.py | ykaitao/gluon-ts | 9622550974e9e0819e25438fc45353f8a6474b55 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import numpy as np
# First-party imports
from gluonts import transform
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.model.seq2seq._transform import ForkingSequenceSplitter
# if we import TestSplitSampler as Test... pytest thinks it's a test
from gluonts.transform import TestSplitSampler as TSplitSampler
def test_forking_sequence_splitter() -> None:
    """Check that ForkingSequenceSplitter produces the expected forked targets."""

    def make_dataset(N, train_length):
        # generates 2 ** N - 1 timeseries with constant increasing values
        n = 2 ** N - 1
        targets = np.arange(n * train_length).reshape((n, train_length))
        return ListDataset(
            [
                {"start": "2012-01-01", "target": targets[i, :]}
                for i in range(n)
            ],
            freq="D",
        )

    ds = make_dataset(1, 20)

    trans = transform.Chain(
        trans=[
            transform.AddAgeFeature(
                target_field=FieldName.TARGET,
                output_field="age",
                pred_length=10,
            ),
            ForkingSequenceSplitter(
                train_sampler=TSplitSampler(),
                time_series_fields=["age"],
                enc_len=5,
                dec_len=3,
            ),
        ]
    )

    out = trans(iter(ds), is_train=True)
    transformed_data = next(iter(out))

    # With enc_len=5 / dec_len=3 on a length-20 series, each of the 5
    # encoder positions forks a 3-step decoder target.
    future_target = np.array(
        [
            [13.0, 14.0, 15.0],
            [14.0, 15.0, 16.0],
            [15.0, 16.0, 17.0],
            [16.0, 17.0, 18.0],
            [17.0, 18.0, 19.0],
        ]
    )

    assert (
        np.linalg.norm(future_target - transformed_data["future_target"])
        < 1e-5
    ), "the forking sequence target should be computed correctly."

    trans_oob = transform.Chain(
        trans=[
            transform.AddAgeFeature(
                target_field=FieldName.TARGET,
                output_field="age",
                pred_length=10,
            ),
            ForkingSequenceSplitter(
                train_sampler=TSplitSampler(),
                time_series_fields=["age"],
                enc_len=20,
                dec_len=20,
            ),
        ]
    )

    transformed_data_oob = next(iter(trans_oob(iter(ds), is_train=True)))

    # Use abs() so a too-small sum also fails: the bare difference could be
    # negative and would silently satisfy "< 1e-5".
    assert (
        abs(np.sum(transformed_data_oob["future_target"]) - np.sum(np.arange(20)))
        < 1e-5
    ), "the forking sequence target should be computed correctly."
| 30.22 | 77 | 0.590999 |
8c848a163ebf0546511813f26bbd32aabe642a8e | 4,052 | py | Python | 2021/src/Pre-processing/Isotropic/Noise/src/modules/CheckSize.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | 2021/src/Pre-processing/Isotropic/Noise/src/modules/CheckSize.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | 2021/src/Pre-processing/Isotropic/Noise/src/modules/CheckSize.py | tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | [
"MIT"
] | null | null | null | # import numpy as np
import cv2
# import math
import matplotlib.pyplot as plt
# import os
# import pandas as pd
import glob
import seaborn as sns
from tqdm import tqdm
import matplotlib as mpl
# import warnings; warnings.filterwarnings(action='once')
# Font sizes (in points) reused across all figure elements below.
large = 22
med = 16
small = 12
# Global matplotlib defaults: figure size plus label/title/tick font sizes.
params = {'legend.fontsize': med,
          'figure.figsize': (16, 10),
          'axes.labelsize': med,
          'axes.titlesize': med,
          'xtick.labelsize': med,
          'ytick.labelsize': med,
          'figure.titlesize': large}
plt.rcParams.update(params)
plt.style.use('seaborn-whitegrid')
sns.set_style("white")
# %matplotlib inline

# Version
print(mpl.__version__)  # > 3.0.0
print(sns.__version__)  # > 0.9.0
def readImagefromFolder(folder="/home/nguyentansy/PhD-work/PhD-project/2021/src/Pre-processing/Isotropic/data/labeled-images/"):
    """Collect image dimensions (in pixels) from the labeled-images dataset.

    Parameters
    ----------
    folder : str
        Root directory of the dataset.

    Returns
    -------
    tuple of lists
        (heights, widths, heights_label, widths_label): sizes of the
        pathological-findings images, then sizes of all labeled images
        (the second glob also matches the first subset).
    """
    heights = []
    widths = []
    widths_label = []
    heights_label = []
    # Pathological-findings subset only.
    for filename in tqdm(glob.glob("%s/*/pathological-findings/*/*" % folder)):
        img = cv2.imread(filename, 0)
        if img is None:
            # cv2.imread returns None for unreadable/non-image files;
            # previously this crashed on .shape.
            continue
        heights.append(img.shape[0])
        widths.append(img.shape[1])
    # All labeled images (four levels deep under the root).
    for filename in tqdm(glob.glob("%s/*/*/*/*" % folder)):
        img = cv2.imread(filename, 0)
        if img is None:
            continue
        heights_label.append(img.shape[0])
        widths_label.append(img.shape[1])
    return heights, widths, heights_label, widths_label
def plotHistogram(heights, widths, heights_label, widths_label,
                  filesave="/home/nguyentansy/PhD-work/PhD-project/2021/src/Pre-processing/Isotropic/Noise/src/denoising_rgb/results/sizehist.png"):
    """Scatter-plot image sizes with marginal histograms and save the figure.

    Parameters
    ----------
    heights, widths : list of int
        Sizes of the pathological-findings images.
    heights_label, widths_label : list of int
        Sizes of all labeled images.
    filesave : str, optional
        Output path for the saved figure; defaults to the original
        hard-coded location so existing callers are unchanged.
    """
    # Create Fig and gridspec
    fig = plt.figure(figsize=(16, 10), dpi=300)
    grid = plt.GridSpec(4, 4, hspace=0.5, wspace=0.2)

    # Define the axes: main scatter plus bottom/right marginal histograms.
    ax_main = fig.add_subplot(grid[:-1, :-1])
    ax_right = fig.add_subplot(grid[:-1, -1], xticklabels=[], yticklabels=[])
    ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])

    # All labeled images (red) under the pathological findings (blue).
    ax_main.scatter(widths_label, heights_label, s=40, c='red', marker=".", label="labeled images",
                    alpha=0.5, edgecolors='gray', linewidths=.5)
    ax_main.scatter(widths, heights, s=40, c='blue', marker=".", label="pathological findings",
                    alpha=0.5, edgecolors='gray', linewidths=.5)

    # Histogram of widths along the bottom, inverted so bars hang downwards.
    ax_bottom.hist(widths_label, 40, histtype='stepfilled',
                   orientation='vertical', color='blue')
    ax_bottom.invert_yaxis()

    # Histogram of heights along the right.
    ax_right.hist(heights_label, 40, histtype='stepfilled',
                  orientation='horizontal', color='green')

    # Decorations
    ax_main.set(title='Scatterplot with Histograms \n width vs height',
                xlabel='width', ylabel='height')
    ax_main.title.set_fontsize(10)
    for item in ([ax_main.xaxis.label, ax_main.yaxis.label] + ax_main.get_xticklabels() + ax_main.get_yticklabels()):
        item.set_fontsize(10)

    plt.title('size analysis', fontsize=12)
    ax_main.legend()
    plt.savefig(filesave)
if __name__ == "__main__":
    # Gather the image dimensions, then render the size summary figure.
    sizes = readImagefromFolder()
    plotHistogram(heights=sizes[0], widths=sizes[1],
                  heights_label=sizes[2], widths_label=sizes[3])
| 32.677419 | 134 | 0.644373 |
97aab32fbc538ca0395639ff1f27b2a77b7f0e7d | 162,950 | py | Python | resqpy/well/_blocked_well.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | resqpy/well/_blocked_well.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | resqpy/well/_blocked_well.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | null | null | null | """_blocked_well.py: resqpy well module providing blocked well class"""
# Nexus is a registered trademark of the Halliburton Company
# RMS and ROXAR are registered trademarks of Roxar Software Solutions AS, an Emerson company
import logging
log = logging.getLogger(__name__)
import math as maths
import numpy as np
import pandas as pd
from functools import partial
import resqpy.crs as crs
import resqpy.olio.keyword_files as kf
import resqpy.olio.uuid as bu
import resqpy.olio.vector_utilities as vec
import resqpy.olio.wellspec_keywords as wsk
import resqpy.olio.write_hdf5 as rwh5
import resqpy.olio.xml_et as rqet
import resqpy.organize as rqo
import resqpy.property as rqp
import resqpy.weights_and_measures as bwam
from resqpy.olio.base import BaseResqpy
from resqpy.olio.xml_namespaces import curly_namespace as ns
from .well_utils import _pl, find_entry_and_exit, load_hdf5_array, _derive_from_wellspec_check_grid_name, \
_derive_from_wellspec_verify_col_list
from ._trajectory import Trajectory
from ._md_datum import MdDatum
class BlockedWell(BaseResqpy):
    """Class for RESQML Blocked Wellbore Representation (Wells), ie cells visited by wellbore.

    RESQML documentation:
       The information that allows you to locate, on one or several grids (existing or planned),
       the intersection of volume (cells) and surface (faces) elements with a wellbore trajectory
       (existing or planned).

    note:
       measured depth data must be in same crs as those for the related trajectory
    """

    # RESQML schema class name for this representation
    resqml_type = 'BlockedWellboreRepresentation'
    # alias so that .well_name reads & writes the inherited citation title attribute
    well_name = rqo.alias_for_attribute("title")
    def __init__(self,
                 parent_model,
                 uuid = None,
                 grid = None,
                 trajectory = None,
                 wellspec_file = None,
                 cellio_file = None,
                 column_ji0 = None,
                 well_name = None,
                 check_grid_name = False,
                 use_face_centres = False,
                 represented_interp = None,
                 originator = None,
                 extra_metadata = None,
                 add_wellspec_properties = False):
        """Creates a new blocked well object and optionally loads it from xml, or trajectory, or Nexus wellspec file.

        arguments:
           parent_model (model.Model object): the model which the new blocked well belongs to
           uuid (optional): if present, the uuid of an existing blocked wellbore, in which case remaining
              arguments are ignored
           grid (optional, grid.Grid object): required if intialising from a trajectory or wellspec file;
              not used if uuid is not None
           trajectory (optional, Trajectory object): the trajectory of the well, to be intersected with the grid;
              not used if uuid is not None
           wellspec_file (optional, string): filename of an ascii file holding the Nexus wellspec data;
              ignored if uuid is not None or trajectory is not None
           cellio_file (optional, string): filename of an ascii file holding the RMS exported blocked well data;
              ignored if uuid is not None or trajectory is not None or wellspec_file is not None
           column_ji0 (optional, pair of ints): column indices (j0, i0) for a 'vertical' well; ignored if
              uuid is not None or trajectory is not None or wellspec_file is not None or
              cellio_file is not None
           well_name (string): the well name as given in the wellspec or cellio file; required if loading from
              one of those files; or the name to be used as citation title for a column well
           check_grid_name (boolean, default False): if True, the GRID column of the wellspec data will be checked
              for a match with the citation title of the grid object; perforations for other grids will be skipped;
              if False, all wellspec data is assumed to relate to the grid; only relevant when loading from wellspec
           use_face_centres (boolean, default False): if True, cell face centre points are used for the entry and
              exit points when constructing the simulation trajectory; if False and ANGLA & ANGLV data are available
              then entry and exit points are constructed based on a straight line at those angles passing through
              the centre of the cell; only relevant when loading from wellspec
           represented_interp (wellbore interpretation object, optional): if present, is noted as the wellbore
              interpretation object which this frame relates to; ignored if uuid is not None
           originator (str, optional): the name of the person creating the blocked well, defaults to login id;
              ignored if uuid is not None
           extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the blocked well;
              ignored if uuid is not None
           add_wellspec_properties (boolean or list of str, default False): if not False, and initialising from
              a wellspec file, the blocked well has its hdf5 data written and xml created and properties are
              fully created; if a list is provided the elements must be numerical wellspec column names;
              if True, all numerical columns other than the cell indices are added as properties

        returns:
           the newly created blocked well object

        notes:
           if starting from a wellspec file or column indices, a 'simulation' trajectory and md datum objects are
           constructed to go with the blocked well;
           column wells might not be truly vertical - the trajectory will consist of linear segments joining the
           centres of the k faces in the column;
           optional RESQML attributes are not handled by this code (WITSML log reference, interval stratigraphic units,
           cell fluid phase units);
           mysterious RESQML WellboreFrameIndexableElements is not used in any other RESQML classes and is therefore
           not used here

        :meta common:
        """
        self.trajectory = trajectory #: trajectory object associated with the wellbore
        self.trajectory_to_be_written = False
        self.feature_to_be_written = False
        self.interpretation_to_be_written = False
        self.node_count = None #: number of measured depth nodes, each being an entry or exit point of trajectory with a cell
        self.node_mds = None #: node_count measured depths (in same units and datum as trajectory) of cell entry and/or exit points
        self.cell_count = None #: number of blocked intervals (<= node_count - 1)
        self.cell_indices = None #: cell_count natural cell indices, paired with non-null grid_indices
        self.grid_indices = None #: node_count-1 indices into grid list for each interval in node_mds; -1 for unblocked interval
        self.face_pair_indices = None #: entry, exit face per cell indices, -1 for Target Depth termination within a cell
        self.grid_list = [
        ] #: list of grid objects indexed by grid_indices; for now only handles 1 grid unless loading from xml
        self.wellbore_interpretation = None #: associated wellbore interpretation object
        self.wellbore_feature = None #: associated wellbore feature object
        #: All logs associated with the blockedwellbore; an instance of :class:`resqpy.property.WellIntervalPropertyCollection`
        self.logs = None
        # null (missing value) markers for the cell, grid and face index arrays; set after loading or deriving data
        self.cellind_null = None
        self.gridind_null = None
        self.facepair_null = None
        # face_index_map maps from (axis, p01) to face index value in range 0..5
        # this is the default as indicated on page 139 (but not p. 180) of the RESQML Usage Gude v2.0.1
        # also assumes K is generally increasing downwards
        # see DevOps backlog item 269001 discussion for more information
        # self.face_index_map = np.array([[0, 1], [4, 2], [5, 3]], dtype = int)
        self.face_index_map = np.array([[0, 1], [2, 4], [5, 3]], dtype = int) # order: top, base, J-, I+, J+, I-
        # and the inverse, maps from 0..5 to (axis, p01)
        # self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 1], [2, 1], [1, 0], [2, 0]], dtype = int)
        self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype = int)
        # note: the rework_face_pairs() method, below, overwrites the face indices based on I, J cell indices
        super().__init__(model = parent_model,
                         uuid = uuid,
                         title = well_name,
                         originator = originator,
                         extra_metadata = extra_metadata)
        if self.root is None:
            # newly created (not loaded from xml): initialise from whichever data source was supplied
            self.wellbore_interpretation = represented_interp
            grid = self.__set_grid(grid = grid,
                                   wellspec_file = wellspec_file,
                                   cellio_file = cellio_file,
                                   column_ji0 = column_ji0)
            # Using dictionary mapping to replicate a switch statement. The init_function is chosen based on the
            # data source and the correct function is then called based on the init_function_dict
            init_function_dict = {
                'trajectory':
                    partial(self.compute_from_trajectory, self.trajectory, grid),
                'wellspec_file':
                    partial(self.derive_from_wellspec,
                            wellspec_file,
                            well_name,
                            grid,
                            check_grid_name = check_grid_name,
                            use_face_centres = use_face_centres,
                            add_properties = add_wellspec_properties),
                'cellio_file':
                    partial(self.__check_cellio_init_okay,
                            cellio_file = cellio_file,
                            well_name = well_name,
                            grid = grid),
                'column_ji0':
                    partial(self.set_for_column, well_name, grid, column_ji0)
            }
            chosen_init_method = BlockedWell.__choose_init_data_source(trajectory = self.trajectory,
                                                                      wellspec_file = wellspec_file,
                                                                      cellio_file = cellio_file,
                                                                      column_ji0 = column_ji0)
            try:
                # chosen_init_method is None when no data source was given; the KeyError is then deliberately ignored
                init_function_dict[chosen_init_method]()
            except KeyError:
                pass
            self.gridind_null = -1
            self.facepair_null = -1
            self.cellind_null = -1
        # else an empty object is returned
def __set_grid(self, grid, wellspec_file, cellio_file, column_ji0):
"""Set the grid to which the blocked well belongs"""
if grid is None and (self.trajectory is not None or wellspec_file is not None or cellio_file is not None or
column_ji0 is not None):
grid_final = self.model.grid()
else:
grid_final = grid
return grid_final
def __check_cellio_init_okay(self, cellio_file, well_name, grid):
""" Checks if BlockedWell object initialization from a cellio file is okay"""
okay = self.import_from_rms_cellio(cellio_file, well_name, grid)
if not okay:
self.node_count = 0
@staticmethod
def __choose_init_data_source(trajectory, wellspec_file, cellio_file, column_ji0):
"""Specify the data source from which the BlockedWell object will be initialized"""
if trajectory is not None:
return "trajectory"
elif wellspec_file is not None:
return "wellspec_file"
elif cellio_file is not None:
return "cellio_file"
elif column_ji0 is not None:
return "column_ji0"
def _load_from_xml(self):
"""Loads the blocked wellbore object from an xml node (and associated hdf5 data)."""
node = self.root
assert node is not None
self.__find_trajectory_uuid(node = node)
self.node_count = rqet.find_tag_int(node, 'NodeCount')
assert self.node_count is not None and self.node_count >= 2, 'problem with blocked well node count'
mds_node = rqet.find_tag(node, 'NodeMd')
assert mds_node is not None, 'blocked well node measured depths hdf5 reference not found in xml'
load_hdf5_array(self, mds_node, 'node_mds')
# Statement below has no effect, is this a bug?
self.node_mds is not None and self.node_mds.ndim == 1 and self.node_mds.size == self.node_count
self.cell_count = rqet.find_tag_int(node, 'CellCount')
assert self.cell_count is not None and self.cell_count > 0
# TODO: remove this if block once RMS export issue resolved
if self.cell_count == self.node_count:
extended_mds = np.empty((self.node_mds.size + 1,))
extended_mds[:-1] = self.node_mds
extended_mds[-1] = self.node_mds[-1] + 1.0
self.node_mds = extended_mds
self.node_count += 1
assert self.cell_count < self.node_count
self.__find_ci_node_and_load_hdf5_array(node = node)
self.__find_fi_node_and_load_hdf5_array(node)
unique_grid_indices = self.__find_gi_node_and_load_hdf5_array(node = node)
self.__find_grid_node(node = node, unique_grid_indices = unique_grid_indices)
interp_uuid = rqet.find_nested_tags_text(node, ['RepresentedInterpretation', 'UUID'])
if interp_uuid is None:
self.wellbore_interpretation = None
else:
self.wellbore_interpretation = rqo.WellboreInterpretation(self.model, uuid = interp_uuid)
# Create blocked well log collection of all log data
self.logs = rqp.WellIntervalPropertyCollection(frame = self)
# Set up matches between cell_indices and grid_indices
self.cell_grid_link = self.map_cell_and_grid_indices()
def __find_trajectory_uuid(self, node):
""" Find and verify the uuid of the trajectory associated with the BlockedWell object."""
trajectory_uuid = bu.uuid_from_string(rqet.find_nested_tags_text(node, ['Trajectory', 'UUID']))
assert trajectory_uuid is not None, 'blocked well trajectory reference not found in xml'
if self.trajectory is None:
self.trajectory = Trajectory(self.model, uuid = trajectory_uuid)
else:
assert bu.matching_uuids(self.trajectory.uuid, trajectory_uuid), 'blocked well trajectory uuid mismatch'
def __find_ci_node_and_load_hdf5_array(self, node):
"""Find the BlockedWell object's cell indices hdf5 reference node and load the array."""
ci_node = rqet.find_tag(node, 'CellIndices')
assert ci_node is not None, 'blocked well cell indices hdf5 reference not found in xml'
load_hdf5_array(self, ci_node, 'cell_indices', dtype = int)
assert (self.cell_indices is not None and self.cell_indices.ndim == 1 and
self.cell_indices.size == self.cell_count), 'mismatch in number of cell indices for blocked well'
self.cellind_null = rqet.find_tag_int(ci_node, 'NullValue')
if self.cellind_null is None:
self.cellind_null = -1 # if no Null found assume -1 default
    def __find_fi_node_and_load_hdf5_array(self, node):
        """Find the BlockedWell object's face indices hdf5 reference node and load the array."""
        fi_node = rqet.find_tag(node, 'LocalFacePairPerCellIndices')
        assert fi_node is not None, 'blocked well face indices hdf5 reference not found in xml'
        load_hdf5_array(self, fi_node, 'raw_face_indices', dtype = 'int')
        assert self.raw_face_indices is not None, 'failed to load face indices for blocked well'
        # two face entries (entry & exit) per blocked cell
        assert self.raw_face_indices.size == 2 * self.cell_count, 'mismatch in number of cell faces for blocked well'
        if self.raw_face_indices.ndim > 1:
            self.raw_face_indices = self.raw_face_indices.reshape((self.raw_face_indices.size,))
        # remember which entries are null (-1) and temporarily map them to face 0 so that the
        # inverse map lookup below is valid for every element
        mask = np.where(self.raw_face_indices == -1)
        self.raw_face_indices[mask] = 0
        # convert raw 0..5 face indices into (axis, polarity) pairs
        self.face_pair_indices = self.face_index_inverse_map[self.raw_face_indices]
        # restore the null entries as (-1, -1), eg. indicating TD termination within a cell
        self.face_pair_indices[mask] = (-1, -1)
        # reshape to one (entry, exit) pair of (axis, polarity) pairs per blocked cell
        self.face_pair_indices = self.face_pair_indices.reshape((-1, 2, 2))
        del self.raw_face_indices
        self.facepair_null = rqet.find_tag_int(fi_node, 'NullValue')
        if self.facepair_null is None:
            self.facepair_null = -1
def __find_gi_node_and_load_hdf5_array(self, node):
"""Find the BlockedWell object's grid indices hdf5 reference node and load the array."""
gi_node = rqet.find_tag(node, 'GridIndices')
assert gi_node is not None, 'blocked well grid indices hdf5 reference not found in xml'
load_hdf5_array(self, gi_node, 'grid_indices', dtype = 'int')
assert self.grid_indices is not None and self.grid_indices.ndim == 1 and self.grid_indices.size == self.node_count - 1
unique_grid_indices = np.unique(self.grid_indices) # sorted list of unique values
self.gridind_null = rqet.find_tag_int(gi_node, 'NullValue')
if self.gridind_null is None:
self.gridind_null = -1 # if no Null found assume -1 default
return unique_grid_indices
def __find_grid_node(self, node, unique_grid_indices):
"""Find the BlockedWell object's grid reference node(s)."""
grid_node_list = rqet.list_of_tag(node, 'Grid')
assert len(grid_node_list) > 0, 'blocked well grid reference(s) not found in xml'
assert unique_grid_indices[0] >= -1 and unique_grid_indices[-1] < len(
grid_node_list), 'blocked well grid index out of range'
assert np.count_nonzero(
self.grid_indices >= 0) == self.cell_count, 'mismatch in number of blocked well intervals'
self.grid_list = []
for grid_ref_node in grid_node_list:
grid_node = self.model.referenced_node(grid_ref_node)
assert grid_node is not None, 'grid referenced in blocked well xml is not present in model'
grid_uuid = rqet.uuid_for_part_root(grid_node)
grid_obj = self.model.grid(uuid = grid_uuid, find_properties = False)
self.grid_list.append(grid_obj)
def map_cell_and_grid_indices(self):
"""Returns a list of index values linking the grid_indices to cell_indices.
note:
length will match grid_indices, and will show -1 where cell is unblocked
"""
indexmap = []
j = 0
for i in self.grid_indices:
if i == -1:
indexmap.append(-1)
else:
indexmap.append(j)
j += 1
return indexmap
def compressed_grid_indices(self):
"""Returns a list of grid indices excluding the -1 elements (unblocked intervals).
note:
length will match that of cell_indices
"""
compressed = []
for i in self.grid_indices:
if i >= 0:
compressed.append(i)
assert len(compressed) == self.cell_count
return compressed
def number_of_grids(self):
"""Returns the number of grids referenced by the blocked well object."""
if self.grid_list is None:
return 0
return len(self.grid_list)
def single_grid(self):
"""Asserts that exactly one grid is being referenced and returns a grid object for that grid."""
assert len(self.grid_list) == 1, 'blocked well is not referring to exactly one grid'
return self.grid_list[0]
def grid_uuid_list(self):
"""Returns a list of the uuids of the grids referenced by the blocked well object.
:meta common:
"""
uuid_list = []
if self.grid_list is None:
return uuid_list
for g in self.grid_list:
uuid_list.append(g.uuid)
return uuid_list
def cell_indices_kji0(self):
"""Returns a numpy int array of shape (N, 3) of cells visited by well, for a single grid situation.
:meta common:
"""
grid = self.single_grid()
return grid.denaturalized_cell_indices(self.cell_indices)
def cell_indices_and_grid_list(self):
"""Returns a numpy int array of shape (N, 3) of cells visited by well, and a list of grid objects of length N.
:meta common:
"""
grid_for_cell_list = []
grid_indices = self.compressed_grid_indices()
assert len(grid_indices) == self.cell_count
cell_indices = np.empty((self.cell_count, 3), dtype = int)
for cell_number in range(self.cell_count):
grid = self.grid_list[grid_indices[cell_number]]
grid_for_cell_list.append(grid)
cell_indices[cell_number] = grid.denaturalized_cell_index(self.cell_indices[cell_number])
return cell_indices, grid_for_cell_list
def cell_indices_for_grid_uuid(self, grid_uuid):
"""Returns a numpy int array of shape (N, 3) of cells visited by well in specified grid.
:meta common:
"""
if isinstance(grid_uuid, str):
grid_uuid = bu.uuid_from_string(grid_uuid)
ci_list, grid_list = self.cell_indices_and_grid_list()
mask = np.zeros((len(ci_list),), dtype = bool)
for cell_number in range(len(ci_list)):
mask[cell_number] = bu.matching_uuids(grid_list[cell_number].uuid, grid_uuid)
ci_selected = ci_list[mask]
return ci_selected
def box(self, grid_uuid = None):
"""Returns the KJI box containing the cells visited by the well, for single grid if grid_uuid is None."""
if grid_uuid is None:
cells_kji0 = self.cell_indices_kji0()
else:
cells_kji0 = self.cell_indices_for_grid_uuid(grid_uuid)
if cells_kji0 is None or len(cells_kji0) == 0:
return None
well_box = np.empty((2, 3), dtype = int)
well_box[0] = np.min(cells_kji0, axis = 0)
well_box[1] = np.max(cells_kji0, axis = 0)
return well_box
    def face_pair_array(self):
        """Returns numpy int array of shape (N, 2, 2) being pairs of face (axis, polarity) pairs, to go with
        cell_kji0_array().

        note:
           each of the N rows in the returned array is of the form:
           ((entry_face_axis, entry_face_polarity), (exit_face_axis, exit_face_polarity))
           where the axis values are in the range 0 to 2 for k, j & i respectively, and
           the polarity values are zero for the 'negative' face and 1 for the 'positive' face;
           exit values may be -1 to indicate TD within the cell (ie. no exit point)
        """
        # simple accessor: the array is populated during load or derivation
        return self.face_pair_indices
    def compute_from_trajectory(self,
                                trajectory,
                                grid,
                                active_only = False,
                                quad_triangles = True,
                                use_single_layer_tactics = True):
        """Populate this blocked wellbore object based on intersection of trajectory with cells of grid.

        arguments:
           trajectory (Trajectory object): the trajectory to intersect with the grid; control_points and crs_uuid attributes must
              be populated
           grid (grid.Grid object): the grid with which to intersect the trajectory
           active_only (boolean, default False): if True, only active cells are included as blocked intervals
           quad_triangles (boolean, default True): if True, 4 triangles per cell face are used for the intersection calculations;
              if False, only 2 triangles per face are used
           use_single_layer_tactics (boolean, default True): if True and the grid does not have k gaps, initial intersection
              calculations with fault planes or the outer IK & JK skin of the grid are calculated as if the grid is a single
              layer (and only after an intersection is thus found is the actual layer identified); this significantly speeds up
              computation but may cause failure in the presence of significantly non-straight pillars and could (rarely) cause
              problems where a fault plane is significantly skewed (non-planar) even if individual pillars are straight

        note:
           this method is computationally intensive and might take ~30 seconds for a tyipical grid and trajectory; large grids,
           grids with k gaps, or setting use_single_layer_tactics False will typically result in significantly longer processing time
        """
        import resqpy.grid_surface as rgs # was causing circular import issue when at global level
        # note: see also extract_box_for_well code
        assert trajectory is not None and grid is not None
        if np.any(np.isnan(grid.points_ref(masked = False))):
            # grid geometry has gaps: work on a filled copy so the intersection code sees complete geometry
            log.warning('grid does not have geometry defined everywhere: attempting fill')
            import resqpy.derived_model as rqdm
            fill_grid = rqdm.copy_grid(grid)
            fill_grid.set_geometry_is_defined(nullify_partial_pillars = True, complete_all = True)
            # note: may need to write hdf5 and create xml for fill_grid, depending on use in populate_blocked_well_from_trajectory()
            # fill_grid.write_hdf_from_caches()
            # fill_grid.create_xml
            grid = fill_grid
        assert trajectory.control_points is not None and trajectory.crs_uuid is not None and grid.crs_uuid is not None
        assert len(trajectory.control_points)
        self.trajectory = trajectory
        if not self.well_name:
            self.well_name = trajectory.title
        # the heavy lifting: intersect the trajectory with the grid, populating this object in place
        bw = rgs.populate_blocked_well_from_trajectory(self,
                                                       grid,
                                                       active_only = active_only,
                                                       quad_triangles = quad_triangles,
                                                       lazy = False,
                                                       use_single_layer_tactics = use_single_layer_tactics)
        if bw is None:
            raise Exception('failed to generate blocked well from trajectory with uuid: ' + str(trajectory.uuid))
        assert bw is self
def set_for_column(self, well_name, grid, col_ji0, skip_inactive = True):
"""Populates empty blocked well for a 'vertical' well in given column; creates simulation trajectory and md
datum.
"""
if well_name:
self.well_name = well_name
col_list = ['IW', 'JW', 'L', 'ANGLA', 'ANGLV'] # NB: L is Layer, ie. k
pinch_col = grid.pinched_out(cache_cp_array = True, cache_pinchout_array = True)[:, col_ji0[0], col_ji0[1]]
if skip_inactive and grid.inactive is not None:
inactive_col = grid.inactive[:, col_ji0[0], col_ji0[1]]
else:
inactive_col = np.zeros(grid.nk, dtype = bool)
data = {'IW': [], 'JW': [], 'L': []}
for k0 in range(grid.nk):
if pinch_col[k0] or inactive_col[k0]:
continue
# note: leaving ANGLA & ANGLV columns as NA will cause K face centres to be used when deriving from dataframe
data['IW'].extend([col_ji0[1] + 1])
data['JW'].extend([col_ji0[0] + 1])
data['L'].extend([k0 + 1])
df = pd.DataFrame(data, columns = col_list)
return self.derive_from_dataframe(df, self.well_name, grid, use_face_centres = True)
    def derive_from_wellspec(self,
                             wellspec_file,
                             well_name,
                             grid,
                             check_grid_name = False,
                             use_face_centres = False,
                             add_properties = True):
        """Populates empty blocked well from Nexus WELLSPEC data; creates simulation trajectory and md datum.

        args:
           wellspec_file (string): path of Nexus ascii file holding WELLSPEC keyword
           well_name (string): the name of the well as used in the wellspec data
           grid (grid.Grid object): the grid object which the cell indices in the wellspec data relate to
           check_grid_name (boolean, default False): if True, the GRID column of the wellspec data will be checked
              for a match with the citation title of the grid object; perforations for other grids will be skipped;
              if False, all wellspec data is assumed to relate to the grid
           use_face_centres (boolean, default False): if True, cell face centre points are used for the entry and
              exit points when constructing the simulation trajectory; if False and ANGLA & ANGLV data are available
              then entry and exit points are constructed based on a straight line at those angles passing through
              the centre of the cell
           add_properties (bool or list of str, default True): if True, WELLSPEC columns (other than IW, JW, L & GRID)
              are added as property parts for the blocked well; if a list is passed, it must contain a subset of the
              columns in the WELLSPEC data

        returns:
           self if successful; None otherwise

        note:
           if add_properties is True or present as a list, this method will write the hdf5, create the xml and add
           parts to the model for this blocked well and the properties
        """
        # reconcile the given name with any already held by this object
        well_name = self.__derive_from_wellspec_check_well_name(well_name = well_name)
        # establish which wellspec columns are to be loaded (and later added as properties)
        col_list = _derive_from_wellspec_verify_col_list(add_properties = add_properties)
        name_for_check, col_list = _derive_from_wellspec_check_grid_name(check_grid_name = check_grid_name,
                                                                         grid = grid,
                                                                         col_list = col_list)
        wellspec_dict = wsk.load_wellspecs(wellspec_file, well = well_name, column_list = col_list)
        assert len(wellspec_dict) == 1, 'no wellspec data found in file ' + wellspec_file + ' for well ' + well_name
        df = wellspec_dict[well_name]
        assert len(df) > 0, 'no rows of perforation data found in wellspec for well ' + well_name
        # name_for_check = grid_name if check_grid_name else None
        return self.derive_from_dataframe(df,
                                          well_name,
                                          grid,
                                          grid_name_to_check = name_for_check,
                                          use_face_centres = use_face_centres,
                                          add_as_properties = add_properties)
def __derive_from_wellspec_check_well_name(self, well_name):
""" Set the well name to be used in the wellspec file."""
if well_name:
self.well_name = well_name
else:
well_name = self.well_name
return well_name
def derive_from_cell_list(self, cell_kji0_list, well_name, grid):
"""Populate empty blocked well from numpy int array of shape (N, 3) being list of cells."""
df = pd.DataFrame(columns = ['IW', 'JW', 'L'])
df['IW'] = cell_kji0_list[:, 2] + 1
df['JW'] = cell_kji0_list[:, 1] + 1
df['L'] = cell_kji0_list[:, 0] + 1
return self.derive_from_dataframe(df, well_name, grid, use_face_centres = True)
    def derive_from_dataframe(self,
                              df,
                              well_name,
                              grid,
                              grid_name_to_check = None,
                              use_face_centres = True,
                              add_as_properties = False):
        """Populate empty blocked well from WELLSPEC-like dataframe; first columns must be IW, JW, L (i, j, k).

        note:
           if add_as_properties is True or present as a list of wellspec column names, both the blocked well and
           the properties will have their hdf5 data written, xml created and be added as parts to the model
        """
        if well_name:
            self.well_name = well_name
        else:
            well_name = self.well_name
        assert len(df) > 0, 'empty dataframe for blocked well ' + str(well_name)
        length_uom = grid.z_units()
        assert grid.xy_units() == length_uom, 'mixed length units in grid crs'
        previous_xyz = None
        trajectory_mds = []
        trajectory_points = [] # entries paired with trajectory_mds
        blocked_intervals = [
        ] # will have one fewer entries than trajectory nodes; 0 = blocked, -1 = not blocked (for grid indices)
        blocked_cells_kji0 = [] # will have length equal to number of 0's in blocked intervals
        blocked_face_pairs = [
        ] # same length as blocked_cells_kji0; each is ((entry axis, entry polarity), (exit axis, exit polarity))
        log.debug('wellspec dataframe for well ' + str(well_name) + ' has ' + str(len(df)) + ' row' + _pl(len(df)))
        skipped_warning_grid = None
        angles_present = ('ANGLV' in df.columns and 'ANGLA' in df.columns and not pd.isnull(df.iloc[0]['ANGLV']) and
                          not pd.isnull(df.iloc[0]['ANGLA']))
        # TODO: remove these temporary overrides
        # NOTE: the two lines below deliberately force face-centre tactics, disabling any angle data
        angles_present = False
        use_face_centres = True
        if not angles_present and not use_face_centres:
            log.warning(f'ANGLV and/or ANGLA data unavailable for well {well_name}: using face centres')
            use_face_centres = True
        for i in range(len(df)): # for each row in the dataframe for this well
            cell_kji0 = BlockedWell.__cell_kji0_from_df(df, i)
            if cell_kji0 is None:
                log.error('missing cell index in wellspec data for well ' + str(well_name) + ' row ' + str(i + 1))
                continue
            row = df.iloc[i]
            # skip rows belonging to a different grid, when a grid name is being checked
            skipped_warning_grid, skip_row = BlockedWell.__verify_grid_name(grid_name_to_check = grid_name_to_check,
                                                                            row = row,
                                                                            skipped_warning_grid = skipped_warning_grid,
                                                                            well_name = well_name)
            if skip_row:
                continue
            cp = grid.corner_points(cell_kji0 = cell_kji0, cache_resqml_array = False)
            assert not np.any(np.isnan(cp)), 'missing geometry for perforation cell for well ' + str(well_name)
            entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz = BlockedWell.__calculate_entry_and_exit_axes_polarities_and_points(
                angles_present = angles_present,
                row = row,
                cp = cp,
                well_name = well_name,
                df = df,
                i = i,
                cell_kji0 = cell_kji0,
                blocked_cells_kji0 = blocked_cells_kji0,
                use_face_centres = use_face_centres)
            log.debug(
                f'cell: {cell_kji0}; entry axis: {entry_axis}; polarity {entry_polarity}; exit axis: {exit_axis}; polarity {exit_polarity}'
            )
            # append this cell (and any intervening unblocked interval) to the running lists
            previous_xyz, trajectory_mds, trajectory_points, blocked_intervals, blocked_cells_kji0, blocked_face_pairs = BlockedWell.__add_interval(
                previous_xyz = previous_xyz,
                entry_axis = entry_axis,
                entry_polarity = entry_polarity,
                entry_xyz = entry_xyz,
                exit_axis = exit_axis,
                exit_polarity = exit_polarity,
                exit_xyz = exit_xyz,
                cell_kji0 = cell_kji0,
                trajectory_mds = trajectory_mds,
                trajectory_points = trajectory_points,
                blocked_intervals = blocked_intervals,
                blocked_cells_kji0 = blocked_cells_kji0,
                blocked_face_pairs = blocked_face_pairs)
        blocked_count = len(blocked_cells_kji0)
        BlockedWell.__check_number_of_blocked_well_intervals(blocked_cells_kji0 = blocked_cells_kji0,
                                                            well_name = well_name,
                                                            grid_name = grid_name_to_check)
        self.node_count = len(trajectory_mds)
        self.node_mds = np.array(trajectory_mds)
        self.cell_count = len(blocked_cells_kji0)
        self.grid_indices = np.array(blocked_intervals, dtype = int) # NB. only supporting one grid at the moment
        self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))
        self.face_pair_indices = np.array(blocked_face_pairs, dtype = int)
        self.grid_list = [grid]
        trajectory_points, trajectory_mds = BlockedWell.__add_tail_to_trajectory_if_necessary(
            blocked_count = blocked_count,
            exit_axis = exit_axis,
            exit_polarity = exit_polarity,
            cell_kji0 = cell_kji0,
            grid = grid,
            trajectory_points = trajectory_points,
            trajectory_mds = trajectory_mds)
        self.create_md_datum_and_trajectory(grid, trajectory_mds, trajectory_points, length_uom, well_name)
        self.__add_as_properties_if_specified(add_as_properties = add_as_properties, df = df, length_uom = length_uom)
        return self
@staticmethod
def __cell_kji0_from_df(df, df_row):
row = df.iloc[df_row]
if pd.isna(row[0]) or pd.isna(row[1]) or pd.isna(row[2]):
return None
cell_kji0 = np.empty((3,), dtype = int)
cell_kji0[:] = row[2], row[1], row[0]
cell_kji0[:] -= 1
return cell_kji0
@staticmethod
def __verify_grid_name(grid_name_to_check, row, skipped_warning_grid, well_name):
"""Check whether the grid associated with a row of the dataframe matches the expected grid name."""
skip_row = False
if grid_name_to_check and pd.notna(row['GRID']) and grid_name_to_check != str(row['GRID']).upper():
other_grid = str(row['GRID'])
if skipped_warning_grid != other_grid:
log.warning('skipping perforation(s) in grid ' + other_grid + ' for well ' + str(well_name))
skipped_warning_grid = other_grid
skip_row = True
return skipped_warning_grid, skip_row
@staticmethod
def __calculate_entry_and_exit_axes_polarities_and_points(angles_present, row, cp, well_name, df, i, cell_kji0,
blocked_cells_kji0, use_face_centres):
if angles_present:
entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz = BlockedWell.__calculate_entry_and_exit_axes_polarities_and_points_using_angles(
row = row, cp = cp, well_name = well_name)
else:
# fabricate entry and exit axes and polarities based on indices alone
# note: could use geometry but here a cheap rough-and-ready approach is used
log.debug('row ' + str(i) + ': using cell moves')
entry_axis, entry_polarity, exit_axis, exit_polarity = BlockedWell.__calculate_entry_and_exit_axes_polarities_and_points_using_indices(
df = df, i = i, cell_kji0 = cell_kji0, blocked_cells_kji0 = blocked_cells_kji0)
entry_xyz, exit_xyz = BlockedWell.__override_vector_based_xyz_entry_and_exit_points_if_necessary(
use_face_centres = use_face_centres,
entry_axis = entry_axis,
exit_axis = exit_axis,
entry_polarity = entry_polarity,
exit_polarity = exit_polarity,
cp = cp)
return entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz
    @staticmethod
    def __calculate_entry_and_exit_axes_polarities_and_points_using_angles(row, cp, well_name):
        """Calculate entry and exit axes, polarities and points using azimuth and inclination angles."""
        angla = row['ANGLA']
        inclination = row['ANGLV']
        if inclination < 0.1:
            # near-vertical well: azimuth is ill-defined, so use zero
            azimuth = 0.0
        else:
            # mean I axis direction for the cell, from which ANGLA is measured
            i_vector = np.sum(cp[:, :, 1] - cp[:, :, 0], axis = (0, 1))
            azimuth = vec.azimuth(i_vector) - angla # see Nexus keyword reference doc
        # a long vector through the cell centre in the direction of the well
        well_vector = vec.unit_vector_from_azimuth_and_inclination(azimuth, inclination) * 10000.0
        # todo: the following might be producing NaN's when vector passes precisely through an edge
        (entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity,
         exit_xyz) = find_entry_and_exit(cp, -well_vector, well_vector, well_name)
        return entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz
def __calculate_entry_and_exit_axes_polarities_and_points_using_indices(df, i, cell_kji0, blocked_cells_kji0):
entry_axis, entry_polarity = BlockedWell.__fabricate_entry_axis_and_polarity_using_indices(
i, cell_kji0, blocked_cells_kji0)
exit_axis, exit_polarity = BlockedWell.__fabricate_exit_axis_and_polarity_using_indices(
i, cell_kji0, entry_axis, entry_polarity, df)
return entry_axis, entry_polarity, exit_axis, exit_polarity
@staticmethod
def __fabricate_entry_axis_and_polarity_using_indices(i, cell_kji0, blocked_cells_kji0):
"""Fabricate entry and exit axes and polarities based on indices alone.
note:
could use geometry but here a cheap rough-and-ready approach is used
"""
if i == 0:
entry_axis, entry_polarity = 0, 0 # K-
else:
entry_move = cell_kji0 - blocked_cells_kji0[-1]
log.debug(f'entry move: {entry_move}')
if entry_move[1] == 0 and entry_move[2] == 0: # K move
entry_axis = 0
entry_polarity = 0 if entry_move[0] >= 0 else 1
elif abs(entry_move[1]) > abs(entry_move[2]): # J dominant move
entry_axis = 1
entry_polarity = 0 if entry_move[1] >= 0 else 1
else: # I dominant move
entry_axis = 2
entry_polarity = 0 if entry_move[2] >= 0 else 1
return entry_axis, entry_polarity
@staticmethod
def __fabricate_exit_axis_and_polarity_using_indices(i, cell_kji0, entry_axis, entry_polarity, df):
if i == len(df) - 1:
exit_axis, exit_polarity = entry_axis, 1 - entry_polarity
else:
next_cell_kji0 = BlockedWell.__cell_kji0_from_df(df, i + 1)
if next_cell_kji0 is None:
exit_axis, exit_polarity = entry_axis, 1 - entry_polarity
else:
exit_move = next_cell_kji0 - cell_kji0
log.debug(f'exit move: {exit_move}')
if exit_move[1] == 0 and exit_move[2] == 0: # K move
exit_axis = 0
exit_polarity = 1 if exit_move[0] >= 0 else 0
elif abs(exit_move[1]) > abs(exit_move[2]): # J dominant move
exit_axis = 1
exit_polarity = 1 if exit_move[1] >= 0 else 0
else: # I dominant move
exit_axis = 2
exit_polarity = 1 if exit_move[2] >= 0 else 0
return exit_axis, exit_polarity
@staticmethod
def __override_vector_based_xyz_entry_and_exit_points_if_necessary(use_face_centres, entry_axis, exit_axis,
entry_polarity, exit_polarity, cp):
"""Override the vector based xyz entry and exit with face centres"""
if use_face_centres: # override the vector based xyz entry and exit points with face centres
if entry_axis == 0:
entry_xyz = np.mean(cp[entry_polarity, :, :], axis = (0, 1))
elif entry_axis == 1:
entry_xyz = np.mean(cp[:, entry_polarity, :], axis = (0, 1))
else:
entry_xyz = np.mean(cp[:, :, entry_polarity], axis = (0, 1)) # entry_axis == 2, ie. I
if exit_axis == 0:
exit_xyz = np.mean(cp[exit_polarity, :, :], axis = (0, 1))
elif exit_axis == 1:
exit_xyz = np.mean(cp[:, exit_polarity, :], axis = (0, 1))
else:
exit_xyz = np.mean(cp[:, :, exit_polarity], axis = (0, 1)) # exit_axis == 2, ie. I
return entry_xyz, exit_xyz
    @staticmethod
    def __add_interval(previous_xyz, entry_axis, entry_polarity, entry_xyz, exit_axis, exit_polarity, exit_xyz,
                       cell_kji0, trajectory_mds, trajectory_points, blocked_intervals, blocked_cells_kji0,
                       blocked_face_pairs):
        """Append one blocked interval, and any preceding unblocked interval, to the accumulating trajectory lists.

        The list arguments are mutated in place as well as returned; md increments are naive straight
        line lengths between successive xyz points, which assumes x, y & z share the same units.
        """
        if previous_xyz is None:  # first entry
            log.debug('adding mean sea level trajectory start')
            previous_xyz = entry_xyz.copy()
            previous_xyz[2] = 0.0  # use depth zero as md datum
            trajectory_mds.append(0.0)
            trajectory_points.append(previous_xyz)
        if not vec.isclose(previous_xyz, entry_xyz, tolerance = 0.05):  # add an unblocked interval
            log.debug('adding unblocked interval')
            trajectory_points.append(entry_xyz)
            new_md = trajectory_mds[-1] + vec.naive_length(entry_xyz - previous_xyz)  # assumes x, y & z units are same
            trajectory_mds.append(new_md)
            blocked_intervals.append(-1)  # unblocked interval
            previous_xyz = entry_xyz
        log.debug('adding blocked interval for cell kji0: ' + str(cell_kji0))
        trajectory_points.append(exit_xyz)
        new_md = trajectory_mds[-1] + vec.naive_length(exit_xyz - previous_xyz)  # assumes x, y & z units are same
        trajectory_mds.append(new_md)
        blocked_intervals.append(0)  # blocked interval
        previous_xyz = exit_xyz
        blocked_cells_kji0.append(cell_kji0)
        blocked_face_pairs.append(((entry_axis, entry_polarity), (exit_axis, exit_polarity)))
        return previous_xyz, trajectory_mds, trajectory_points, blocked_intervals, blocked_cells_kji0, blocked_face_pairs
@staticmethod
def __add_tail_to_trajectory_if_necessary(blocked_count, exit_axis, exit_polarity, cell_kji0, grid,
trajectory_points, trajectory_mds):
"""Add tail to trajcetory if last segment terminates at bottom face in bottom layer."""
if blocked_count > 0 and exit_axis == 0 and exit_polarity == 1 and cell_kji0[
0] == grid.nk - 1 and grid.k_direction_is_down:
tail_length = 10.0 # metres or feet
tail_xyz = trajectory_points[-1].copy()
tail_xyz[2] += tail_length * (1.0 if grid.z_inc_down() else -1.0)
trajectory_points.append(tail_xyz)
new_md = trajectory_mds[-1] + tail_length
trajectory_mds.append(new_md)
return trajectory_points, trajectory_mds
def __add_as_properties_if_specified(self, add_as_properties, df, length_uom):
"""If add_as_properties is True or present as a list of wellspec column names, both the blocked well and
the properties will have their hdf5 data written, xml created and be added as parts to the model.
"""
if add_as_properties and len(df.columns) > 3:
# NB: atypical writing of hdf5 data and xml creation in order to support related properties
self.write_hdf5()
self.create_xml()
if isinstance(add_as_properties, list):
for col in add_as_properties:
assert col in df.columns[3:] # could just skip missing columns
property_columns = add_as_properties
else:
property_columns = df.columns[3:]
self._add_df_properties(df, property_columns, length_uom = length_uom)
    def import_from_rms_cellio(self,
                               cellio_file,
                               well_name,
                               grid,
                               include_overburden_unblocked_interval = False,
                               set_tangent_vectors = False):
        """Populates empty blocked well from RMS cell I/O data; creates simulation trajectory and md datum.

        args:
           cellio_file (string): path of RMS ascii export file holding blocked well cell I/O data; cell entry and
              exit points are expected
           well_name (string): the name of the well as used in the cell I/O file
           grid (grid.Grid object): the grid object which the cell indices in the cell I/O data relate to
           include_overburden_unblocked_interval (boolean, default False): if True, the trajectory is started
              from depth zero (used as the md datum), so an unblocked overburden interval is included
           set_tangent_vectors (boolean, default False): if True, tangent vectors will be computed from the well
              trajectory's control points

        returns:
           self if successful; None otherwise
        """
        if well_name:
            self.well_name = well_name
        else:
            well_name = self.well_name
        grid_name = rqet.citation_title_for_node(grid.root)
        length_uom = grid.z_units()
        grid_z_inc_down = crs.Crs(grid.model, uuid = grid.crs_uuid).z_inc_down
        log.debug('grid z increasing downwards: ' + str(grid_z_inc_down) + '(type: ' + str(type(grid_z_inc_down)) + ')')
        # NOTE(review): cellio_z_inc_down is never updated in this scope; the parse helper re-infers the
        # cell I/O z direction for every data line — confirm that per-line inference is intended
        cellio_z_inc_down = None
        try:
            assert ' ' not in well_name, 'cannot import for well name containing spaces'
            with open(cellio_file, 'r') as fp:
                BlockedWell.__verify_header_lines_in_cellio_file(fp = fp,
                                                                 well_name = well_name,
                                                                 cellio_file = cellio_file)
                previous_xyz = None
                trajectory_mds = []
                trajectory_points = []  # entries paired with trajectory_mds
                blocked_intervals = [
                ]  # will have one fewer entries than trajectory nodes; 0 = blocked, -1 = not blocked (for grid indices)
                blocked_cells_kji0 = []  # will have length equal to number of 0's in blocked intervals
                blocked_face_pairs = [
                ]  # same length as blocked_cells_kji0; each is ((entry axis, entry polarity), (exit axis, exit polarity))
                while not kf.blank_line(fp):
                    line = fp.readline()
                    cell_kji0, entry_xyz, exit_xyz = BlockedWell.__parse_non_blank_line_in_cellio_file(
                        line = line,
                        grid = grid,
                        cellio_z_inc_down = cellio_z_inc_down,
                        grid_z_inc_down = grid_z_inc_down)
                    cp, cell_centre, entry_vector, exit_vector = BlockedWell.__calculate_cell_cp_center_and_vectors(
                        grid = grid,
                        cell_kji0 = cell_kji0,
                        entry_xyz = entry_xyz,
                        exit_xyz = exit_xyz,
                        well_name = well_name)
                    # let's hope everything is in the same coordinate reference system!
                    (entry_axis, entry_polarity, facial_entry_xyz, exit_axis, exit_polarity,
                     facial_exit_xyz) = find_entry_and_exit(cp, entry_vector, exit_vector, well_name)
                    if previous_xyz is None:  # first entry
                        previous_xyz = entry_xyz.copy()
                        if include_overburden_unblocked_interval:
                            log.debug('adding mean sea level trajectory start')
                            previous_xyz[2] = 0.0  # use depth zero as md datum
                        trajectory_mds.append(previous_xyz[2])
                        trajectory_points.append(previous_xyz)
                    if not vec.isclose(previous_xyz, entry_xyz, tolerance = 0.05):  # add an unblocked interval
                        log.debug('adding unblocked interval')
                        trajectory_points.append(entry_xyz)
                        new_md = trajectory_mds[-1] + vec.naive_length(
                            entry_xyz - previous_xyz)  # assumes x, y & z units are same
                        trajectory_mds.append(new_md)
                        blocked_intervals.append(-1)  # unblocked interval
                        previous_xyz = entry_xyz
                    log.debug('adding blocked interval for cell kji0: ' + str(cell_kji0))
                    trajectory_points.append(exit_xyz)
                    new_md = trajectory_mds[-1] + vec.naive_length(
                        exit_xyz - previous_xyz)  # assumes x, y & z units are same
                    trajectory_mds.append(new_md)
                    blocked_intervals.append(0)  # blocked interval
                    previous_xyz = exit_xyz
                    blocked_cells_kji0.append(cell_kji0)
                    blocked_face_pairs.append(((entry_axis, entry_polarity), (exit_axis, exit_polarity)))
                BlockedWell.__check_number_of_blocked_well_intervals(blocked_cells_kji0 = blocked_cells_kji0,
                                                                    well_name = well_name,
                                                                    grid_name = grid_name)
                self.create_md_datum_and_trajectory(grid,
                                                    trajectory_mds,
                                                    trajectory_points,
                                                    length_uom,
                                                    well_name,
                                                    set_depth_zero = True,
                                                    set_tangent_vectors = set_tangent_vectors)
                self.node_count = len(trajectory_mds)
                self.node_mds = np.array(trajectory_mds)
                self.cell_count = len(blocked_cells_kji0)
                self.grid_indices = np.array(blocked_intervals,
                                             dtype = int)  # NB. only supporting one grid at the moment
                self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))
                self.face_pair_indices = np.array(blocked_face_pairs)
                self.grid_list = [grid]
        except Exception:
            log.exception('failed to import info for blocked well ' + str(well_name) + ' from cell I/O file ' +
                          str(cellio_file))
            return None
        return self
    @staticmethod
    def __verify_header_lines_in_cellio_file(fp, well_name, cellio_file):
        """Find and verify the information in the header lines for the specified well in the RMS cellio file."""
        while True:
            kf.skip_blank_lines_and_comments(fp)
            line = fp.readline()  # file format version number?
            assert line, 'well ' + str(well_name) + ' not found in file ' + str(cellio_file)
            fp.readline()  # 'Undefined'
            words = fp.readline().split()
            assert len(words), 'missing header info in cell I/O file'
            if words[0].upper() == well_name.upper():
                break  # found the block of data for the requested well
            # not the well of interest: skip over the remainder of this well's data block
            while not kf.blank_line(fp):
                fp.readline()  # skip to block of data for next well
        # consume the per-well header: a count line followed by that many header lines
        header_lines = int(fp.readline().strip())
        for _ in range(header_lines):
            fp.readline()
@staticmethod
def __parse_non_blank_line_in_cellio_file(line, grid, cellio_z_inc_down, grid_z_inc_down):
"""Parse each non-blank line in the RMS cellio file for the relevant parameters."""
words = line.split()
assert len(words) >= 9, 'not enough items on data line in cell I/O file, minimum 9 expected'
i1, j1, k1 = int(words[0]), int(words[1]), int(words[2])
cell_kji0 = np.array((k1 - 1, j1 - 1, i1 - 1), dtype = int)
assert np.all(0 <= cell_kji0) and np.all(
cell_kji0 < grid.extent_kji), 'cell I/O cell index not within grid extent'
entry_xyz = np.array((float(words[3]), float(words[4]), float(words[5])))
exit_xyz = np.array((float(words[6]), float(words[7]), float(words[8])))
if cellio_z_inc_down is None:
cellio_z_inc_down = bool(entry_xyz[2] + exit_xyz[2] > 0.0)
if cellio_z_inc_down != grid_z_inc_down:
entry_xyz[2] = -entry_xyz[2]
exit_xyz[2] = -exit_xyz[2]
return cell_kji0, entry_xyz, exit_xyz
@staticmethod
def __calculate_cell_cp_center_and_vectors(grid, cell_kji0, entry_xyz, exit_xyz, well_name):
"""Calculate the i,j,k coordinates that represent the corner points and center of a perforation cell.
Calculate the entry and exit vectors for the perforation cell.
"""
cp = grid.corner_points(cell_kji0 = cell_kji0, cache_resqml_array = False)
assert not np.any(np.isnan(
cp)), 'missing geometry for perforation cell(kji0) ' + str(cell_kji0) + ' for well ' + str(well_name)
cell_centre = np.mean(cp, axis = (0, 1, 2))
# let's hope everything is in the same coordinate reference system!
entry_vector = 100.0 * (entry_xyz - cell_centre)
exit_vector = 100.0 * (exit_xyz - cell_centre)
return cp, cell_centre, entry_vector, exit_vector
@staticmethod
def __check_number_of_blocked_well_intervals(blocked_cells_kji0, well_name, grid_name):
""" Check that at least one interval is blocked for the specified well."""
blocked_count = len(blocked_cells_kji0)
if blocked_count == 0:
log.warning('no intervals blocked for well ' + well_name + ' in grid ' + str(grid_name))
return None
else:
log.info(
str(blocked_count) + ' interval' + _pl(blocked_count) + ' blocked for well ' + well_name + ' in grid ' +
str(grid_name))
def dataframe(self,
i_col = 'IW',
j_col = 'JW',
k_col = 'L',
one_based = True,
extra_columns_list = [],
ntg_uuid = None,
perm_i_uuid = None,
perm_j_uuid = None,
perm_k_uuid = None,
satw_uuid = None,
sato_uuid = None,
satg_uuid = None,
region_uuid = None,
radw = None,
skin = None,
stat = None,
active_only = False,
min_k0 = None,
max_k0 = None,
k0_list = None,
min_length = None,
min_kh = None,
max_depth = None,
max_satw = None,
min_sato = None,
max_satg = None,
perforation_list = None,
region_list = None,
depth_inc_down = None,
set_k_face_intervals_vertical = False,
anglv_ref = 'normal ij down',
angla_plane_ref = None,
length_mode = 'MD',
length_uom = None,
use_face_centres = False,
preferential_perforation = True,
add_as_properties = False,
use_properties = False):
"""Returns a pandas data frame containing WELLSPEC style data.
arguments:
i_col (string, default 'IW'): the column name to use for cell I index values
j_col (string, default 'JW'): the column name to use for cell J index values
k_col (string, default 'L'): the column name to use for cell K index values
one_based (boolean, default True): if True, simulator protocol i, j & k values are placed in I, J & K columns;
if False, resqml zero based values; this does not affect the interpretation of min_k0 & max_k0 arguments
extra_columns_list (list of string, optional): list of WELLSPEC column names to include in the dataframe, from currently
recognised values: 'GRID', 'ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'RADW', 'SKIN', 'PPERF', 'RADB', 'WI', 'WBC'
ntg_uuid (uuid.UUID, optional): the uuid of the net to gross ratio property; if present is used to downgrade the i & j
permeabilities in the calculation of KH; ignored if 'KH' not in the extra column list and min_kh is not specified;
the argument may also be a dictionary mapping from grid uuid to ntg uuid; if no net to gross data is provided, it
is effectively assumed to be one (or, equivalently, the I & J permeability data is applicable to the gross rock); see
also preferential_perforation argument which can cause adjustment of effective ntg in partially perforated cells
perm_i_uuid (uuid.UUID or dictionary, optional): the uuid of the permeability property in the I direction;
required if 'KH' is included in the extra columns list and min_kh is not specified; ignored otherwise;
the argument may also be a dictionary mapping from grid uuid to perm I uuid
perm_j_uuid (uuid.UUID, optional): the uuid (or dict) of the permeability property in the J direction;
defaults to perm_i_uuid
perm_k_uuid (uuid.UUID, optional): the uuid (or dict) of the permeability property in the K direction;
defaults to perm_i_uuid
satw_uuid (uuid.UUID, optional): the uuid of a water saturation property; required if max_satw is specified; may also
be a dictionary mapping from grid uuid to satw uuid; ignored if max_satw is None
sato_uuid (uuid.UUID, optional): the uuid of an oil saturation property; required if min_sato is specified; may also
be a dictionary mapping from grid uuid to sato uuid; ignored if min_sato is None
satg_uuid (uuid.UUID, optional): the uuid of a gas saturation property; required if max_satg is specified; may also
be a dictionary mapping from grid uuid to satg uuid; ignored if max_satg is None
region_uuid (uuid.UUID, optional): the uuid of a discrete or categorical property, required if region_list is not None;
may also be a dictionary mapping from grid uuid to region uuid; ignored if region_list is None
radw (float, optional): if present, the wellbore radius used for all perforations; must be in correct units for intended
use of the WELLSPEC style dataframe; will default to 0.25 if 'RADW' is included in the extra column list
skin (float, optional): if present, a skin column is included with values set to this constant
stat (string, optional): if present, should be 'ON' or 'OFF' and is used for all perforations; will default to 'ON' if
'STAT' is included in the extra column list
active_only (boolean, default False): if True, only cells that are flagged in the grid object as active are included;
if False, cells are included whether active or not
min_k0 (int, optional): if present, perforations in layers above this are excluded (layer number will be applied
naively to all grids – not recommended when working with more than one grid with different layering)
max_k0 (int, optional): if present, perforations in layers below this are excluded (layer number will be applied
naively to all grids – not recommended when working with more than one grid with different layering)
k0_list (list of int, optional): if present, only perforations in cells in these layers are included (layer numbers
will be applied naively to all grids – not recommended when working with more than one grid with different layering)
min_length (float, optional): if present, a minimum length for an individual perforation interval to be included;
units are the length units of the trajectory object unless length_uom argument is set
min_kh (float, optional): if present, the minimum permeability x length value for which an individual interval is
included; permeabilty uuid(s) must be supplied for the kh calculation; units of the length component are those
of the trajectory object unless length_uom argument is set
max_depth (float, optional): if present, rows are excluded for cells with a centre point depth greater than this value;
max_depth should be positive downwards, with units of measure those of the grid z coordinates
max_satw (float, optional): if present, perforations in cells where the water saturation exceeds this value will
be excluded; satw_uuid must be supplied if this argument is present
min_sato (float, optional): if present, perforations in cells where the oil saturation is less than this value will
be excluded; sato_uuid must be supplied if this argument is present
max_satg (float, optional): if present, perforations in cells where the gas saturation exceeds this value will
be excluded; satg_uuid must be supplied if this argument is present
perforation_list (list of (float, float), optional): if present, a list of perforated intervals; each entry is the
start and end measured depths for a perforation; these do not need to align with cell boundaries
region_list (list of int, optional): if present, a list of region numbers for which rows are to be included; the
property holding the region data is identified by the region_uuid argument
depth_inc_down (boolean, optional): if present and True, the depth values will increase with depth; if False or None,
the direction of the depth values will be determined by the z increasing downwards indicator in the trajectory crs
set_k_face_intervals_vertical (boolean, default False): if True, intervals with entry through K- and exit through K+
will have angla and anglv set to 0.0 (vertical); if False angles will be computed depending on geometry
anglv_ref (string, default 'normal ij down'): either 'gravity', 'z down' (same as gravity), 'z+', 'k down', 'k+',
'normal ij', or 'normal ij down';
the ANGLV angles are relative to a local (per cell) reference vector selected by this keyword
angla_plane_ref (string, optional): string indicating normal vector defining plane onto which trajectory and I axis are
projected for the calculation of ANGLA; options as for anglv_ref, or 'normal well i+' which results in no projection;
defaults to the same as anglv_ref
length_mode (string, default 'MD'): 'MD' or 'straight' indicating which length to use; 'md' takes measured depth
difference between exit and entry; 'straight' uses a naive straight line length between entry and exit;
this will affect values for LENGTH, KH, DEPTH, X & Y
length_uom (string, optional): if present, either 'm' or 'ft': the length units to use for the LENGTH, KH, MD, DEPTH,
X & Y columns if they are present in extra_columns_list; also used to interpret min_length and min_kh; if None, the
length units of the trajectory attribute are used LENGTH, KH & MD and those of the grid are used for DEPTH, X & Y;
RADW value, if present, is assumed to be in the correct units and is not changed; also used implicitly to determine
conversion constant used in calculation of wellbore constant (WBC)
use_face_centres (boolean, default False): if True, the centre points of the entry and exit faces will determine the
vector used as the basis of ANGLA and ANGLV calculations; if False, the trajectory locations for the entry and exit
measured depths will be used
preferential_perforation (boolean, default True): if perforation_list is given, and KH is requested or a min_kh given,
the perforated intervals are assumed to penetrate pay rock preferentially: an effective ntg weighting is computed
to account for any residual non-pay perforated interval; ignored if perforation_list is None or kh values are not
being computed
add_as_properties (boolean or list of str, default False): if True, each column in the extra_columns_list (excluding
GRID and STAT) is added as a property with the blocked well as supporting representation and 'cells' as the
indexable element; any cell that is excluded from the dataframe will have corresponding entries of NaN in all the
properties; if a list is provided it must be a subset of extra_columns_list
use_properties (boolean or list of str, default False): if True, each column in the extra_columns_list (excluding
GRID and STAT) is populated from a property with citation title matching the column name, if it exists
notes:
units of length along wellbore will be those of the trajectory's length_uom (also applies to K.H values) unless
the length_uom argument is used;
the constraints are applied independently for each row and a row is excluded if it fails any constraint;
the min_k0 and max_k0 arguments do not stop later rows within the layer range from being included;
the min_length and min_kh limits apply to individual cell intervals and thus depend on cell size;
the water and oil saturation limits are for saturations at a single time and affect whether the interval
is included in the dataframe – there is no functionality to support turning perforations off and on over time;
the saturation limits do not stop deeper intervals with qualifying saturations from being included;
the k0_list, perforation_list and region_list arguments should be set to None to disable the corresponding functionality,
if set to an empty list, no rows will be included in the dataframe;
if add_as_properties is True, the blocked well must already have been added as a part to the model;
at add_as_properties and use_properties cannot both be True;
add_as_properties and use_properties are only currently functional for single grid blocked wells;
at present, unit conversion is not handled when using properties
:meta common:
"""
assert length_mode in ['MD', 'straight']
assert length_uom is None or length_uom in ['m', 'ft']
anglv_ref, angla_plane_ref = BlockedWell.__verify_angle_references(anglv_ref, angla_plane_ref)
column_list = [i_col, j_col, k_col]
column_list, add_as_properties, use_properties, skin, stat, radw = BlockedWell.__verify_extra_properties_to_be_added_to_dataframe(
extra_columns_list = extra_columns_list,
column_list = column_list,
add_as_properties = add_as_properties,
use_properties = use_properties,
skin = skin,
stat = stat,
radw = radw)
pc = rqp.PropertyCollection(support = self) if use_properties else None
pc_titles = [] if pc is None else pc.titles()
max_satw, min_sato, max_satg = BlockedWell.__verify_saturation_ranges_and_property_uuids(
max_satw, min_sato, max_satg, satw_uuid, sato_uuid, satg_uuid)
min_kh, doing_kh = BlockedWell.__verify_perm_i_uuid_for_kh(min_kh = min_kh,
column_list = column_list,
perm_i_uuid = perm_i_uuid,
pc_titles = pc_titles)
do_well_inflow = BlockedWell.__verify_perm_i_uuid_for_well_inflow(column_list = column_list,
perm_i_uuid = perm_i_uuid,
pc_titles = pc_titles)
perm_j_uuid, perm_k_uuid, isotropic_perm = BlockedWell.__verify_perm_j_k_uuids_for_kh_and_well_inflow(
doing_kh = doing_kh,
do_well_inflow = do_well_inflow,
perm_i_uuid = perm_i_uuid,
perm_j_uuid = perm_j_uuid,
perm_k_uuid = perm_k_uuid)
if min_length is not None and min_length <= 0.0:
min_length = None
if region_list is not None:
assert region_uuid is not None, 'region list specified without region property array'
BlockedWell.__check_perforation_properties_to_be_added(column_list = column_list,
perforation_list = perforation_list)
BlockedWell.__verify_k_layers_to_be_included(min_k0 = min_k0, max_k0 = max_k0, k0_list = k0_list)
doing_angles, doing_xyz, doing_entry_exit = BlockedWell.__verify_if_angles_xyz_and_length_to_be_added(
column_list = column_list,
pc_titles = pc_titles,
doing_kh = doing_kh,
do_well_inflow = do_well_inflow,
length_mode = length_mode)
grid_crs_list = self.__verify_number_of_grids_and_crs_units(column_list = column_list)
k_face_check = np.zeros((2, 2), dtype = int)
k_face_check[1, 1] = 1 # now represents entry, exit of K-, K+
k_face_check_end = k_face_check.copy()
k_face_check_end[1] = -1 # entry through K-, terminating (TD) within cell
traj_crs, traj_z_inc_down = self.__get_trajectory_crs_and_z_inclination()
df = pd.DataFrame(columns = column_list)
df = df.astype({i_col: int, j_col: int, k_col: int})
ci = -1
row_ci_list = []
interval_count = self.__get_interval_count()
for interval in range(interval_count):
if self.grid_indices[interval] < 0:
continue # unblocked interval
ci += 1
row_dict = {}
grid = self.grid_list[self.grid_indices[interval]]
grid_crs = grid_crs_list[self.grid_indices[interval]]
grid_name = rqet.citation_title_for_node(grid.root).replace(' ', '_')
natural_cell = self.cell_indices[ci]
cell_kji0 = grid.denaturalized_cell_index(natural_cell)
tuple_kji0 = tuple(cell_kji0)
skip_interval = BlockedWell.__skip_interval_check(max_depth = max_depth,
grid = grid,
cell_kji0 = cell_kji0,
grid_crs = grid_crs,
active_only = active_only,
tuple_kji0 = tuple_kji0,
min_k0 = min_k0,
max_k0 = max_k0,
k0_list = k0_list,
region_list = region_list,
region_uuid = region_uuid,
max_satw = max_satw,
satw_uuid = satw_uuid,
min_sato = min_sato,
sato_uuid = sato_uuid,
max_satg = max_satg,
satg_uuid = satg_uuid)
if skip_interval:
continue
skip_interval_due_to_perforations, part_perf_fraction = self.__get_part_perf_fraction_for_interval(
pc = pc, pc_titles = pc_titles, perforation_list = perforation_list, ci = ci, interval = interval)
if skip_interval_due_to_perforations:
continue
entry_xyz, exit_xyz, ee_crs = self.__get_entry_exit_xyz_and_crs_for_interval(
doing_entry_exit = doing_entry_exit,
use_face_centres = use_face_centres,
grid = grid,
cell_kji0 = cell_kji0,
interval = interval,
grid_crs = grid_crs,
traj_crs = traj_crs)
skip_interval_due_to_invalid_length, length = self.__get_length_of_interval(
length_mode = length_mode,
interval = interval,
length_uom = length_uom,
entry_xyz = entry_xyz,
exit_xyz = exit_xyz,
ee_crs = ee_crs,
perforation_list = perforation_list,
part_perf_fraction = part_perf_fraction,
min_length = min_length)
if skip_interval_due_to_invalid_length:
continue
md = 0.5 * (self.node_mds[interval + 1] + self.node_mds[interval])
anglv, sine_anglv, cosine_anglv, angla, sine_angla, cosine_angla = self.__get_angles_for_interval(
pc = pc,
pc_titles = pc_titles,
doing_angles = doing_angles,
set_k_face_intervals_vertical = set_k_face_intervals_vertical,
ci = ci,
k_face_check = k_face_check,
k_face_check_end = k_face_check_end,
entry_xyz = entry_xyz,
exit_xyz = exit_xyz,
traj_z_inc_down = traj_z_inc_down,
grid = grid,
grid_crs = grid_crs,
cell_kji0 = cell_kji0,
anglv_ref = anglv_ref,
angla_plane_ref = angla_plane_ref)
ntg_is_one, k_i, k_j, k_k = BlockedWell.__get_ntg_and_directional_perm_for_interval(
doing_kh = doing_kh,
do_well_inflow = do_well_inflow,
ntg_uuid = ntg_uuid,
grid = grid,
tuple_kji0 = tuple_kji0,
isotropic_perm = isotropic_perm,
preferential_perforation = preferential_perforation,
part_perf_fraction = part_perf_fraction,
perm_i_uuid = perm_i_uuid,
perm_j_uuid = perm_j_uuid,
perm_k_uuid = perm_k_uuid)
skip_interval_due_to_min_kh, kh = BlockedWell.__get_kh_for_interval(doing_kh = doing_kh,
isotropic_perm = isotropic_perm,
ntg_is_one = ntg_is_one,
length = length,
perm_i_uuid = perm_i_uuid,
grid = grid,
tuple_kji0 = tuple_kji0,
k_i = k_i,
k_j = k_j,
k_k = k_k,
anglv = anglv,
sine_anglv = sine_anglv,
cosine_anglv = cosine_anglv,
sine_angla = sine_angla,
cosine_angla = cosine_angla,
min_kh = min_kh,
pc = pc,
pc_titles = pc_titles,
ci = ci)
if skip_interval_due_to_min_kh:
continue
length, radw, skin, radb, wi, wbc = BlockedWell.__get_pc_arrays_for_interval(pc = pc,
pc_titles = pc_titles,
ci = ci,
length = length,
radw = radw,
skin = skin)
radb, wi, wbc = BlockedWell.__get_well_inflow_parameters_for_interval(do_well_inflow = do_well_inflow,
isotropic_perm = isotropic_perm,
ntg_is_one = ntg_is_one,
k_i = k_i,
k_j = k_j,
k_k = k_k,
sine_anglv = sine_anglv,
cosine_anglv = cosine_anglv,
sine_angla = sine_angla,
cosine_angla = cosine_angla,
grid = grid,
cell_kji0 = cell_kji0,
radw = radw,
radb = radb,
wi = wi,
wbc = wbc,
skin = skin,
kh = kh,
length_uom = length_uom,
column_list = column_list)
xyz = self.__get_xyz_arrays_for_interval(doing_xyz = doing_xyz,
length_mode = length_mode,
length_uom = length_uom,
md = md,
traj_crs = traj_crs,
depth_inc_down = depth_inc_down,
traj_z_inc_down = traj_z_inc_down,
entry_xyz = entry_xyz,
exit_xyz = exit_xyz,
ee_crs = ee_crs,
pc = pc,
pc_titles = pc_titles,
ci = ci)
md = self.__get_md_array_in_correct_units_for_interval(md = md,
length_uom = length_uom,
pc = pc,
pc_titles = pc_titles,
ci = ci)
df = BlockedWell.__append_interval_data_to_dataframe(df = df,
grid_name = grid_name,
radw = radw,
skin = skin,
angla = angla,
anglv = anglv,
length = length,
kh = kh,
xyz = xyz,
md = md,
stat = stat,
part_perf_fraction = part_perf_fraction,
radb = radb,
wi = wi,
wbc = wbc,
column_list = column_list,
one_based = one_based,
row_dict = row_dict,
cell_kji0 = cell_kji0,
row_ci_list = row_ci_list,
ci = ci)
self.__add_as_properties(df = df,
add_as_properties = add_as_properties,
extra_columns_list = extra_columns_list,
row_ci_list = row_ci_list,
length_uom = length_uom)
return df
def __get_interval_count(self):
"""Get the number of intervals to be added to the dataframe."""
if self.node_count is None or self.node_count < 2:
interval_count = 0
else:
interval_count = self.node_count - 1
return interval_count
@staticmethod
def __prop_array(uuid_or_dict, grid):
assert uuid_or_dict is not None and grid is not None
if isinstance(uuid_or_dict, dict):
prop_uuid = uuid_or_dict[grid.uuid]
else:
prop_uuid = uuid_or_dict # uuid either in form of string or uuid.UUID
return grid.property_collection.single_array_ref(uuid = prop_uuid)
    @staticmethod
    def __get_ref_vector(grid, grid_crs, cell_kji0, mode):
        """Return the reference unit vector for anglv/angla measurement, or None for mode 'normal well i+'.

        arguments:
           grid: the grid object holding the cell
           grid_crs: the grid's crs (its z_inc_down attribute is used for the fallback vector)
           cell_kji0 (triple int): cell indices, used when the mode depends on cell geometry
           mode (str): one of 'normal well i+', 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down'
        """
        # gravity = np.array((0.0, 0.0, 1.0))
        if mode == 'normal well i+':
            return None  # ANGLA only: option for no projection onto a plane
        ref_vector = None
        # options for anglv or angla reference: 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down'
        cell_axial_vectors = None
        # cell axial vectors are only needed for the k and normal-ij based modes
        if not mode.startswith('z'):
            cell_axial_vectors = grid.interface_vectors_kji(cell_kji0)
        ref_vector = BlockedWell.__get_ref_vector_when_mode_not_normal_well_i(mode = mode,
                                                                             grid_crs = grid_crs,
                                                                             cell_axial_vectors = cell_axial_vectors,
                                                                             grid = grid)
        # fall back to a vertical reference if the computed vector is degenerate (no z component)
        if ref_vector is None or ref_vector[2] == 0.0:
            if grid_crs.z_inc_down:
                ref_vector = np.array((0.0, 0.0, 1.0))
            else:
                ref_vector = np.array((0.0, 0.0, -1.0))
        return ref_vector
    @staticmethod
    def __get_ref_vector_when_mode_not_normal_well_i(mode, grid_crs, cell_axial_vectors, grid):
        """Return the reference vector for the given anglv/angla mode.

        note:
           cell_axial_vectors is only used by the 'k' and 'normal ij' modes; it may be None for the 'z' modes
        """
        if mode == 'z+':
            ref_vector = np.array((0.0, 0.0, 1.0))
        elif mode == 'z down':
            # 'z down' always means the direction of increasing depth, whichever way z is incremented
            if grid_crs.z_inc_down:
                ref_vector = np.array((0.0, 0.0, 1.0))
            else:
                ref_vector = np.array((0.0, 0.0, -1.0))
        elif mode in ['k+', 'k down']:
            # use the cell's k axial direction, negated if needed to point downwards for 'k down'
            ref_vector = vec.unit_vector(cell_axial_vectors[0])
            if mode == 'k down' and not grid.k_direction_is_down:
                ref_vector = -ref_vector
        else:  # normal to plane of ij axes
            ref_vector = vec.unit_vector(vec.cross_product(cell_axial_vectors[1], cell_axial_vectors[2]))
            if mode == 'normal ij down':
                # flip the normal so that it points in the direction of increasing depth
                if grid_crs.z_inc_down:
                    if ref_vector[2] < 0.0:
                        ref_vector = -ref_vector
                else:
                    if ref_vector[2] > 0.0:
                        ref_vector = -ref_vector
        return ref_vector
@staticmethod
def __verify_angle_references(anglv_ref, angla_plane_ref):
""" Verify that the references for anglv and angla are one of the acceptable options."""
assert anglv_ref in ['gravity', 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down']
if anglv_ref == 'gravity':
anglv_ref = 'z down'
if angla_plane_ref is None:
angla_plane_ref = anglv_ref
assert angla_plane_ref in [
'gravity', 'z down', 'z+', 'k down', 'k+', 'normal ij', 'normal ij down', 'normal well i+'
]
if angla_plane_ref == 'gravity':
angla_plane_ref = 'z down'
return anglv_ref, angla_plane_ref
@staticmethod
def __verify_saturation_ranges_and_property_uuids(max_satw, min_sato, max_satg, satw_uuid, sato_uuid, satg_uuid):
"""Verify that the fluid saturation limits fall within 0.0 to 1.0 and that the uuid of the required
saturation property array has been specified.
"""
if max_satw is not None and max_satw >= 1.0:
max_satw = None
if min_sato is not None and min_sato <= 0.0:
min_sato = None
if max_satg is not None and max_satg >= 1.0:
max_satg = None
phase_list = ['water', 'oil', 'gas']
phase_saturation_limits_list = [max_satw, min_sato, max_satg]
uuids_list = [satw_uuid, sato_uuid, satg_uuid]
for phase, phase_limit, uuid in zip(phase_list, phase_saturation_limits_list, uuids_list):
if phase_limit is not None:
assert uuid is not None, f'{phase} saturation limit specified without saturation property array'
return max_satw, min_sato, max_satg
@staticmethod
def __verify_extra_properties_to_be_added_to_dataframe(extra_columns_list, column_list, add_as_properties,
use_properties, skin, stat, radw):
""" Determine which extra columns, if any, should be added as properties to the dataframe.
note:
if skin, stat or radw are None, default values are specified.
"""
if extra_columns_list:
for extra in extra_columns_list:
assert extra.upper() in [
'GRID', 'ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'SKIN', 'RADW', 'PPERF', 'RADB',
'WI', 'WBC'
]
column_list.append(extra.upper())
else:
add_as_properties = use_properties = False
assert not (add_as_properties and use_properties)
column_list, skin, stat, radw = BlockedWell.__check_skin_stat_radw_to_be_added_as_properties(
skin = skin, stat = stat, radw = radw, column_list = column_list)
return column_list, add_as_properties, use_properties, skin, stat, radw
@staticmethod
def __check_perforation_properties_to_be_added(column_list, perforation_list):
if all(['LENGTH' in column_list, 'PPERF' in column_list, 'KH' not in column_list, perforation_list
is not None]):
log.warning(
'both LENGTH and PPERF will include effects of partial perforation; only one should be used in WELLSPEC'
)
elif all([
perforation_list is not None, 'LENGTH' not in column_list, 'PPERF' not in column_list, 'KH'
not in column_list, 'WBC' not in column_list
]):
log.warning('perforation list supplied but no use of LENGTH, KH, PPERF nor WBC')
if perforation_list is not None and len(perforation_list) == 0:
log.warning('empty perforation list specified for blocked well dataframe: no rows will be included')
@staticmethod
def __check_skin_stat_radw_to_be_added_as_properties(skin, stat, radw, column_list):
""" Verify whether skin should be added as a property in the dataframe."""
if skin is not None and 'SKIN' not in column_list:
column_list.append('SKIN')
if skin is None:
skin = 0.0
if stat is not None:
assert str(stat).upper() in ['ON', 'OFF']
stat = str(stat).upper()
if 'STAT' not in column_list:
column_list.append('STAT')
else:
stat = 'ON'
if radw is not None and 'RADW' not in column_list:
column_list.append('RADW')
if radw is None:
radw = 0.25
return column_list, skin, stat, radw
@staticmethod
def __verify_perm_i_uuid_for_well_inflow(column_list, perm_i_uuid, pc_titles):
""" Verify that the I direction permeability has been specified if well inflow properties are to be added
to the dataframe.
"""
do_well_inflow = (('WI' in column_list and 'WI' not in pc_titles) or
('WBC' in column_list and 'WBC' not in pc_titles) or
('RADB' in column_list and 'RADB' not in pc_titles))
if do_well_inflow:
assert perm_i_uuid is not None, 'WI, RADB or WBC requested without I direction permeabilty being specified'
return do_well_inflow
@staticmethod
def __verify_perm_i_uuid_for_kh(min_kh, column_list, perm_i_uuid, pc_titles):
""" Verify that the I direction permeability has been specified if permeability thickness and
wellbore constant properties are to be added to the dataframe.
"""
if min_kh is not None and min_kh <= 0.0:
min_kh = None
doing_kh = False
if ('KH' in column_list or min_kh is not None) and 'KH' not in pc_titles:
assert perm_i_uuid is not None, 'KH requested (or minimum specified) without I direction permeabilty being specified'
doing_kh = True
if 'WBC' in column_list and 'WBC' not in pc_titles:
assert perm_i_uuid is not None, 'WBC requested without I direction permeabilty being specified'
doing_kh = True
return min_kh, doing_kh
@staticmethod
def __verify_perm_j_k_uuids_for_kh_and_well_inflow(doing_kh, do_well_inflow, perm_i_uuid, perm_j_uuid, perm_k_uuid):
""" Verify that the J and K direction permeabilities have been specified if well inflow properties or
permeability thickness properties are to be added to the dataframe.
"""
isotropic_perm = None
if doing_kh or do_well_inflow:
if perm_j_uuid is None and perm_k_uuid is None:
isotropic_perm = True
else:
if perm_j_uuid is None:
perm_j_uuid = perm_i_uuid
if perm_k_uuid is None:
perm_k_uuid = perm_i_uuid
# following line assumes arguments are passed in same form; if not, some unnecessary maths might be done
isotropic_perm = (bu.matching_uuids(perm_i_uuid, perm_j_uuid) and
bu.matching_uuids(perm_i_uuid, perm_k_uuid))
return perm_j_uuid, perm_k_uuid, isotropic_perm
@staticmethod
def __verify_k_layers_to_be_included(min_k0, max_k0, k0_list):
"""Verify that the k layers to be included in the dataframe exist within the appropriate range."""
if min_k0 is None:
min_k0 = 0
else:
assert min_k0 >= 0
if max_k0 is not None:
assert min_k0 <= max_k0
if k0_list is not None and len(k0_list) == 0:
log.warning('no layers included for blocked well dataframe: no rows will be included')
@staticmethod
def __verify_if_angles_xyz_and_length_to_be_added(column_list, pc_titles, doing_kh, do_well_inflow, length_mode):
""" Determine if angla, anglv, x, y, z and length data are to be added as properties to the dataframe."""
doing_angles = any([('ANGLA' in column_list and 'ANGLA' not in pc_titles),
('ANGLV' in column_list and 'ANGLV' not in pc_titles), (doing_kh), (do_well_inflow)])
doing_xyz = any([('X' in column_list and 'X' not in pc_titles), ('Y' in column_list and 'Y' not in pc_titles),
('DEPTH' in column_list and 'DEPTH' not in pc_titles)])
doing_entry_exit = any([(doing_angles),
('LENGTH' in column_list and 'LENGTH' not in pc_titles and length_mode == 'straight')])
# doing_angles = (('ANGLA' in column_list and 'ANGLA' not in pc_titles) or
# ('ANGLV' in column_list and 'ANGLV' not in pc_titles) or doing_kh or do_well_inflow)
# doing_xyz = (('X' in column_list and 'X' not in pc_titles) or (
# 'Y' in column_list and 'Y' not in pc_titles) or
# ('DEPTH' in column_list and 'DEPTH' not in pc_titles))
# doing_entry_exit = doing_angles or ('LENGTH' in column_list and 'LENGTH' not in pc_titles and
# length_mode == 'straight')
return doing_angles, doing_xyz, doing_entry_exit
def __verify_number_of_grids_and_crs_units(self, column_list):
""" Verify that a GRID column is included in the dataframe if the well intersects more than one grid.
Verify that each grid's crs units are consistent in all directions.
"""
if 'GRID' not in column_list and self.number_of_grids() > 1:
log.error('creating blocked well dataframe without GRID column for well that intersects more than one grid')
grid_crs_list = []
for grid in self.grid_list:
grid_crs = crs.Crs(self.model, uuid = grid.crs_uuid)
grid_crs_list.append(grid_crs)
if grid_crs.z_units != grid_crs.xy_units and (len(column_list) > 1 or
(len(column_list) == 1 and
column_list[0] != 'GRID')) is not None:
log.error('grid ' + str(rqet.citation_title_for_node(grid.root_node)) +
' has z units different to xy units: some WELLSPEC data likely to be wrong')
return grid_crs_list
def __get_trajectory_crs_and_z_inclination(self):
if self.trajectory is None or self.trajectory.crs_uuid is None:
traj_crs = None
traj_z_inc_down = None
else:
traj_crs = crs.Crs(self.trajectory.model, uuid = self.trajectory.crs_uuid)
assert traj_crs.xy_units == traj_crs.z_units
traj_z_inc_down = traj_crs.z_inc_down
return traj_crs, traj_z_inc_down
@staticmethod
def __check_cell_depth(max_depth, grid, cell_kji0, grid_crs):
""" Check whether the maximum depth specified has been exceeded with the current interval."""
max_depth_exceeded = False
if max_depth is not None:
cell_depth = grid.centre_point(cell_kji0)[2]
if not grid_crs.z_inc_down:
cell_depth = -cell_depth
if cell_depth > max_depth:
max_depth_exceeded = True
return max_depth_exceeded
    @staticmethod
    def __skip_interval_check(max_depth, grid, cell_kji0, grid_crs, active_only, tuple_kji0, min_k0, max_k0, k0_list,
                              region_list, region_uuid, max_satw, satw_uuid, min_sato, sato_uuid, max_satg, satg_uuid):
        """Check whether any conditions are met that mean the interval should be skipped.

        returns:
           bool: True if the interval fails any of the depth, active cell, layer, region or saturation filters
        """
        # depth cut-off, based on the cell centre point
        max_depth_exceeded = BlockedWell.__check_cell_depth(max_depth = max_depth,
                                                           grid = grid,
                                                           cell_kji0 = cell_kji0,
                                                           grid_crs = grid_crs)
        # cell flagged inactive in the grid, when only active cells are wanted
        inactive_grid = active_only and grid.inactive is not None and grid.inactive[tuple_kji0]
        # layer outside the min_k0..max_k0 range, or not in the explicit layer list
        out_of_bounds_layer_1 = (min_k0 is not None and cell_kji0[0] < min_k0) or (max_k0 is not None and
                                                                                   cell_kji0[0] > max_k0)
        out_of_bounds_layer_2 = k0_list is not None and cell_kji0[0] not in k0_list
        # cell's region property value not in the list of wanted regions
        out_of_bounds_region = region_list is not None and BlockedWell.__prop_array(region_uuid,
                                                                                   grid)[tuple_kji0] not in region_list
        # saturation screening: too much water or gas, or too little oil
        saturation_limit_exceeded_1 = max_satw is not None and BlockedWell.__prop_array(satw_uuid,
                                                                                       grid)[tuple_kji0] > max_satw
        saturation_limit_exceeded_2 = min_sato is not None and BlockedWell.__prop_array(sato_uuid,
                                                                                       grid)[tuple_kji0] < min_sato
        saturation_limit_exceeded_3 = max_satg is not None and BlockedWell.__prop_array(satg_uuid,
                                                                                       grid)[tuple_kji0] > max_satg
        skip_interval = any([
            max_depth_exceeded, inactive_grid, out_of_bounds_layer_1, out_of_bounds_layer_2, out_of_bounds_region,
            saturation_limit_exceeded_1, saturation_limit_exceeded_2, saturation_limit_exceeded_3
        ])
        return skip_interval
    def __get_part_perf_fraction_for_interval(self, pc, pc_titles, ci, perforation_list, interval):
        """Get the partial perforation fraction for the interval.

        returns:
           (skip_interval, part_perf_fraction): part_perf_fraction is the perforated fraction of the
           interval's measured depth range (capped at 1.0); skip_interval is True when the interval has
           no effective perforation
        """
        skip_interval = False
        if 'PPERF' in pc_titles:
            # value already supplied as a wellspec sourced property
            part_perf_fraction = pc.single_array_ref(citation_title = 'PPERF')[ci]
        else:
            part_perf_fraction = 1.0
            if perforation_list is not None:
                perf_length = 0.0
                for perf_start, perf_end in perforation_list:
                    # NOTE(review): this marks the interval skipped if ANY perforation entry fails to
                    # overlap it, even when another entry does overlap — confirm this is intended
                    if perf_end <= self.node_mds[interval] or perf_start >= self.node_mds[interval + 1]:
                        skip_interval = True
                    if perf_start <= self.node_mds[interval]:
                        if perf_end >= self.node_mds[interval + 1]:
                            # perforation completely covers the interval
                            perf_length += self.node_mds[interval + 1] - self.node_mds[interval]
                            break
                        else:
                            # perforation covers the start of the interval only
                            perf_length += perf_end - self.node_mds[interval]
                    else:
                        if perf_end >= self.node_mds[interval + 1]:
                            # perforation covers the end of the interval only
                            perf_length += self.node_mds[interval + 1] - perf_start
                        else:
                            # perforation lies wholly within the interval
                            perf_length += perf_end - perf_start
                if perf_length == 0.0:
                    skip_interval = True
                part_perf_fraction = min(1.0, perf_length / (self.node_mds[interval + 1] - self.node_mds[interval]))
        return skip_interval, part_perf_fraction
    def __get_entry_exit_xyz_and_crs_for_interval(self, doing_entry_exit, use_face_centres, grid, cell_kji0, interval,
                                                  grid_crs, traj_crs):
        """Calculate the entry and exit points for the interval and set the entry and exit
        coordinate reference system.

        note:
           with use_face_centres the points are the centres of the entry & exit faces (in the grid's crs);
           otherwise they are interpolated from the trajectory at the interval's node mds (trajectory crs)
        """
        entry_xyz = None
        exit_xyz = None
        ee_crs = None
        if doing_entry_exit:
            assert self.trajectory is not None
            if use_face_centres:
                entry_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 0, 0],
                                             self.face_pair_indices[interval, 0, 1])
                if self.face_pair_indices[interval, 1, 0] >= 0:
                    exit_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 1, 0],
                                                self.face_pair_indices[interval, 1, 1])
                else:
                    # no exit face recorded: use the face opposite the entry face
                    exit_xyz = grid.face_centre(cell_kji0, self.face_pair_indices[interval, 0, 0],
                                                1 - self.face_pair_indices[interval, 0, 1])
                ee_crs = grid_crs
            else:
                entry_xyz = self.trajectory.xyz_for_md(self.node_mds[interval])
                exit_xyz = self.trajectory.xyz_for_md(self.node_mds[interval + 1])
                ee_crs = traj_crs
        return entry_xyz, exit_xyz, ee_crs
    def __get_length_of_interval(self, length_mode, interval, length_uom, entry_xyz, exit_xyz, ee_crs, perforation_list,
                                 part_perf_fraction, min_length):
        """Calculate the length of the interval.

        returns:
           (skip_interval, length): length is the measured depth difference or the straight line entry to
           exit distance, converted to length_uom and scaled by the perforated fraction; skip_interval is
           True when a minimum length is specified and not reached
        """
        skip_interval = False
        if length_mode == 'MD':
            length = self.node_mds[interval + 1] - self.node_mds[interval]
            if length_uom is not None and self.trajectory is not None and length_uom != self.trajectory.md_uom:
                length = bwam.convert_lengths(length, self.trajectory.md_uom, length_uom)
        else:  # use straight line length between entry and exit
            length = vec.naive_length(np.array(exit_xyz) -
                                      np.array(entry_xyz))  # trajectory crs, unless use_face_centres!
            if length_uom is not None:
                length = bwam.convert_lengths(length, ee_crs.z_units, length_uom)
            elif self.trajectory is not None:
                # default to the trajectory's md units when no explicit unit requested
                length = bwam.convert_lengths(length, ee_crs.z_units, self.trajectory.md_uom)
        if perforation_list is not None:
            # reduce the length to its perforated portion
            length *= part_perf_fraction
        if min_length is not None and length < min_length:
            skip_interval = True
        return skip_interval, length
    def __get_angles_for_interval(self, pc, pc_titles, doing_angles, set_k_face_intervals_vertical, ci, k_face_check,
                                  k_face_check_end, entry_xyz, exit_xyz, traj_z_inc_down, grid, grid_crs, cell_kji0,
                                  anglv_ref, angla_plane_ref):
        """Calculate angla, anglv and related trigonometirc transforms for the interval.

        note:
           wellspec sourced ANGLA & ANGLV properties take precedence over computation; a k face to k face
           interval may be treated as vertical (zero angles) when set_k_face_intervals_vertical is set
        """
        # defaults correspond to a vertical well bore segment
        sine_anglv = sine_angla = 0.0
        cosine_anglv = cosine_angla = 1.0
        anglv = pc.single_array_ref(citation_title = 'ANGLV')[ci] if 'ANGLV' in pc_titles else None
        angla = pc.single_array_ref(citation_title = 'ANGLA')[ci] if 'ANGLA' in pc_titles else None
        if doing_angles and not (set_k_face_intervals_vertical and
                                 (np.all(self.face_pair_indices[ci] == k_face_check) or
                                  np.all(self.face_pair_indices[ci] == k_face_check_end))):
            anglv, sine_anglv, cosine_anglv, vector, a_ref_vector = BlockedWell.__get_anglv_for_interval(
                anglv = anglv,
                entry_xyz = entry_xyz,
                exit_xyz = exit_xyz,
                traj_z_inc_down = traj_z_inc_down,
                grid = grid,
                grid_crs = grid_crs,
                cell_kji0 = cell_kji0,
                anglv_ref = anglv_ref,
                angla_plane_ref = angla_plane_ref)
            if anglv != 0.0:
                # angla is only meaningful when the well bore deviates from the anglv reference direction
                angla, sine_angla, cosine_angla = BlockedWell.__get_angla_for_interval(angla = angla,
                                                                                      grid = grid,
                                                                                      cell_kji0 = cell_kji0,
                                                                                      vector = vector,
                                                                                      a_ref_vector = a_ref_vector)
        else:
            if angla is None:
                angla = 0.0
            if anglv is None:
                anglv = 0.0
        return anglv, sine_anglv, cosine_anglv, angla, sine_angla, cosine_angla
@staticmethod
def __get_angla_for_interval(angla, grid, cell_kji0, vector, a_ref_vector):
"""Calculate angla and related trigonometric transforms for the interval."""
# project well vector and i-axis vector onto plane defined by normal vector a_ref_vector
i_axis = grid.interface_vector(cell_kji0, 2)
i_axis = vec.unit_vector(i_axis)
if a_ref_vector is not None: # project vector and i axis onto a plane
vector -= vec.dot_product(vector, a_ref_vector) * a_ref_vector
vector = vec.unit_vector(vector)
# log.debug('i axis unit vector: ' + str(i_axis))
i_axis -= vec.dot_product(i_axis, a_ref_vector) * a_ref_vector
i_axis = vec.unit_vector(i_axis)
# log.debug('i axis unit vector in reference plane: ' + str(i_axis))
if angla is not None:
angla_rad = vec.radians_from_degrees(angla)
cosine_angla = maths.cos(angla_rad)
sine_angla = maths.sin(angla_rad)
else:
cosine_angla = min(max(vec.dot_product(vector, i_axis), -1.0), 1.0)
angla_rad = maths.acos(cosine_angla)
# negate angla if vector is 'clockwise from' i_axis when viewed from above, projected in the xy plane
# todo: have discussion around angla sign under different ijk handedness (and z inc direction?)
sine_angla = maths.sin(angla_rad)
angla = vec.degrees_from_radians(angla_rad)
if vec.clockwise((0.0, 0.0), i_axis, vector) > 0.0:
angla = -angla
angla_rad = -angla_rad ## as angle_rad before --> typo?
sine_angla = -sine_angla
# log.debug('angla: ' + str(angla))
return angla, sine_angla, cosine_angla
    @staticmethod
    def __get_anglv_for_interval(anglv, entry_xyz, exit_xyz, traj_z_inc_down, grid, grid_crs, cell_kji0, anglv_ref,
                                 angla_plane_ref):
        """Get anglv and related trigonometric transforms for the interval.

        returns:
           (anglv, sine_anglv, cosine_anglv, vector, a_ref_vector): vector is the unit well bore vector
           (z sense matching the grid crs); a_ref_vector is the projection plane normal for angla
        """
        vector = vec.unit_vector(np.array(exit_xyz) - np.array(entry_xyz))  # nominal wellbore vector for interval
        if traj_z_inc_down is not None and traj_z_inc_down != grid_crs.z_inc_down:
            # flip z component to match the grid crs z direction
            vector[2] = -vector[2]
        v_ref_vector = BlockedWell.__get_ref_vector(grid, grid_crs, cell_kji0, anglv_ref)
        # log.debug('v ref vector: ' + str(v_ref_vector))
        if angla_plane_ref == anglv_ref:
            a_ref_vector = v_ref_vector
        else:
            a_ref_vector = BlockedWell.__get_ref_vector(grid, grid_crs, cell_kji0, angla_plane_ref)
        # log.debug('a ref vector: ' + str(a_ref_vector))
        if anglv is not None:
            # anglv already known (wellspec sourced): just derive its sine and cosine
            anglv_rad = vec.radians_from_degrees(anglv)
            cosine_anglv = maths.cos(anglv_rad)
            sine_anglv = maths.sin(anglv_rad)
        else:
            # clamp the dot product into [-1, 1] to guard against rounding before acos
            cosine_anglv = min(max(vec.dot_product(vector, v_ref_vector), -1.0), 1.0)
            anglv_rad = maths.acos(cosine_anglv)
            sine_anglv = maths.sin(anglv_rad)
            anglv = vec.degrees_from_radians(anglv_rad)
        # log.debug('anglv: ' + str(anglv))
        return anglv, sine_anglv, cosine_anglv, vector, a_ref_vector
    @staticmethod
    def __get_ntg_and_directional_perm_for_interval(doing_kh, do_well_inflow, ntg_uuid, grid, tuple_kji0,
                                                    isotropic_perm, preferential_perforation, part_perf_fraction,
                                                    perm_i_uuid, perm_j_uuid, perm_k_uuid):
        """Get the net-to-gross and directional permeability arrays for the interval.

        returns:
           (ntg_is_one, k_i, k_j, k_k): flag for a net-to-gross of (close to) one, plus effective
           directional permeabilities (k_i & k_j scaled by net-to-gross); k values are None when
           neither kh nor well inflow is being computed
        """
        ntg_is_one = False
        k_i = k_j = k_k = None
        if doing_kh or do_well_inflow:
            if ntg_uuid is None:
                ntg = 1.0
                ntg_is_one = True
            else:
                ntg = BlockedWell.__prop_array(ntg_uuid, grid)[tuple_kji0]
                # treat a net-to-gross within 0.1% of one as exactly one
                ntg_is_one = maths.isclose(ntg, 1.0, rel_tol = 0.001)
            if isotropic_perm and ntg_is_one:
                k_i = k_j = k_k = BlockedWell.__prop_array(perm_i_uuid, grid)[tuple_kji0]
            else:
                if preferential_perforation and not ntg_is_one:
                    if part_perf_fraction <= ntg:
                        ntg = 1.0  # effective ntg when perforated intervals are in pay
                    else:
                        ntg /= part_perf_fraction  # adjusted ntg when some perforations in non-pay
                # todo: check netgross facet type in property perm i & j parts: if set to gross then don't multiply by ntg below
                k_i = BlockedWell.__prop_array(perm_i_uuid, grid)[tuple_kji0] * ntg
                k_j = BlockedWell.__prop_array(perm_j_uuid, grid)[tuple_kji0] * ntg
                k_k = BlockedWell.__prop_array(perm_k_uuid, grid)[tuple_kji0]
        return ntg_is_one, k_i, k_j, k_k
    @staticmethod
    def __get_kh_for_interval(doing_kh, isotropic_perm, ntg_is_one, length, perm_i_uuid, grid, tuple_kji0, k_i, k_j,
                              k_k, anglv, sine_anglv, cosine_anglv, sine_angla, cosine_angla, min_kh, pc, pc_titles,
                              ci):
        """Get the permeability-thickness value for the interval.

        returns:
           (skip_interval, kh): skip_interval is True when a minimum kh is specified and not reached;
           kh is None if neither computed nor supplied as a wellspec sourced property
        """
        skip_interval = False
        if doing_kh:
            # compute kh from the directional permeabilities and the well bore orientation
            kh = BlockedWell.__get_kh_if_doing_kh(isotropic_perm = isotropic_perm,
                                                  ntg_is_one = ntg_is_one,
                                                  length = length,
                                                  perm_i_uuid = perm_i_uuid,
                                                  grid = grid,
                                                  tuple_kji0 = tuple_kji0,
                                                  k_i = k_i,
                                                  k_j = k_j,
                                                  k_k = k_k,
                                                  anglv = anglv,
                                                  sine_anglv = sine_anglv,
                                                  cosine_anglv = cosine_anglv,
                                                  sine_angla = sine_angla,
                                                  cosine_angla = cosine_angla)
            if min_kh is not None and kh < min_kh:
                skip_interval = True
        elif 'KH' in pc_titles:
            # wellspec sourced kh value
            kh = pc.single_array_ref(citation_title = 'KH')[ci]
        else:
            kh = None
        return skip_interval, kh
@staticmethod
def __get_kh_if_doing_kh(isotropic_perm, ntg_is_one, length, perm_i_uuid, grid, tuple_kji0, k_i, k_j, k_k, anglv,
sine_anglv, cosine_anglv, sine_angla, cosine_angla):
if isotropic_perm and ntg_is_one:
kh = length * BlockedWell.__prop_array(perm_i_uuid, grid)[tuple_kji0]
else:
if np.isnan(k_i) or np.isnan(k_j):
kh = 0.0
elif anglv == 0.0:
kh = length * maths.sqrt(k_i * k_j)
elif np.isnan(k_k):
kh = 0.0
else:
k_e = maths.pow(k_i * k_j * k_k, 1.0 / 3.0)
if k_e == 0.0:
kh = 0.0
else:
l_i = length * maths.sqrt(k_e / k_i) * sine_anglv * cosine_angla
l_j = length * maths.sqrt(k_e / k_j) * sine_anglv * sine_angla
l_k = length * maths.sqrt(k_e / k_k) * cosine_anglv
l_p = maths.sqrt(l_i * l_i + l_j * l_j + l_k * l_k)
kh = k_e * l_p
return kh
@staticmethod
def __get_pc_arrays_for_interval(pc, pc_titles, ci, length, radw, skin):
"""Get the property collection arrays for the interval."""
if 'LENGTH' in pc_titles:
length = pc.single_array_ref(citation_title = 'LENGTH')[ci]
if 'RADW' in pc_titles:
radw = pc.single_array_ref(citation_title = 'RADW')[ci]
assert radw > 0.0
if 'SKIN' in pc_titles:
skin = pc.single_array_ref(citation_title = 'SKIN')[ci]
radb = wi = wbc = None
if 'RADB' in pc_titles:
radb = pc.single_array_ref(citation_title = 'RADB')[ci]
if 'WI' in pc_titles:
wi = pc.single_array_ref(citation_title = 'WI')[ci]
if 'WBC' in pc_titles:
wbc = pc.single_array_ref(citation_title = 'WBC')[ci]
return length, radw, skin, radb, wi, wbc
    @staticmethod
    def __get_well_inflow_parameters_for_interval(do_well_inflow, isotropic_perm, ntg_is_one, k_i, k_j, k_k, sine_anglv,
                                                  cosine_anglv, sine_angla, cosine_angla, grid, cell_kji0, radw, radb,
                                                  wi, wbc, skin, kh, length_uom, column_list):
        """Compute block radius, well inflow factor and wellbore constant for the interval, where not supplied.

        note:
           follows a Peaceman style analysis — TODO confirm; the wbc conversion constant assumes
           length_uom is either 'm' (metric) or feet (imperial)
        """
        if do_well_inflow:
            # effective directional permeabilities and effective wellbore radius for the deviated well bore
            k_ei, k_ej, k_ek, radw_e = BlockedWell.__calculate_ke_and_radw_e(isotropic_perm = isotropic_perm,
                                                                            ntg_is_one = ntg_is_one,
                                                                            radw = radw,
                                                                            k_i = k_i,
                                                                            k_j = k_j,
                                                                            k_k = k_k,
                                                                            sine_anglv = sine_anglv,
                                                                            cosine_anglv = cosine_anglv,
                                                                            sine_angla = sine_angla,
                                                                            cosine_angla = cosine_angla)
            cell_axial_vectors = grid.interface_vectors_kji(cell_kji0)
            d2 = np.empty(3)
            for axis in range(3):
                # squared cell dimension in each axial direction
                d2[axis] = np.sum(cell_axial_vectors[axis] * cell_axial_vectors[axis])
            radb_e = BlockedWell.__calculate_radb_e(k_ei = k_ei,
                                                    k_ej = k_ej,
                                                    k_ek = k_ek,
                                                    k_i = k_i,
                                                    k_j = k_j,
                                                    k_k = k_k,
                                                    d2 = d2,
                                                    sine_anglv = sine_anglv,
                                                    cosine_anglv = cosine_anglv,
                                                    sine_angla = sine_angla,
                                                    cosine_angla = cosine_angla)
            if radb is None:
                # scale the effective block radius back to the actual wellbore radius
                radb = radw * radb_e / radw_e
            if wi is None:
                # well inflow factor; skin modifies the log term
                wi = 0.0 if radb <= 0.0 else 2.0 * maths.pi / (maths.log(radb / radw) + skin)
            if 'WBC' in column_list and wbc is None:
                # wellbore constant; conversion constant depends on metric or imperial length units
                conversion_constant = 8.5270171e-5 if length_uom == 'm' else 0.006328286
                wbc = conversion_constant * kh * wi  # note: pperf already accounted for in kh
        return radb, wi, wbc
@staticmethod
def __calculate_ke_and_radw_e(isotropic_perm, ntg_is_one, radw, k_i, k_j, k_k, sine_anglv, cosine_anglv, sine_angla,
cosine_angla):
if isotropic_perm and ntg_is_one:
k_ei = k_ej = k_ek = k_i
radw_e = radw
else:
k_ei = maths.sqrt(k_j * k_k)
k_ej = maths.sqrt(k_i * k_k)
k_ek = maths.sqrt(k_i * k_j)
r_wi = 0.0 if k_ei == 0.0 else 0.5 * radw * (maths.sqrt(k_ei / k_j) + maths.sqrt(k_ei / k_k))
r_wj = 0.0 if k_ej == 0.0 else 0.5 * radw * (maths.sqrt(k_ej / k_i) + maths.sqrt(k_ej / k_k))
r_wk = 0.0 if k_ek == 0.0 else 0.5 * radw * (maths.sqrt(k_ek / k_i) + maths.sqrt(k_ek / k_j))
rwi = r_wi * sine_anglv * cosine_angla
rwj = r_wj * sine_anglv * sine_angla
rwk = r_wk * cosine_anglv
radw_e = maths.sqrt(rwi * rwi + rwj * rwj + rwk * rwk)
if radw_e == 0.0:
radw_e = radw # no permeability in this situation anyway
return k_ei, k_ej, k_ek, radw_e
@staticmethod
def __calculate_radb_e(k_ei, k_ej, k_ek, k_i, k_j, k_k, d2, sine_anglv, cosine_anglv, sine_angla, cosine_angla):
r_bi = 0.0 if k_ei == 0.0 else 0.14 * maths.sqrt(k_ei * (d2[1] / k_j + d2[0] / k_k))
r_bj = 0.0 if k_ej == 0.0 else 0.14 * maths.sqrt(k_ej * (d2[2] / k_i + d2[0] / k_k))
r_bk = 0.0 if k_ek == 0.0 else 0.14 * maths.sqrt(k_ek * (d2[2] / k_i + d2[1] / k_j))
rbi = r_bi * sine_anglv * cosine_angla
rbj = r_bj * sine_anglv * sine_angla
rbk = r_bk * cosine_anglv
radb_e = maths.sqrt(rbi * rbi + rbj * rbj + rbk * rbk)
return radb_e
def __get_xyz_arrays_for_interval(self, doing_xyz, length_mode, length_uom, md, traj_crs, depth_inc_down,
traj_z_inc_down, entry_xyz, exit_xyz, ee_crs, pc, pc_titles, ci):
""" Get the x, y and z arrays for the interval."""
xyz = (np.NaN, np.NaN, np.NaN)
if doing_xyz:
xyz = self.__get_xyz_if_doing_xyz(length_mode = length_mode,
md = md,
length_uom = length_uom,
traj_crs = traj_crs,
depth_inc_down = depth_inc_down,
traj_z_inc_down = traj_z_inc_down,
entry_xyz = entry_xyz,
exit_xyz = exit_xyz,
ee_crs = ee_crs)
xyz = np.array(xyz)
for i, col_header in enumerate(['X', 'Y', 'DEPTH']):
if col_header in pc_titles:
xyz[i] = pc.single_array_ref(citation_title = col_header)[ci]
return xyz
    def __get_xyz_if_doing_xyz(self, length_mode, md, length_uom, traj_crs, depth_inc_down, traj_z_inc_down, exit_xyz,
                               entry_xyz, ee_crs):
        """Return the xyz point for the interval: trajectory point at md, or the entry/exit midpoint.

        note:
           length units and z sign are adjusted to honour length_uom and depth_inc_down
        """
        if length_mode == 'MD' and self.trajectory is not None:
            xyz = self.trajectory.xyz_for_md(md)
            if length_uom is not None and length_uom != self.trajectory.md_uom:
                # NOTE(review): conversion source is traj_crs.z_units though the guard compares against
                # the trajectory's md_uom — confirm these are expected to agree
                bwam.convert_lengths(xyz, traj_crs.z_units, length_uom)
            if depth_inc_down and traj_z_inc_down is False:
                xyz[2] = -xyz[2]
        else:
            # use the midpoint of the straight line between entry and exit points
            xyz = 0.5 * (np.array(exit_xyz) + np.array(entry_xyz))
            if length_uom is not None and length_uom != ee_crs.z_units:
                bwam.convert_lengths(xyz, ee_crs.z_units, length_uom)
            if depth_inc_down and ee_crs.z_inc_down is False:
                xyz[2] = -xyz[2]
        return xyz
def __get_md_array_in_correct_units_for_interval(self, md, length_uom, pc, pc_titles, ci):
"""Convert the measurd depth valeus to the correct units and get the measured depths property collection
array.
"""
if length_uom is not None and self.trajectory is not None and length_uom != self.trajectory.md_uom:
md = bwam.convert_lengths(md, self.trajectory.md_uom, length_uom)
if 'MD' in pc_titles:
md = pc.single_array_ref(citation_title = 'MD')[ci]
return md
    @staticmethod
    def __append_interval_data_to_dataframe(df, grid_name, radw, skin, angla, anglv, length, kh, xyz, md, stat,
                                            part_perf_fraction, radb, wi, wbc, column_list, one_based, row_dict,
                                            cell_kji0, row_ci_list, ci):
        """Append the row of data corresponding to the interval to the dataframe.

        note:
           a new DataFrame is constructed for each appended row, which is quadratic in the number of
           intervals overall; acceptable for typical well sizes
        """
        # candidate values for every supported column, in a fixed parallel order
        column_names = [
            'GRID', 'RADW', 'SKIN', 'ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'STAT', 'PPERF', 'RADB',
            'WI', 'WBC'
        ]
        column_values = [
            grid_name, radw, skin, angla, anglv, length, kh, xyz[2], md, xyz[0], xyz[1], stat, part_perf_fraction, radb,
            wi, wbc
        ]
        column_values_dict = dict(zip(column_names, column_values))
        # existing dataframe contents as plain lists, keyed by column title
        data = df.to_dict()
        data = {k: list(v.values()) for k, v in data.items()}
        for col_index, col in enumerate(column_list):
            if col_index < 3:
                # the first three columns are the cell indices, in reverse (I, J, K from kji0) order
                if one_based:
                    row_dict[col] = [cell_kji0[2 - col_index] + 1]
                else:
                    row_dict[col] = [cell_kji0[2 - col_index]]
            else:
                row_dict[col] = [column_values_dict[col]]
        for col, vals in row_dict.items():
            if col in data:
                data[col].extend(vals)
            else:
                data[col] = vals
        df = pd.DataFrame(data)
        # remember which cell index this row refers to, for later property attachment
        row_ci_list.append(ci)
        return df
def __add_as_properties(self, df, add_as_properties, extra_columns_list, row_ci_list, length_uom):
"""Checks that the column can be added as a property part then creates said part."""
if add_as_properties:
if isinstance(add_as_properties, list):
for col in add_as_properties:
assert col in extra_columns_list
property_columns = add_as_properties
else:
property_columns = extra_columns_list
self._add_df_properties(df, property_columns, row_ci_list = row_ci_list, length_uom = length_uom)
def _add_df_properties(self, df, columns, row_ci_list = None, length_uom = None):
# creates a property part for each named column, based on the dataframe values
# column name used as the citation title
# self must already exist as a part in the model
# currently only handles single grid situations
# todo: rewrite to add separate property objects for each grid references by the blocked well
log.debug('_add_df_props: df:')
log.debug(f'\n{df}')
log.debug(f'columns: {columns}')
assert len(self.grid_list) == 1
if columns is None or len(columns) == 0 or len(df) == 0:
return
if row_ci_list is None:
row_ci_list = np.arange(self.cell_count)
assert len(row_ci_list) == len(df)
if length_uom is None:
length_uom = self.trajectory.md_uom
extra_pc = rqp.PropertyCollection()
extra_pc.set_support(support = self)
ci_map = np.array(row_ci_list, dtype = int)
for e in columns:
extra = e.upper()
pk, uom = self.__set_pk_and_uom_for_df_properties(extra = extra, length_uom = length_uom)
# 'SKIN': use defaults for now; todo: create local property kind for skin
expanded = np.full(self.cell_count, np.NaN)
expanded[ci_map] = df[extra]
extra_pc.add_cached_array_to_imported_list(expanded,
'blocked well dataframe',
extra,
discrete = False,
uom = uom,
property_kind = pk,
local_property_kind_uuid = None,
facet_type = None,
facet = None,
realization = None,
indexable_element = 'cells',
count = 1)
extra_pc.write_hdf5_for_imported_list()
extra_pc.create_xml_for_imported_list_and_add_parts_to_model()
def __set_pk_and_uom_for_df_properties(self, extra, length_uom):
"""Set the property kind and unit of measure for all properties in the dataframe."""
column_list = ['ANGLA', 'ANGLV', 'KH', 'PPERF']
uom_list = ['dega', 'dega', 'mD.' + length_uom, length_uom + '/' + length_uom]
pk_list = ['azimuth', 'inclination', 'permeability_length', 'continuous'
] # neither azimuth nor dip are correct property kinds; todo: create local property kinds
if extra in column_list:
list_position = column_list.index(extra)
uom, pk = uom_list[list_position], pk_list[list_position]
elif extra in ['LENGTH', 'MD', 'X', 'Y', 'DEPTH', 'RADW']:
uom, pk = self.__set_pk_and_uom_for_length_based_properties(length_uom = length_uom, extra = extra)
else:
uom = 'Euc'
pk = 'continuous'
return uom, pk
def __set_pk_and_uom_for_length_based_properties(self, length_uom, extra):
if length_uom is None or length_uom == 'Euc':
if extra in ['LENGTH', 'MD']:
uom = self.trajectory.md_uom
elif extra in ['X', 'Y', 'RADW']:
uom = self.grid_list[0].xy_units()
else:
uom = self.grid_list[0].z_units()
else:
uom = length_uom
if extra == 'DEPTH':
pk = 'depth'
else:
pk = 'length'
return pk, uom
    def static_kh(self,
                  ntg_uuid = None,
                  perm_i_uuid = None,
                  perm_j_uuid = None,
                  perm_k_uuid = None,
                  satw_uuid = None,
                  sato_uuid = None,
                  satg_uuid = None,
                  region_uuid = None,
                  active_only = False,
                  min_k0 = None,
                  max_k0 = None,
                  k0_list = None,
                  min_length = None,
                  min_kh = None,
                  max_depth = None,
                  max_satw = None,
                  min_sato = None,
                  max_satg = None,
                  perforation_list = None,
                  region_list = None,
                  set_k_face_intervals_vertical = False,
                  anglv_ref = 'gravity',
                  angla_plane_ref = None,
                  length_mode = 'MD',
                  length_uom = None,
                  use_face_centres = False,
                  preferential_perforation = True):
        """Returns the total static K.H (permeability x height) for the blocked well.

        note:
           length units are those of trajectory md_uom unless length_uom is set;
           see doc string for dataframe() method for argument descriptions; perm_i_uuid required
        """
        # delegate filtering and per-interval K.H computation to dataframe(), requesting only
        # the KH extra column (the zero-based I, J, K index columns are not used here)
        df = self.dataframe(i_col = 'I',
                            j_col = 'J',
                            k_col = 'K',
                            one_based = False,
                            extra_columns_list = ['KH'],
                            ntg_uuid = ntg_uuid,
                            perm_i_uuid = perm_i_uuid,
                            perm_j_uuid = perm_j_uuid,
                            perm_k_uuid = perm_k_uuid,
                            satw_uuid = satw_uuid,
                            sato_uuid = sato_uuid,
                            satg_uuid = satg_uuid,
                            region_uuid = region_uuid,
                            active_only = active_only,
                            min_k0 = min_k0,
                            max_k0 = max_k0,
                            k0_list = k0_list,
                            min_length = min_length,
                            min_kh = min_kh,
                            max_depth = max_depth,
                            max_satw = max_satw,
                            min_sato = min_sato,
                            max_satg = max_satg,
                            perforation_list = perforation_list,
                            region_list = region_list,
                            set_k_face_intervals_vertical = set_k_face_intervals_vertical,
                            anglv_ref = anglv_ref,
                            angla_plane_ref = angla_plane_ref,
                            length_mode = length_mode,
                            length_uom = length_uom,
                            use_face_centres = use_face_centres,
                            preferential_perforation = preferential_perforation)
        # total K.H is the sum over the blocked intervals surviving the filters above
        return sum(df['KH'])
def write_wellspec(self,
wellspec_file,
well_name = None,
mode = 'a',
extra_columns_list = [],
ntg_uuid = None,
perm_i_uuid = None,
perm_j_uuid = None,
perm_k_uuid = None,
satw_uuid = None,
sato_uuid = None,
satg_uuid = None,
region_uuid = None,
radw = None,
skin = None,
stat = None,
active_only = False,
min_k0 = None,
max_k0 = None,
k0_list = None,
min_length = None,
min_kh = None,
max_depth = None,
max_satw = None,
min_sato = None,
max_satg = None,
perforation_list = None,
region_list = None,
set_k_face_intervals_vertical = False,
depth_inc_down = True,
anglv_ref = 'gravity',
angla_plane_ref = None,
length_mode = 'MD',
length_uom = None,
preferential_perforation = True,
space_instead_of_tab_separator = True,
align_columns = True,
preceeding_blank_lines = 0,
trailing_blank_lines = 0,
length_uom_comment = False,
write_nexus_units = True,
float_format = '5.3'):
"""Writes Nexus WELLSPEC keyword to an ascii file.
returns:
pandas DataFrame containing data that has been written to the wellspec file
note:
see doc string for dataframe() method for most of the argument descriptions;
align_columns and float_format arguments are deprecated and no longer used
"""
assert wellspec_file, 'no output file specified to write WELLSPEC to'
col_width_dict = {
'IW': 4,
'JW': 4,
'L': 4,
'ANGLA': 8,
'ANGLV': 8,
'LENGTH': 8,
'KH': 10,
'DEPTH': 10,
'MD': 10,
'X': 8,
'Y': 12,
'SKIN': 7,
'RADW': 5,
'PPERF': 5
}
well_name = self.__get_well_name(well_name = well_name)
df = self.dataframe(one_based = True,
extra_columns_list = extra_columns_list,
ntg_uuid = ntg_uuid,
perm_i_uuid = perm_i_uuid,
perm_j_uuid = perm_j_uuid,
perm_k_uuid = perm_k_uuid,
satw_uuid = satw_uuid,
sato_uuid = sato_uuid,
satg_uuid = satg_uuid,
region_uuid = region_uuid,
radw = radw,
skin = skin,
stat = stat,
active_only = active_only,
min_k0 = min_k0,
max_k0 = max_k0,
k0_list = k0_list,
min_length = min_length,
min_kh = min_kh,
max_depth = max_depth,
max_satw = max_satw,
min_sato = min_sato,
max_satg = max_satg,
perforation_list = perforation_list,
region_list = region_list,
depth_inc_down = depth_inc_down,
set_k_face_intervals_vertical = set_k_face_intervals_vertical,
anglv_ref = anglv_ref,
angla_plane_ref = angla_plane_ref,
length_mode = length_mode,
length_uom = length_uom,
preferential_perforation = preferential_perforation)
sep = ' ' if space_instead_of_tab_separator else '\t'
with open(wellspec_file, mode = mode) as fp:
for _ in range(preceeding_blank_lines):
fp.write('\n')
self.__write_wellspec_file_units_metadata(write_nexus_units = write_nexus_units,
fp = fp,
length_uom = length_uom,
length_uom_comment = length_uom_comment,
extra_columns_list = extra_columns_list,
well_name = well_name)
BlockedWell.__write_wellspec_file_columns(df = df, fp = fp, col_width_dict = col_width_dict, sep = sep)
fp.write('\n')
BlockedWell.__write_wellspec_file_rows_from_dataframe(df = df,
fp = fp,
col_width_dict = col_width_dict,
sep = sep)
for _ in range(trailing_blank_lines):
fp.write('\n')
return df
@staticmethod
def __tidy_well_name(well_name):
nexus_friendly = ''
previous_underscore = False
for ch in well_name:
if not 32 <= ord(ch) < 128 or ch in ' ,!*#':
ch = '_'
if not (previous_underscore and ch == '_'):
nexus_friendly += ch
previous_underscore = (ch == '_')
if not nexus_friendly:
well_name = 'WELL_X'
return nexus_friendly
@staticmethod
def __is_float_column(col_name):
if col_name.upper() in ['ANGLA', 'ANGLV', 'LENGTH', 'KH', 'DEPTH', 'MD', 'X', 'Y', 'SKIN', 'RADW', 'PPERF']:
return True
return False
@staticmethod
def __is_int_column(col_name):
if col_name.upper() in ['IW', 'JW', 'L']:
return True
return False
def __get_well_name(self, well_name):
""" Get the name of the well whose data is to be written to the Nexus WELLSPEC file."""
if not well_name:
if self.well_name:
well_name = self.well_name
elif self.root is not None:
well_name = rqet.citation_title_for_node(self.root)
elif self.wellbore_interpretation is not None:
well_name = self.wellbore_interpretation.title
elif self.trajectory is not None:
well_name = self.trajectory.title
else:
log.warning('no well name identified for use in WELLSPEC')
well_name = 'WELLNAME'
well_name = BlockedWell.__tidy_well_name(well_name)
return well_name
def __write_wellspec_file_units_metadata(self, write_nexus_units, fp, length_uom, length_uom_comment,
extra_columns_list, well_name):
"""Write the units of measure (uom) and system of measure for length in the WELLSPEC file.
Also write a comment on the length uom if necessary.
"""
if write_nexus_units:
length_uom_system_list = ['METRIC', 'ENGLISH']
length_uom_index = ['m', 'ft'].index(length_uom)
fp.write(f'{length_uom_system_list[length_uom_index]}\n\n')
if length_uom_comment and self.trajectory is not None and ('LENGTH' in extra_columns_list or 'MD'
in extra_columns_list or 'KH' in extra_columns_list):
fp.write(f'! Length units along wellbore: {self.trajectory.md_uom if length_uom is None else length_uom}\n')
fp.write('WELLSPEC ' + str(well_name) + '\n')
@staticmethod
def __write_wellspec_file_columns(df, fp, col_width_dict, sep):
"""Write the column names to the WELLSPEC file."""
for col_name in df.columns:
if col_name in col_width_dict:
width = col_width_dict[col_name]
else:
width = 10
form = '{0:>' + str(width) + '}'
fp.write(sep + form.format(col_name))
@staticmethod
def __write_wellspec_file_rows_from_dataframe(df, fp, col_width_dict, sep):
""" Writes the non-blank lines of a Nexus WELLSPEC file from a BlockedWell.dataframe """
for row_info in df.iterrows():
row = row_info[1]
for col_name in df.columns:
try:
if col_name in col_width_dict:
width = col_width_dict[col_name]
else:
width = 10
if BlockedWell.__is_float_column(col_name):
form = '{0:>' + str(width) + '.3f}'
fp.write(sep + form.format(float(row[col_name])))
else:
form = '{0:>' + str(width) + '}'
if BlockedWell.__is_int_column(col_name):
fp.write(sep + form.format(int(row[col_name])))
else:
fp.write(sep + form.format(str(row[col_name])))
except Exception:
fp.write(sep + str(row[col_name]))
fp.write('\n')
def kji0_marker(self, active_only = True):
"""Convenience method returning (k0, j0, i0), grid_uuid of first blocked interval."""
cells, grids = self.cell_indices_and_grid_list()
if cells is None or grids is None or len(grids) == 0:
return None, None, None, None
return cells[0], grids[0].uuid
def xyz_marker(self, active_only = True):
"""Convenience method returning (x, y, z), crs_uuid of perforation in first blocked interval.
notes:
active_only argument not yet in use;
returns None, None if no blocked interval found
"""
cells, grids = self.cell_indices_and_grid_list()
if cells is None or grids is None or len(grids) == 0:
return None, None
node_index = 0
while node_index < self.node_count - 1 and self.grid_indices[node_index] == -1:
node_index += 1
if node_index >= self.node_count - 1:
return None, None
md = 0.5 * (self.node_mds[node_index] + self.node_mds[node_index + 1])
xyz = self.trajectory.xyz_for_md(md)
return xyz, self.trajectory.crs_uuid
    def create_feature_and_interpretation(self, shared_interpretation = True):
        """Instantiate new empty WellboreFeature and WellboreInterpretation objects.

        Uses the Blocked well citation title as the well name.

        If an interpretation (and its feature) related to the trajectory already exists in the
        model, those objects are reused (the interpretation only when shared_interpretation is
        True); otherwise new objects are created and flagged for later xml writing.

        note:
           assumes self.trajectory is not None when a new feature has to be created, as the
           trajectory title is used as the feature name -- TODO confirm intended for
           trajectory-less blocked wells
        """
        if self.trajectory is not None:
            # look for an existing interpretation related to the trajectory
            traj_interp_uuid = self.model.uuid(obj_type = 'WellboreInterpretation', related_uuid = self.trajectory.uuid)
            if traj_interp_uuid is not None:
                if shared_interpretation:
                    self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,
                                                                              uuid = traj_interp_uuid)
                # reuse the feature related to that interpretation, if present
                traj_feature_uuid = self.model.uuid(obj_type = 'WellboreFeature', related_uuid = traj_interp_uuid)
                if traj_feature_uuid is not None:
                    self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, uuid = traj_feature_uuid)
        if self.wellbore_feature is None:
            # no existing feature found: create a new one named after the trajectory
            self.wellbore_feature = rqo.WellboreFeature(parent_model = self.model, feature_name = self.trajectory.title)
            self.feature_to_be_written = True
        if self.wellbore_interpretation is None:
            self.wellbore_interpretation = rqo.WellboreInterpretation(parent_model = self.model,
                                                                      wellbore_feature = self.wellbore_feature)
            if self.trajectory.wellbore_interpretation is None and shared_interpretation:
                # let the trajectory share the newly created interpretation
                self.trajectory.wellbore_interpretation = self.wellbore_interpretation
            self.interpretation_to_be_written = True
    def create_md_datum_and_trajectory(self,
                                       grid,
                                       trajectory_mds,
                                       trajectory_points,
                                       length_uom,
                                       well_name,
                                       set_depth_zero = False,
                                       set_tangent_vectors = False,
                                       create_feature_and_interp = True):
        """Creates an Md Datum object and a (simulation) Trajectory object for this blocked well.

        arguments:
           grid: the grid object whose crs is used for the md datum and trajectory
           trajectory_mds: list or 1D array of measured depths for the synthetic trajectory nodes
           trajectory_points: list or array of xyz points corresponding to trajectory_mds
           length_uom: units of measure for the trajectory lengths
           well_name: name for the new trajectory
           set_depth_zero (bool): if True, the md datum z is set to zero rather than the first point's z
           set_tangent_vectors (bool): passed through to the Trajectory constructor
           create_feature_and_interp (bool): if True, wellbore feature and interpretation objects are
              also created (see create_feature_and_interpretation)

        note:
           not usually called directly; used by import methods
        """

        # create md datum node for synthetic trajectory, using crs for grid
        datum_location = trajectory_points[0].copy()
        if set_depth_zero:
            datum_location[2] = 0.0
        datum = MdDatum(self.model,
                        crs_uuid = grid.crs_uuid,
                        location = datum_location,
                        md_reference = 'mean sea level')

        # create synthetic trajectory object, using crs for grid
        trajectory_mds_array = np.array(trajectory_mds)
        trajectory_xyz_array = np.array(trajectory_points)
        trajectory_df = pd.DataFrame({
            'MD': trajectory_mds_array,
            'X': trajectory_xyz_array[..., 0],
            'Y': trajectory_xyz_array[..., 1],
            'Z': trajectory_xyz_array[..., 2]
        })
        self.trajectory = Trajectory(self.model,
                                     md_datum = datum,
                                     data_frame = trajectory_df,
                                     length_uom = length_uom,
                                     well_name = well_name,
                                     set_tangent_vectors = set_tangent_vectors)
        # flag the new trajectory for hdf5/xml writing by later calls
        self.trajectory_to_be_written = True

        if create_feature_and_interp:
            self.create_feature_and_interpretation()
    def create_xml(self,
                   ext_uuid = None,
                   create_for_trajectory_if_needed = True,
                   add_as_part = True,
                   add_relationships = True,
                   title = None,
                   originator = None):
        """Create a blocked wellbore representation node from this BlockedWell object, optionally add as part.

        note:
           trajectory xml node must be in place before calling this function;
           witsml log reference, interval stratigraphic units, and cell fluid phase units not yet supported

        :meta common:
        """

        assert self.trajectory is not None, 'trajectory object missing'

        if ext_uuid is None:
            ext_uuid = self.model.h5_uuid()
        if title:
            self.title = title
        if not self.title:
            self.title = 'blocked well'

        # feature and interpretation xml must exist (if flagged for writing) before being referenced
        self.__create_wellbore_feature_and_interpretation_xml_if_needed(add_as_part = add_as_part,
                                                                        add_relationships = add_relationships,
                                                                        originator = originator)

        # likewise the trajectory xml, which the blocked well node references
        self.__create_trajectory_xml_if_needed(create_for_trajectory_if_needed = create_for_trajectory_if_needed,
                                               add_as_part = add_as_part,
                                               add_relationships = add_relationships,
                                               originator = originator,
                                               ext_uuid = ext_uuid,
                                               title = title)
        assert self.trajectory.root is not None, 'trajectory xml not established'

        bw_node = super().create_xml(title = title, originator = originator, add_as_part = False)

        # wellbore frame elements
        nc_node, mds_node, mds_values_node, cc_node, cis_node, cnull_node, cis_values_node, gis_node, gnull_node, gis_values_node, fis_node, fnull_node, fis_values_node = self.__create_bw_node_sub_elements(
            bw_node = bw_node)

        # hook the array elements up to their hdf5 datasets
        self.__create_hdf5_dataset_references(ext_uuid = ext_uuid,
                                              mds_values_node = mds_values_node,
                                              cis_values_node = cis_values_node,
                                              gis_values_node = gis_values_node,
                                              fis_values_node = fis_values_node)

        # reference nodes for the trajectory, supporting grids and any interpretation
        traj_root, grid_root, interp_root = self.__create_trajectory_grid_wellbore_interpretation_reference_nodes(
            bw_node = bw_node)

        self.__add_as_part_and_add_relationships_if_required(add_as_part = add_as_part,
                                                             add_relationships = add_relationships,
                                                             bw_node = bw_node,
                                                             interp_root = interp_root,
                                                             ext_uuid = ext_uuid)

        return bw_node
    def __create_wellbore_feature_and_interpretation_xml_if_needed(self, add_as_part, add_relationships, originator):
        """ Create root node for WellboreFeature and WellboreInterpretation objects if necessary."""

        if self.feature_to_be_written:
            if self.wellbore_feature is None:
                # instantiating the feature also sets up the interpretation where shared
                self.create_feature_and_interpretation()
            self.wellbore_feature.create_xml(add_as_part = add_as_part, originator = originator)
        if self.interpretation_to_be_written:
            if self.wellbore_interpretation is None:
                self.create_feature_and_interpretation()
            self.wellbore_interpretation.create_xml(add_as_part = add_as_part,
                                                    title_suffix = None,
                                                    add_relationships = add_relationships,
                                                    originator = originator)
    def __create_trajectory_xml_if_needed(self, create_for_trajectory_if_needed, add_as_part, add_relationships,
                                          originator, ext_uuid, title):
        """ Create root node for associated Trajectory object if necessary."""

        if create_for_trajectory_if_needed and self.trajectory_to_be_written and self.trajectory.root is None:
            # the md datum xml must exist before the trajectory xml can reference it
            md_datum_root = self.trajectory.md_datum.create_xml(add_as_part = add_as_part,
                                                                add_relationships = add_relationships,
                                                                title = str(self.title),
                                                                originator = originator)
            self.trajectory.create_xml(ext_uuid,
                                       md_datum_root = md_datum_root,
                                       add_as_part = add_as_part,
                                       add_relationships = add_relationships,
                                       title = title,
                                       originator = originator)
def __create_bw_node_sub_elements(self, bw_node):
""" Append sub-elements to the BlockedWell object's root node."""
nc_node = rqet.SubElement(bw_node, ns['resqml2'] + 'NodeCount')
nc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'positiveInteger')
nc_node.text = str(self.node_count)
mds_node = rqet.SubElement(bw_node, ns['resqml2'] + 'NodeMd')
mds_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'DoubleHdf5Array')
mds_node.text = rqet.null_xml_text
mds_values_node = rqet.SubElement(mds_node, ns['resqml2'] + 'Values')
mds_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')
mds_values_node.text = rqet.null_xml_text
cc_node = rqet.SubElement(bw_node, ns['resqml2'] + 'CellCount')
cc_node.set(ns['xsi'] + 'type', ns['xsd'] + 'nonNegativeInteger')
cc_node.text = str(self.cell_count)
cis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'CellIndices')
cis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')
cis_node.text = rqet.null_xml_text
cnull_node = rqet.SubElement(cis_node, ns['resqml2'] + 'NullValue')
cnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')
cnull_node.text = str(self.cellind_null)
cis_values_node = rqet.SubElement(cis_node, ns['resqml2'] + 'Values')
cis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')
cis_values_node.text = rqet.null_xml_text
gis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'GridIndices')
gis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')
gis_node.text = rqet.null_xml_text
gnull_node = rqet.SubElement(gis_node, ns['resqml2'] + 'NullValue')
gnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')
gnull_node.text = str(self.gridind_null)
gis_values_node = rqet.SubElement(gis_node, ns['resqml2'] + 'Values')
gis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')
gis_values_node.text = rqet.null_xml_text
fis_node = rqet.SubElement(bw_node, ns['resqml2'] + 'LocalFacePairPerCellIndices')
fis_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'IntegerHdf5Array')
fis_node.text = rqet.null_xml_text
fnull_node = rqet.SubElement(fis_node, ns['resqml2'] + 'NullValue')
fnull_node.set(ns['xsi'] + 'type', ns['xsd'] + 'integer')
fnull_node.text = str(self.facepair_null)
fis_values_node = rqet.SubElement(fis_node, ns['resqml2'] + 'Values')
fis_values_node.set(ns['xsi'] + 'type', ns['eml'] + 'Hdf5Dataset')
fis_values_node.text = rqet.null_xml_text
return nc_node, mds_node, mds_values_node, cc_node, cis_node, cnull_node, cis_values_node, gis_node, gnull_node, gis_values_node, fis_node, fnull_node, fis_values_node
def __create_trajectory_grid_wellbore_interpretation_reference_nodes(self, bw_node):
"""Create nodes and add to BlockedWell object root node."""
traj_root = self.trajectory.root
self.model.create_ref_node('Trajectory',
rqet.find_nested_tags_text(traj_root, ['Citation', 'Title']),
bu.uuid_from_string(traj_root.attrib['uuid']),
content_type = 'obj_WellboreTrajectoryRepresentation',
root = bw_node)
for grid in self.grid_list:
grid_root = grid.root
self.model.create_ref_node('Grid',
rqet.find_nested_tags_text(grid_root, ['Citation', 'Title']),
bu.uuid_from_string(grid_root.attrib['uuid']),
content_type = 'obj_IjkGridRepresentation',
root = bw_node)
interp_root = None
if self.wellbore_interpretation is not None:
interp_root = self.wellbore_interpretation.root
self.model.create_ref_node('RepresentedInterpretation',
rqet.find_nested_tags_text(interp_root, ['Citation', 'Title']),
bu.uuid_from_string(interp_root.attrib['uuid']),
content_type = 'obj_WellboreInterpretation',
root = bw_node)
return traj_root, grid_root, interp_root
def __create_hdf5_dataset_references(self, ext_uuid, mds_values_node, cis_values_node, gis_values_node,
fis_values_node):
"""Create nodes that reference the hdf5 datasets (arrays) and add to the BlockedWell onject's root node."""
self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'NodeMd', root = mds_values_node)
self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'CellIndices', root = cis_values_node)
self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'GridIndices', root = gis_values_node)
self.model.create_hdf5_dataset_ref(ext_uuid, self.uuid, 'LocalFacePairPerCellIndices', root = fis_values_node)
    def __add_as_part_and_add_relationships_if_required(self, add_as_part, add_relationships, bw_node, interp_root,
                                                        ext_uuid):
        """Add the newly created BlockedWell object's root node as a part in the model and
        add reciprocal relationships.
        """
        if add_as_part:
            self.model.add_part('obj_BlockedWellboreRepresentation', self.uuid, bw_node)
            if add_relationships:
                # relate the blocked well to its trajectory, each supporting grid and any interpretation
                self.model.create_reciprocal_relationship(bw_node, 'destinationObject', self.trajectory.root,
                                                          'sourceObject')
                for grid in self.grid_list:
                    self.model.create_reciprocal_relationship(bw_node, 'destinationObject', grid.root, 'sourceObject')
                if interp_root is not None:
                    self.model.create_reciprocal_relationship(bw_node, 'destinationObject', interp_root, 'sourceObject')
                # relate the blocked well to the hdf5 external part holding its arrays
                ext_part = rqet.part_name_for_object('obj_EpcExternalPartReference', ext_uuid, prefixed = False)
                ext_node = self.model.root_for_part(ext_part)
                self.model.create_reciprocal_relationship(bw_node, 'mlToExternalPartProxy', ext_node,
                                                          'externalPartProxyToMl')
    def write_hdf5(self, file_name = None, mode = 'a', create_for_trajectory_if_needed = True):
        """Create or append to an hdf5 file, writing datasets for the measured depths, grid, cell & face indices.

        :meta common:
        """

        # NB: array data must all have been set up prior to calling this function

        if self.uuid is None:
            self.uuid = bu.new_uuid()

        h5_reg = rwh5.H5Register(self.model)

        if create_for_trajectory_if_needed and self.trajectory_to_be_written:
            self.trajectory.write_hdf5(file_name, mode = mode)
            # subsequent datasets must append to the file just written for the trajectory
            mode = 'a'

        h5_reg.register_dataset(self.uuid, 'NodeMd', self.node_mds)
        h5_reg.register_dataset(self.uuid, 'CellIndices', self.cell_indices)  # could use int32?
        h5_reg.register_dataset(self.uuid, 'GridIndices', self.grid_indices)  # could use int32?
        # convert face index pairs from [axis, polarity] back to strange local face numbering
        # NOTE(review): -1 entries appear to mark unblocked/absent faces (cf. grid_indices == -1
        # marking unblocked intervals elsewhere in this class) -- confirm
        mask = (self.face_pair_indices.flatten() == -1).reshape((-1, 2))  # 2nd axis is (axis, polarity)
        masked_face_indices = np.where(mask, 0, self.face_pair_indices.reshape((-1, 2)))  # 2nd axis is (axis, polarity)
        # using flat array for raw_face_indices array
        # other resqml writing code might use an array with one int per entry point and one per exit point, with 2nd axis as (entry, exit)
        raw_face_indices = np.where(mask[:, 0], -1, self.face_index_map[masked_face_indices[:, 0],
                                                                        masked_face_indices[:,
                                                                                            1]].flatten()).reshape(-1)

        h5_reg.register_dataset(self.uuid, 'LocalFacePairPerCellIndices', raw_face_indices)  # could use uint8?

        h5_reg.write(file = file_name, mode = mode)
| 53.078176 | 206 | 0.559705 |
0275515053ff051f95f0054bde1bd90d1fadcdbf | 1,305 | py | Python | handlers/isapiwsgihandler.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | [
"BSD-3-Clause"
] | 21 | 2018-02-16T17:43:59.000Z | 2021-12-29T12:08:28.000Z | handlers/isapiwsgihandler.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | [
"BSD-3-Clause"
] | 26 | 2015-01-02T13:35:48.000Z | 2015-06-10T14:39:07.000Z | handlers/isapiwsgihandler.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | [
"BSD-3-Clause"
] | 8 | 2018-10-08T03:48:00.000Z | 2022-03-31T12:13:01.000Z | """
web2py handler for isapi-wsgi for IIS. Requires:
http://code.google.com/p/isapi-wsgi/
"""
# The entry point for the ISAPI extension.
def __ExtensionFactory__():
    """Entry point called by IIS: build and return the ISAPI-WSGI handler wrapping web2py."""
    import os
    import sys
    web2py_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(web2py_dir)
    if not os.path.isdir('applications'):
        raise RuntimeError('Running from the wrong folder')
    # put the web2py folder first on the module search path, dropping any duplicate entries
    sys.path = [web2py_dir] + [entry for entry in sys.path if entry != web2py_dir]
    import gluon.main
    import isapi_wsgi
    return isapi_wsgi.ISAPIThreadPoolHandler(gluon.main.wsgibase)
# ISAPI installation:
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        # bug fix for Python 3: the original used a Python 2 print statement, a syntax error
        # under Python 3; the parenthesised form behaves identically under both versions
        print("USAGE: python isapiwsgihandler.py install --server=Sitename")
        sys.exit(0)
    from isapi.install import ISAPIParameters
    from isapi.install import ScriptMapParams
    from isapi.install import VirtualDirParameters
    from isapi.install import HandleCommandLine
    params = ISAPIParameters()
    sm = [ScriptMapParams(Extension="*", Flags=0)]
    vd = VirtualDirParameters(Name="appname",
                              Description="Web2py in Python",
                              ScriptMaps=sm,
                              ScriptMapUpdate="replace")
    params.VirtualDirs = [vd]
    HandleCommandLine(params)
| 32.625 | 75 | 0.661303 |
794a960100de33c9c7bb87c0c9b32b283a91d6a4 | 5,753 | py | Python | utils/helpers.py | dgriffiths3/mononet3d | 4505234025efaee727d4bb4bb3f013fee8dc65b4 | [
"MIT"
] | 9 | 2020-08-24T17:48:33.000Z | 2021-01-29T17:26:51.000Z | utils/helpers.py | dgriffiths3/mononet3d | 4505234025efaee727d4bb4bb3f013fee8dc65b4 | [
"MIT"
] | 1 | 2021-01-15T10:51:06.000Z | 2021-01-21T10:39:15.000Z | utils/helpers.py | dgriffiths3/mononet3d | 4505234025efaee727d4bb4bb3f013fee8dc65b4 | [
"MIT"
] | 1 | 2021-02-18T14:06:16.000Z | 2021-02-18T14:06:16.000Z | import os
import numpy as np
import progressbar
import pyvista as pv
import toml
import tensorflow as tf
import colorsys
class EvalProgBar():
    """Indeterminate-length progress bar reporting AP / mAP metrics during evaluation."""

    def __init__(self):
        label = progressbar.FormatLabel('')
        self.widgets = [label]
        self.bar = progressbar.ProgressBar(widgets=self.widgets)
        self.bar.start(max_value=progressbar.UnknownLength)

    def update(self, step, metrics):
        # replace the label widget in place so the bar renders the new text
        text = f'step: {step}, AP: {metrics[0]}, mAP: {metrics[1]:.2f}'
        self.widgets[0] = progressbar.FormatLabel(text)
        self.bar.update(step, True)
def progbar(n):
    """Return a determinate progress bar over n steps showing percentage, counts and adaptive ETA."""
    widgets = [
        progressbar.Bar('=', '[', ']'), ' ',
        progressbar.Percentage(), ' | ',
        progressbar.SimpleProgress(), ' | ',
        progressbar.AdaptiveETA()
    ]
    return progressbar.ProgressBar(maxval=n, widgets=widgets)
def colormap(n_classes):
    """Return an (n_classes, 3) array of RGB colours, evenly spaced in hue with s = v = 0.8."""
    hues = np.linspace(0, 1, n_classes)
    return np.array([colorsys.hsv_to_rgb(h, 0.8, 0.8) for h in hues])
def dump_config(cfg):
    """Serialise cfg as TOML into ./logs/<log_code>/config.toml, creating the folder if needed.

    Fix: serialise before opening and write via a context manager, so the file handle is
    always closed (previously an exception between open() and close() leaked the handle).
    """
    save_dir = os.path.join('./logs/{}'.format(cfg['std']['log_code']))
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    serialised = toml.dumps(cfg)
    with open(os.path.join(save_dir, 'config.toml'), 'w') as f:
        f.write(serialised)
def euc_dist(a, b):
    """Euclidean distance between points (or arrays) a and b."""
    diff = a - b
    return np.sqrt((diff * diff).sum())
def bounding_box(pc):
    """Axis-aligned bounding box of point cloud pc as [xmin, ymin, zmin, xmax, ymax, zmax]."""
    xyz = pc[:, :3]
    return np.concatenate([xyz.min(axis=0), xyz.max(axis=0)])
def bbox_overlap(pc_a, pc_b):
    """Return True if the axis-aligned bounding boxes of the two point sets intersect (touching counts)."""
    box_a = bounding_box(pc_a)
    box_b = bounding_box(pc_b)
    # the boxes intersect iff they overlap on every axis
    return bool(box_a[3] >= box_b[0] and box_b[3] >= box_a[0] and
                box_a[4] >= box_b[1] and box_b[4] >= box_a[1] and
                box_a[5] >= box_b[2] and box_b[5] >= box_a[2])
def nonaa_box(b, theta, axis=1):
    """Return the 8 corners of the box b = [x0, y0, z0, x1, y1, z1], rotated by theta about axis.

    Corner order matches what make_lines() expects: x varies fastest, then z, then y.
    """
    corners = np.array([[x, y, z]
                        for y in (b[1], b[4])
                        for z in (b[2], b[5])
                        for x in (b[0], b[3])])
    return rotate_euler(corners, theta, axis)
def make_lines(pts):
    """Return the 12 pyvista edge lines of a box given its 8 corner points (nonaa_box order)."""
    edge_pairs = [(0, 1), (1, 3), (3, 2), (2, 0),
                  (0, 4), (1, 5), (3, 7), (2, 6),
                  (4, 6), (6, 7), (7, 5), (5, 4)]
    return [pv.Line(pts[i], pts[j]) for i, j in edge_pairs]
def rotate_euler(pts, theta, axis=2, center=None):
    """Rotate points pts by theta radians about the given axis, returning a new array.

    pts: (N, 3) array of points
    theta: rotation angle in radians
    axis: 0, 1 or 2 for rotation about x, y or z respectively
    center: pivot point; defaults to the centroid of pts

    Fix: the original subtracted the pivot from pts in place (`pts -= mean`), silently
    mutating the caller's array; this version leaves the input untouched.
    """
    c = np.cos(theta)
    s = np.sin(theta)
    if axis == 0:
        R = np.array([[1., 0., 0.], [0., c, -s], [0., s, c]])
    elif axis == 1:
        R = np.array([[c, 0., s], [0., 1., 0.], [-s, 0., c]])
    elif axis == 2:
        R = np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
    pivot = np.mean(pts, axis=0) if center is None else center
    # rotate the translated copy (pts @ R.T) and translate back
    return np.einsum('ij,kj->ki', R, pts - pivot) + pivot
def get_fixed_pts(in_pts, n_pts):
    """Return (ok, pts) where pts has exactly n_pts rows drawn from in_pts.

    Fewer input points are zero-padded then shuffled; more are randomly subsampled
    without replacement.  ok is False (with an all-zero result) when in_pts is empty.
    """
    out_pts = np.zeros((n_pts, 3))
    if in_pts.shape[0] == 0:
        return False, out_pts
    if in_pts.shape[0] < n_pts:
        out_pts[0:in_pts.shape[0]] = in_pts
        order = np.arange(n_pts)
        np.random.shuffle(order)
        return True, out_pts[order]
    order = np.arange(in_pts.shape[0])
    np.random.shuffle(order)
    return True, in_pts[order[0:n_pts]]
def iou(a, b):
    """Intersection-over-union of two axis-aligned 3D boxes [x1, y1, z1, x2, y2, z2]."""
    lower = np.maximum(a[0:3], b[0:3])
    upper = np.minimum(a[3:6], b[3:6])
    # clamp negative extents (disjoint boxes) to zero
    dims = np.maximum(0.0, upper - lower)
    inter = dims[0] * dims[1] * dims[2]
    vol_a = (a[3] - a[0]) * (a[4] - a[1]) * (a[5] - a[2])
    vol_b = (b[3] - b[0]) * (b[4] - b[1]) * (b[5] - b[2])
    return inter / float(vol_a + vol_b - inter)
def nms(boxes, scores, max_out=100, iou_thresh=0.25):
    """Greedy 3D non-maximum suppression over axis-aligned boxes.

    Code adapted from : https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py

    boxes: (N, 6) array of [x1, y1, z1, x2, y2, z2]
    scores: confidence scores; the axis=0 argsort and `i[0]` indexing below suggest a
        column vector of shape (N, 1) is expected -- TODO confirm against callers
    max_out: maximum number of box indices to return
    iou_thresh: boxes whose IoU against a kept box exceeds this are suppressed

    returns a list of indices of kept boxes, in decreasing score order
    """
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    z1 = boxes[:, 2]
    x2 = boxes[:, 3]
    y2 = boxes[:, 4]
    z2 = boxes[:, 5]
    keep_inds = []
    if scores.shape[0] > 0:
        # process boxes in decreasing score order
        order = np.argsort(-scores, axis=0)
        areas = (x2 - x1) * (y2 - y1) * (z2 - z1)
        num_in = 0
        while order.size > 0:
            if num_in == max_out: break
            # keep the highest scoring remaining box
            i = order[0]
            keep_inds.append(i[0])
            num_in += 1
            # intersection of the kept box with all remaining boxes
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            zz1 = np.maximum(z1[i], z1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            zz2 = np.minimum(z2[i], z2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1)
            h = np.maximum(0.0, yy2 - yy1)
            d = np.maximum(0.0, zz2 - zz1)
            inter = w * h * d
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # retain only boxes with sufficiently low overlap against the kept box
            inds = np.where(ovr <= iou_thresh)[0]
            order = order[inds + 1]
    return keep_inds
def kitti_scenenet_to_oabb(center, attr):
    """Convert (center, attr) box parameterisation to oriented boxes of 8 corners each.

    center: (N, 3) box reference points; attr: (N, >=5) with extents in columns 0:3 and
    the yaw encoded as (cos, sin) in columns 3:4.  Returns an (N, 8, 3) corner array.
    """
    extent = attr[:, :3]
    theta = np.arctan2(attr[:, 4], attr[:, 3])
    half_length = extent[:, 2] / 2.
    half_width = extent[:, 1] / 2.
    # axis-aligned box per row: [x_min, y_min, z_min, x_max, y_max, z_max]
    boxes = np.stack([
        center[:, 0] - half_length,
        center[:, 1],
        center[:, 2] - half_width,
        center[:, 0] + half_length,
        center[:, 1] - extent[:, 0],
        center[:, 2] + half_width,
    ], axis=1)
    nonaa_boxes = np.zeros((boxes.shape[0], 8, 3))
    for idx, box in enumerate(boxes):
        nonaa_boxes[idx] = nonaa_box(box, theta[idx])
    return nonaa_boxes
def get_fixed_pts(in_pts, n_pts):
    """Return (ok, pts) with exactly n_pts rows randomly sampled (or zero-padded) from in_pts.

    NOTE(review): this is an exact duplicate of the earlier get_fixed_pts definition in this
    module; being later in the file, this definition is the one in effect -- consider removing one.
    """
    out_pts = np.zeros((n_pts, 3))
    ret = True
    if in_pts.shape[0] == 0:
        # no input points: flag failure and return all zeros
        ret = False
    elif in_pts.shape[0] < n_pts:
        # too few points: zero-pad then shuffle
        out_pts[0:in_pts.shape[0]] = in_pts
        s_idx = np.arange(n_pts)
        np.random.shuffle(s_idx)
        out_pts = out_pts[s_idx]
    else:
        # enough points: random subsample without replacement
        s_idx = np.arange(in_pts.shape[0])
        np.random.shuffle(s_idx)
        out_pts = in_pts[s_idx[0:n_pts]]
    return ret, out_pts
| 20.620072 | 101 | 0.58665 |
5c4c260604b7616b99b8ab0f16d8325c72e4f5e5 | 40,712 | py | Python | src/skmultiflow/trees/multi_target_regression_hoeffding_tree.py | mertozer94/scikit-multiflow | a6e719cad900805a85d17143c05a3da9dd4987e8 | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/trees/multi_target_regression_hoeffding_tree.py | mertozer94/scikit-multiflow | a6e719cad900805a85d17143c05a3da9dd4987e8 | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/trees/multi_target_regression_hoeffding_tree.py | mertozer94/scikit-multiflow | a6e719cad900805a85d17143c05a3da9dd4987e8 | [
"BSD-3-Clause"
] | null | null | null | from operator import attrgetter
import numpy as np
from skmultiflow.core import MultiOutputMixin
from skmultiflow.trees import RegressionHoeffdingTree
from skmultiflow.trees.numeric_attribute_regression_observer_multi_target \
import NumericAttributeRegressionObserverMultiTarget
from skmultiflow.trees.nominal_attribute_regression_observer import NominalAttributeRegressionObserver
from skmultiflow.utils.utils import get_dimensions
from skmultiflow.trees.intra_cluster_variance_reduction_split_criterion \
import IntraClusterVarianceReductionSplitCriterion
from skmultiflow.utils import check_random_state
# Identifiers for the leaf prediction mechanisms (see the `leaf_prediction` parameter
# of MultiTargetRegressionHoeffdingTree).
_TARGET_MEAN = 'mean'
_PERCEPTRON = 'perceptron'
_ADAPTIVE = 'adaptive'
class MultiTargetRegressionHoeffdingTree(RegressionHoeffdingTree, MultiOutputMixin):
"""Multi-target Regression Hoeffding Tree.
This is an implementation of the iSoup-Tree proposed by A. Osojnik, P. Panov, and S. Džeroski [1]_.
Parameters
----------
max_byte_size: int (default=33554432)
Maximum memory consumed by the tree.
memory_estimate_period: int (default=1000000)
Number of instances between memory consumption checks.
grace_period: int (default=200)
Number of instances a leaf should observe between split attempts.
split_confidence: float (default=0.0000001)
Allowed error in split decision, a value closer to 0 takes longer to
decide.
tie_threshold: float (default=0.05)
Threshold below which a split will be forced to break ties.
binary_split: boolean (default=False)
If True, only allow binary splits.
stop_mem_management: boolean (default=False)
If True, stop growing as soon as memory limit is hit.
remove_poor_atts: boolean (default=False)
If True, disable poor attributes.
no_preprune: boolean (default=False)
If True, disable pre-pruning.
leaf_prediction: string (default='perceptron')
| Prediction mechanism used at leafs.
| 'mean' - Target mean
| 'perceptron' - Perceptron
| 'adaptive' - Adaptively chooses between the best predictor
nb_threshold: int (default=0)
Number of instances a leaf should observe before allowing Naive Bayes.
nominal_attributes: list, optional
List of Nominal attributes. If emtpy, then assume that all attributes
are numerical.
learning_ratio_perceptron: float
The learning rate of the perceptron.
learning_ratio_decay: float
Decay multiplier for the learning rate of the perceptron
learning_ratio_const: Bool
If False the learning ratio will decay with the number of examples seen
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when leaf_prediction is 'perceptron'.
References
----------
.. [1] Aljaž Osojnik, Panče Panov, and Sašo Džeroski. "Tree-based methods for online multi-target regression."
Journal of Intelligent Information Systems 50.2 (2018): 315-339.
"""
class ActiveLearningNodeForRegression(RegressionHoeffdingTree.
ActiveLearningNodeForRegression):
def __init__(self, initial_class_observations):
""" ActiveLearningNode class constructor. """
super().__init__(initial_class_observations)
def learn_from_instance(self, X, y, weight, ht):
"""Update the node with the provided instance.
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: int
Instance class.
weight: float
Instance weight.
ht: HoeffdingTree
Hoeffding Tree to update.
"""
try:
self._observed_class_distribution[0] += weight
self._observed_class_distribution[1] += y * weight
self._observed_class_distribution[2] += y * y * weight
except KeyError:
self._observed_class_distribution[0] = weight
self._observed_class_distribution[1] = y * weight
self._observed_class_distribution[2] = y * y * weight
for i, x in enumerate(X.tolist()):
try:
obs = self._attribute_observers[i]
except KeyError:
if ht.nominal_attributes is not None and i in ht.nominal_attributes:
obs = NominalAttributeRegressionObserver()
else:
obs = NumericAttributeRegressionObserverMultiTarget()
self._attribute_observers[i] = obs
obs.observe_attribute_class(x, y, weight)
    class LearningNodePerceptron(RegressionHoeffdingTree.
                                 LearningNodePerceptron):
        """Active leaf that trains one linear perceptron per target.

        ``perceptron_weight`` is a matrix of shape
        (n_targets, n_features + 1); the extra column multiplies a constant
        bias input of 1 appended by ``normalize_sample``.
        """

        def __init__(self, initial_class_observations, perceptron_weight=None,
                     random_state=None):
            """LearningNodePerceptron class constructor

            Parameters
            ----------
            initial_class_observations: dict
                Initial sufficient statistics (keys 0, 1, 2: weight sum,
                weighted target sum, weighted squared target sum).
            perceptron_weight: numpy.ndarray, optional
                Weight matrix inherited from a parent node; lazily
                initialized on the first instance when None.
            random_state: int, RandomState instance or None, optional
                Seed or generator used to draw the initial random weights.
            """
            super().__init__(initial_class_observations)
            self.perceptron_weight = perceptron_weight
            # check_random_state normalizes int/None/RandomState inputs
            self.random_state = check_random_state(random_state)

        def learn_from_instance(self, X, y, weight, rht):
            """Update the node with the provided instance.

            Updates sufficient statistics, performs ``int(weight)``
            perceptron steps and feeds the per-attribute observers used to
            evaluate candidate splits.

            Parameters
            ----------
            X: numpy.ndarray of length equal to the number of features.
                Instance attributes for updating the node.
            y: numpy.ndarray of length equal to the number of targets.
                Instance targets.
            weight: float
                Instance weight.
            rht: RegressionHoeffdingTree
                Regression Hoeffding Tree to update.
            """
            if self.perceptron_weight is None:
                # Creates matrix of perceptron random weights:
                # (n_targets, n_features + 1), last column is the bias
                _, rows = get_dimensions(y)
                _, cols = get_dimensions(X)
                self.perceptron_weight = self.random_state.uniform(-1.0, 1.0,
                                                                   (rows,
                                                                    cols + 1)
                                                                   )
                self.normalize_perceptron_weights()
            # Weight seen so far (key 0); KeyError means first instance
            try:
                self._observed_class_distribution[0] += weight
            except KeyError:
                self._observed_class_distribution[0] = weight
            if rht.learning_ratio_const:
                learning_ratio = rht.learning_ratio_perceptron
            else:
                # Decaying learning rate: shrinks as more weight is seen
                learning_ratio = rht.learning_ratio_perceptron / \
                                 (1 + self._observed_class_distribution[0] *
                                  rht.learning_ratio_decay)
            # Weighted target sums / squared sums (keys 1 and 2)
            try:
                self._observed_class_distribution[1] += weight * y
                self._observed_class_distribution[2] += weight * y * y
            except KeyError:
                self._observed_class_distribution[1] = weight * y
                self._observed_class_distribution[2] = weight * y * y
            # One perceptron update per unit of instance weight
            for i in range(int(weight)):
                self.update_weights(X, y, learning_ratio, rht)
            for i, x in enumerate(X.tolist()):
                try:
                    obs = self._attribute_observers[i]
                except KeyError:
                    # Creates targets observers, if not already defined
                    if rht.nominal_attributes is not None and i in rht.nominal_attributes:
                        obs = NominalAttributeRegressionObserver()
                    else:
                        obs = NumericAttributeRegressionObserverMultiTarget()
                    self._attribute_observers[i] = obs
                obs.observe_attribute_class(x, y, weight)

        def update_weights(self, X, y, learning_ratio, rht):
            """Update the perceptron weights

            One gradient step in the normalized feature/target space,
            followed by L1 re-normalization of each weight row.

            Parameters
            ----------
            X: numpy.ndarray of length equal to the number of features.
                Instance attributes for updating the node.
            y: numpy.ndarray of length equal to the number of targets.
                Targets values.
            learning_ratio: float
                perceptron learning ratio
            rht: RegressionHoeffdingTree
                Regression Hoeffding Tree to update.
            """
            normalized_sample = rht.normalize_sample(X)
            normalized_pred = self.predict(normalized_sample)
            normalized_target_value = rht.normalized_target_value(y)
            # Outer product of the prediction error with the bias-augmented
            # input gives the per-weight gradient step
            self.perceptron_weight += learning_ratio * \
                np.matmul((normalized_target_value - normalized_pred)[:, None],
                          normalized_sample[None, :])
            self.normalize_perceptron_weights()

        def normalize_perceptron_weights(self):
            """Scale each target's weight row to unit L1 norm."""
            # Normalize perceptron weights
            n_targets = self.perceptron_weight.shape[0]
            for i in range(n_targets):
                sum_w = np.sum(np.abs(self.perceptron_weight[i, :]))
                # NOTE(review): an all-zero row would divide by zero here;
                # random initialization makes that unlikely -- confirm.
                self.perceptron_weight[i, :] /= sum_w

        # Predicts new income instances as a multiplication of the neurons
        # weights with the inputs augmented with a bias value
        def predict(self, X):
            return np.matmul(self.perceptron_weight, X)

        def get_weight_seen(self):
            """Calculate the total weight seen by the node.

            Returns
            -------
            float
                Total weight seen.
            """
            if self._observed_class_distribution == {}:
                return 0
            else:
                return self._observed_class_distribution[0]
class LearningNodeAdaptive(LearningNodePerceptron):
def __init__(self, initial_class_observations, perceptron_weight=None,
random_state=None):
"""LearningNodePerceptron class constructor
Parameters
----------
initial_class_observations
perceptron_weight
"""
super().__init__(initial_class_observations, perceptron_weight,
random_state)
# Faded errors for the perceptron and mean predictors
self.fMAE_M = 0.0
self.fMAE_P = 0.0
def update_weights(self, X, y, learning_ratio, rht):
"""Update the perceptron weights
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: numpy.ndarray of length equal to the number of targets.
Targets values.
learning_ratio: float
perceptron learning ratio
rht: RegressionHoeffdingTree
Regression Hoeffding Tree to update.
"""
normalized_sample = rht.normalize_sample(X)
normalized_pred = self.predict(normalized_sample)
normalized_target_value = rht.normalized_target_value(y)
self.perceptron_weight += learning_ratio * \
np.matmul((normalized_target_value - normalized_pred)[:, None],
normalized_sample[None, :])
self.normalize_perceptron_weights()
# Update faded errors for the predictors
# The considered errors are normalized, since they are based on
# mean centered and sd scaled values
self.fMAE_P = 0.95 * self.fMAE_P + np.abs(
normalized_target_value - normalized_pred
)
self.fMAE_M = 0.95 * self.fMAE_M + np.abs(
normalized_target_value - rht.
normalized_target_value(self._observed_class_distribution[1] /
self._observed_class_distribution[0])
)
class InactiveLearningNodePerceptron(RegressionHoeffdingTree.
InactiveLearningNodePerceptron):
def __init__(self, initial_class_observations, perceptron_weight=None,
random_state=None):
super().__init__(initial_class_observations)
self.perceptron_weight = perceptron_weight
self.random_state = check_random_state(random_state)
def learn_from_instance(self, X, y, weight, rht):
if self.perceptron_weight is None:
# Creates matrix of perceptron random weights
_, rows = get_dimensions(y)
_, cols = get_dimensions(X)
self.perceptron_weight = self.random_state.uniform(-1, 1,
(rows,
cols + 1))
self.normalize_perceptron_weights()
try:
self._observed_class_distribution[0] += weight
except KeyError:
self._observed_class_distribution[0] = weight
if rht.learning_ratio_const:
learning_ratio = rht.learning_ratio_perceptron
else:
learning_ratio = rht.learning_ratio_perceptron / \
(1 + self._observed_class_distribution[0] *
rht.learning_ratio_decay)
try:
self._observed_class_distribution[1] += weight * y
self._observed_class_distribution[2] += weight * y * y
except KeyError:
self._observed_class_distribution[1] = weight * y
self._observed_class_distribution[2] = weight * y * y
for i in range(int(weight)):
self.update_weights(X, y, learning_ratio, rht)
def update_weights(self, X, y, learning_ratio, rht):
"""Update the perceptron weights
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: numpy.ndarray of length equal to the number of targets.
Targets values.
learning_ratio: float
perceptron learning ratio
rht: RegressionHoeffdingTree
Regression Hoeffding Tree to update.
"""
normalized_sample = rht.normalize_sample(X)
normalized_pred = self.predict(normalized_sample)
normalized_target_value = rht.normalized_target_value(y)
self.perceptron_weight += learning_ratio * \
np.matmul((normalized_target_value - normalized_pred)[:, None],
normalized_sample[None, :])
self.normalize_perceptron_weights()
def normalize_perceptron_weights(self):
n_targets = self.perceptron_weight.shape[0]
# Normalize perceptron weights
for i in range(n_targets):
sum_w = np.sum(np.abs(self.perceptron_weight[i, :]))
self.perceptron_weight[i, :] /= sum_w
# Predicts new income instances as a multiplication of the neurons
# weights with the inputs augmented with a bias value
def predict(self, X):
return np.matmul(self.perceptron_weight, X)
class InactiveLearningNodeAdaptive(InactiveLearningNodePerceptron):
def __init__(self, initial_class_observations, perceptron_weight=None,
random_state=None):
super().__init__(initial_class_observations, perceptron_weight,
random_state)
# Faded errors for the perceptron and mean predictors
self.fMAE_M = 0.0
self.fMAE_P = 0.0
def update_weights(self, X, y, learning_ratio, rht):
"""Update the perceptron weights
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes for updating the node.
y: numpy.ndarray of length equal to the number of targets.
Targets values.
learning_ratio: float
perceptron learning ratio
rht: RegressionHoeffdingTree
Regression Hoeffding Tree to update.
"""
normalized_sample = rht.normalize_sample(X)
normalized_pred = self.predict(normalized_sample)
normalized_target_value = rht.normalized_target_value(y)
self.perceptron_weight += learning_ratio * \
np.matmul((normalized_target_value - normalized_pred)[:, None],
normalized_sample[None, :])
self.normalize_perceptron_weights()
# Update faded errors for the predictors
# The considered errors are normalized, since they are based on
# mean centered and sd scaled values
self.fMAE_P = 0.95 * self.fMAE_P + np.abs(
normalized_target_value - normalized_pred
)
self.fMAE_M = 0.95 * self.fMAE_M + np.abs(
normalized_target_value - rht.
normalized_target_value(self._observed_class_distribution[1] /
self._observed_class_distribution[0])
)
    # ==========================================================
    # == Multi-target Hoeffding Regression Tree implementation ==
    # ==========================================================
    def __init__(self,
                 max_byte_size=33554432,
                 memory_estimate_period=1000000,
                 grace_period=200,
                 split_confidence=0.0000001,
                 tie_threshold=0.05,
                 binary_split=False,
                 stop_mem_management=False,
                 remove_poor_atts=False,
                 leaf_prediction='perceptron',
                 no_preprune=False,
                 nb_threshold=0,
                 nominal_attributes=None,
                 learning_ratio_perceptron=0.02,
                 learning_ratio_decay=0.001,
                 learning_ratio_const=True,
                 random_state=None):
        """Multi-target regression Hoeffding tree constructor.

        See the class docstring for the meaning of every parameter; the
        tree-management options are forwarded to the parent regression
        Hoeffding tree, while the perceptron learning-rate settings are kept
        locally.
        """
        super().__init__(max_byte_size=max_byte_size,
                         memory_estimate_period=memory_estimate_period,
                         grace_period=grace_period,
                         split_confidence=split_confidence,
                         tie_threshold=tie_threshold,
                         binary_split=binary_split,
                         stop_mem_management=stop_mem_management,
                         remove_poor_atts=remove_poor_atts,
                         no_preprune=no_preprune,
                         leaf_prediction=leaf_prediction,
                         nb_threshold=nb_threshold,
                         nominal_attributes=nominal_attributes)
        self.split_criterion = 'icvr'   # intra cluster variance reduction
        self.learning_ratio_perceptron = learning_ratio_perceptron
        self.learning_ratio_decay = learning_ratio_decay
        self.learning_ratio_const = learning_ratio_const
        self.random_state = random_state
        # Tree structure and bookkeeping counters
        self._tree_root = None
        self._decision_node_cnt = 0
        self._active_leaf_node_cnt = 0
        self._inactive_leaf_node_cnt = 0
        # Memory-management estimates
        self._inactive_leaf_byte_size_estimate = 0.0
        self._active_leaf_byte_size_estimate = 0.0
        self._byte_size_estimate_overhead_fraction = 1.0
        self._growth_allowed = True
        self._train_weight_seen_by_model = 0.0
        # Global statistics used for feature/target normalization
        self.examples_seen = 0
        self.sum_of_values = 0.0
        self.sum_of_squares = 0.0
        self.sum_of_attribute_values = 0.0
        self.sum_of_attribute_squares = 0.0
        # To add the n_targets property once
        self._n_targets_set = False
    @property
    def leaf_prediction(self):
        """str: Prediction mechanism used at the leaves
        ('mean', 'perceptron' or 'adaptive')."""
        return self._leaf_prediction
@leaf_prediction.setter
def leaf_prediction(self, leaf_prediction):
if leaf_prediction not in {_TARGET_MEAN, _PERCEPTRON, _ADAPTIVE}:
print("Invalid leaf_prediction option {}', will use default '{}'".format(leaf_prediction, _PERCEPTRON))
self._leaf_prediction = _PERCEPTRON
else:
self._leaf_prediction = leaf_prediction
    @property
    def split_criterion(self):
        """str: Split criterion; always 'icvr' (intra-cluster variance
        reduction) for this tree."""
        return self._split_criterion
@split_criterion.setter
def split_criterion(self, split_criterion):
if split_criterion == 'vr':
# Corner case due to parent class initialization
split_criterion = 'icvr'
if split_criterion != 'icvr': # intra cluster variance reduction
print("Invalid split_criterion option {}', will use default '{}'"
.format(split_criterion, 'icvr'))
self._split_criterion = 'icvr'
else:
self._split_criterion = split_criterion
def normalize_sample(self, X):
"""Normalize the features in order to have the same influence during the
process of training.
Parameters
----------
X: np.array
features.
Returns
-------
np.array:
normalized samples
"""
if self.examples_seen <= 1:
_, c = get_dimensions(X)
return np.zeros((c + 1), dtype=np.float64)
mean = self.sum_of_attribute_values / self.examples_seen
variance = (self.sum_of_attribute_squares -
(self.sum_of_attribute_values ** 2) /
self.examples_seen) / (self.examples_seen - 1)
sd = np.sqrt(variance, out=np.zeros_like(variance),
where=variance >= 0.0)
normalized_sample = np.zeros(X.shape[0] + 1, dtype=np.float64)
np.divide(X - mean, sd, where=sd != 0, out=normalized_sample[:-1])
# Augments sample with the bias input signal (or y intercept for
# each target)
normalized_sample[-1] = 1.0
return normalized_sample
def normalized_target_value(self, y):
"""Normalize the targets in order to have the same influence during the
process of training.
Parameters
----------
y: np.array
targets.
Returns
-------
np.array:
normalized targets values
"""
if self.examples_seen <= 1:
return np.zeros_like(y, dtype=np.float64)
mean = self.sum_of_values / self.examples_seen
variance = (self.sum_of_squares -
(self.sum_of_values ** 2) /
self.examples_seen) / (self.examples_seen - 1)
sd = np.sqrt(variance, out=np.zeros_like(variance),
where=variance >= 0.0)
normalized_targets = np.divide(y - mean, sd, where=sd != 0,
out=np.zeros_like(y, dtype=np.float64))
return normalized_targets
def _new_learning_node(self, initial_class_observations=None,
perceptron_weight=None):
"""Create a new learning node. The type of learning node depends on
the tree configuration.
"""
if initial_class_observations is None:
initial_class_observations = {}
if self.leaf_prediction == _TARGET_MEAN:
return self.ActiveLearningNodeForRegression(
initial_class_observations)
elif self.leaf_prediction == _PERCEPTRON:
return self.LearningNodePerceptron(initial_class_observations,
perceptron_weight,
self.random_state)
elif self.leaf_prediction == _ADAPTIVE:
return self.LearningNodeAdaptive(
initial_class_observations,
perceptron_weight,
random_state=self.random_state
)
def _get_predictors_faded_error(self, X):
"""Get the faded error of the leaf corresponding to the instance.
Parameters
----------
X: numpy.ndarray of length equal to the number of features.
Instance attributes.
Returns
-------
dict (predictor, fmae)
"""
fmaes = {}
if self._tree_root is not None:
found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
leaf_node = found_node.node
if leaf_node is None:
leaf_node = found_node.parent
fmaes['mean'] = leaf_node.fMAE_M
fmaes['perceptron'] = leaf_node.fMAE_P
return fmaes
    def get_weights_for_instance(self, X):
        """Return the perceptron weight matrix of the leaf the instance is
        routed to.

        NOTE(review): despite the original docstring, this returns
        perceptron weights, not class votes -- the text was a copy-paste
        from the classification API.

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes.

        Returns
        -------
        numpy.ndarray
            The leaf's perceptron weight matrix, or an empty list when the
            tree has not been initialized yet.
        """
        if self._tree_root is not None:
            found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
            leaf_node = found_node.node
            if leaf_node is None:
                # Instance fell through a branch with no child: use parent
                leaf_node = found_node.parent
            return leaf_node.perceptron_weight
        else:   # TODO Verify
            # Empty tree returns a list, not an ndarray -- callers should
            # guard before using this in matrix operations.
            return []
def partial_fit(self, X, y, sample_weight=None):
"""Incrementally trains the model. Train samples (instances) are
composed of X attributes and their corresponding targets y.
Tasks performed before training:
* Verify instance weight. if not provided, uniform weights (1.0) are
assumed.
* If more than one instance is passed, loop through X and pass
instances one at a time.
* Update weight seen by model.
Training tasks:
* If the tree is empty, create a leaf node as the root.
* If the tree is already initialized, find the corresponding leaf for
the instance and update the leaf node statistics.
* If growth is allowed and the number of instances that the leaf has
observed between split attempts exceed the grace period then attempt
to split.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
Instance attributes.
y: numpy.ndarray of shape (n_samples, n_targets)
Target values.
sample_weight: float or array-like
Samples weight. If not provided, uniform weights are assumed.
"""
if y is not None:
# Set the number of targets once
if not self._n_targets_set:
_, self._n_targets = get_dimensions(y)
self._n_targets_set = True
row_cnt, _ = get_dimensions(X)
if sample_weight is None:
sample_weight = np.ones(row_cnt)
if row_cnt != len(sample_weight):
raise ValueError('Inconsistent number of instances ({}) and weights ({}).'.format(row_cnt,
len(sample_weight)))
for i in range(row_cnt):
if sample_weight[i] != 0.0:
self._train_weight_seen_by_model += sample_weight[i]
self._partial_fit(X[i], y[i], sample_weight[i])
    def _partial_fit(self, X, y, sample_weight):
        """Trains the model on samples X and corresponding targets y.

        Private function where actual training is carried on.

        Parameters
        ----------
        X: numpy.ndarray of shape (1, n_features)
            Instance attributes.
        y: array_like
            numpy.ndarray of shape (1, n_targets)
            Target values for sample X.
        sample_weight: float
            Sample weight.
        """
        # Update the global target statistics used for normalization.
        # NOTE(review): the ValueError fallback appears to re-initialize the
        # accumulators when the stored sums cannot broadcast against the
        # incoming sample (e.g. a target-shape change) -- confirm intent.
        try:
            self.examples_seen += sample_weight
            self.sum_of_values += sample_weight * y
            self.sum_of_squares += sample_weight * y * y
        except ValueError:
            self.examples_seen = sample_weight
            self.sum_of_values = sample_weight * y
            self.sum_of_squares = sample_weight * y * y
        # Same bookkeeping for the attribute (feature) statistics.
        try:
            self.sum_of_attribute_values += sample_weight * X
            self.sum_of_attribute_squares += sample_weight * X * X
        except ValueError:
            self.sum_of_attribute_values = sample_weight * X
            self.sum_of_attribute_squares = sample_weight * X * X
        if self._tree_root is None:
            # First sample ever: the whole tree is a single active leaf.
            self._tree_root = self._new_learning_node()
            self._active_leaf_node_cnt = 1
        # Route the sample to its leaf; a None node means the instance fell
        # through a branch with no child, so a new leaf is grown there.
        found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
        leaf_node = found_node.node
        if leaf_node is None:
            leaf_node = self._new_learning_node()
            found_node.parent.set_child(found_node.parent_branch, leaf_node)
            self._active_leaf_node_cnt += 1
        if isinstance(leaf_node, self.LearningNode):
            learning_node = leaf_node
            learning_node.learn_from_instance(X, y, sample_weight, self)
            if self._growth_allowed and \
                    isinstance(learning_node,
                               RegressionHoeffdingTree.ActiveLearningNode):
                active_learning_node = learning_node
                weight_seen = active_learning_node.get_weight_seen()
                # Only attempt a split after `grace_period` worth of weight
                # has been observed since the last attempt.
                weight_diff = weight_seen - active_learning_node.\
                    get_weight_seen_at_last_split_evaluation()
                if weight_diff >= self.grace_period:
                    self._attempt_to_split(active_learning_node,
                                           found_node.parent,
                                           found_node.parent_branch)
                    active_learning_node.\
                        set_weight_seen_at_last_split_evaluation(weight_seen)
        # Periodically re-estimate the model's memory footprint.
        if self._train_weight_seen_by_model % self.memory_estimate_period == 0:
            self.estimate_model_byte_size()
    def predict(self, X):
        """Predicts the target value using mean class or the perceptron.

        Parameters
        ----------
        X: numpy.ndarray of shape (n_samples, n_features)
            Samples for which we want to predict the labels.

        Returns
        -------
        list
            Predicted target values.
        """
        r, _ = get_dimensions(X)
        predictions = np.zeros((r, self._n_targets), dtype=np.float64)
        for i in range(r):
            if self.leaf_prediction == _TARGET_MEAN:
                # Leaf statistics: votes[0] is weight seen, votes[1] the
                # weighted target sum, so their ratio is the target mean.
                votes = self.get_votes_for_instance(X[i]).copy()
                # Tree is not empty, otherwise, all target_values are set
                # equally, default to zero
                if votes != {}:
                    number_of_examples_seen = votes[0]
                    sum_of_values = votes[1]
                    predictions[i] = sum_of_values / number_of_examples_seen
            elif self.leaf_prediction == _PERCEPTRON:
                # Perceptron operates in normalized space; predictions are
                # mapped back with the global target mean/sd.
                if self.examples_seen > 1:
                    normalized_sample = self.normalize_sample(X[i])
                    normalized_prediction = \
                        np.matmul(self.get_weights_for_instance(X[i]),
                                  normalized_sample)
                    mean = self.sum_of_values / self.examples_seen
                    variance = (self.sum_of_squares -
                                (self.sum_of_values ** 2) /
                                self.examples_seen) / (self.examples_seen - 1)
                    sd = np.sqrt(variance, out=np.zeros_like(variance),
                                 where=variance >= 0.0)
                    # Samples are normalized using just one sd, as proposed in
                    # the iSoup-Tree method
                    predictions[i] = normalized_prediction * sd + mean
            elif self.leaf_prediction == _ADAPTIVE:
                # Compute both predictors, then keep, per target, the one
                # with the smaller faded absolute error at the leaf.
                if self.examples_seen > 1:
                    # Mean predictor
                    votes = self.get_votes_for_instance(X[i]).copy()
                    number_of_examples_seen = votes[0]
                    sum_of_values = votes[1]
                    pred_M = sum_of_values / number_of_examples_seen
                    # Perceptron
                    normalized_sample = self.normalize_sample(X[i])
                    normalized_prediction = \
                        np.matmul(self.get_weights_for_instance(X[i]),
                                  normalized_sample)
                    mean = self.sum_of_values / self.examples_seen
                    variance = (self.sum_of_squares -
                                (self.sum_of_values ** 2) /
                                self.examples_seen) / (self.examples_seen - 1)
                    sd = np.sqrt(variance, out=np.zeros_like(variance),
                                 where=variance >= 0.0)
                    pred_P = normalized_prediction * sd + mean
                    fmae = self._get_predictors_faded_error(X[i])
                    for j in range(self._n_targets):
                        if fmae['perceptron'][j] <= fmae['mean'][j]:
                            predictions[i, j] = pred_P[j]
                        else:
                            predictions[i, j] = pred_M[j]
        return predictions
    def predict_proba(self, X):
        """Not implemented for regression: always returns None."""
        pass
    def enforce_tracker_limit(self):
        """Memory management is not implemented for this tree: no-op."""
        pass
    def _attempt_to_split(self, node, parent, parent_idx: int):
        """Attempt to split a node.

        If there exists significant variance among the target space of the
        seen examples:

        1. Find split candidates and select the top 2.
        2. Compute the Hoeffding bound.
        3. If the difference between the merit ratio of the top 2 split
           candidates is smaller than 1 minus the Hoeffding bound:
           3.1 Replace the leaf node by a split node.
           3.2 Add a new leaf node on each branch of the new split node.
           3.3 Update tree's metrics

        Optional: Disable poor attribute. Depends on the tree's
        configuration.

        Parameters
        ----------
        node: ActiveLearningNode
            The node to evaluate.
        parent: SplitNode
            The node's parent.
        parent_idx: int
            Parent node's branch index.
        """
        split_criterion = IntraClusterVarianceReductionSplitCriterion()
        best_split_suggestions = node.\
            get_best_split_suggestions(split_criterion, self)
        # Ascending sort by merit: best suggestion is last.
        best_split_suggestions.sort(key=attrgetter('merit'))
        should_split = False
        if len(best_split_suggestions) < 2:
            # With a single candidate there is nothing to compare: split iff
            # at least one suggestion exists.
            should_split = len(best_split_suggestions) > 0
        else:
            hoeffding_bound = self.compute_hoeffding_bound(
                split_criterion.get_range_of_merit(
                    node.get_observed_class_distribution()
                ), self.split_confidence, node.get_weight_seen())
            best_suggestion = best_split_suggestions[-1]
            second_best_suggestion = best_split_suggestions[-2]
            # Split when the runner-up is clearly worse, or force a split to
            # break a tie once the bound drops below tie_threshold.
            # NOTE(review): a zero best merit would divide by zero here --
            # confirm the criterion guarantees positive merits.
            if (second_best_suggestion.merit / best_suggestion.merit <
                    1 - hoeffding_bound or hoeffding_bound <
                    self.tie_threshold):
                should_split = True
            if self.remove_poor_atts is not None and self.remove_poor_atts \
                    and not should_split:
                poor_atts = set()
                best_ratio = second_best_suggestion.merit \
                    / best_suggestion.merit
                # Add any poor attribute to set
                # TODO reactivation procedure???
                for i in range(len(best_split_suggestions)):
                    if best_split_suggestions[i].split_test is not None:
                        split_atts = best_split_suggestions[i].\
                            split_test.get_atts_test_depends_on()
                        if len(split_atts) == 1:
                            # An attribute is "poor" when its merit ratio is
                            # clearly below the runner-up's.
                            if best_split_suggestions[i].merit / \
                                    best_suggestion.merit < \
                                    best_ratio - 2 * hoeffding_bound:
                                poor_atts.add(int(split_atts[0]))
                for poor_att in poor_atts:
                    node.disable_attribute(poor_att)
        if should_split:
            split_decision = best_split_suggestions[-1]
            if split_decision.split_test is None:
                # Preprune - null wins
                self._deactivate_learning_node(node, parent, parent_idx)
            else:
                new_split = self.new_split_node(
                    split_decision.split_test,
                    node.get_observed_class_distribution()
                )
                # One child leaf per branch; perceptron-based children
                # inherit the parent's perceptron weights.
                for i in range(split_decision.num_splits()):
                    if self.leaf_prediction == _PERCEPTRON:
                        new_child = self._new_learning_node(
                            split_decision.
                            resulting_class_distribution_from_split(i),
                            node.perceptron_weight
                        )
                    elif self.leaf_prediction == _TARGET_MEAN:
                        new_child = self._new_learning_node(
                            split_decision.
                            resulting_class_distribution_from_split(i),
                            None)
                    elif self.leaf_prediction == _ADAPTIVE:
                        new_child = self._new_learning_node(
                            split_decision.
                            resulting_class_distribution_from_split(i),
                            node.perceptron_weight
                        )
                        # Resets faded errors
                        new_child.fMAE_M = np.zeros(self._n_targets,
                                                    dtype=np.float64)
                        new_child.fMAE_P = np.zeros(self._n_targets,
                                                    dtype=np.float64)
                    new_split.set_child(i, new_child)
                # The split leaf becomes a decision node with fresh leaves.
                self._active_leaf_node_cnt -= 1
                self._decision_node_cnt += 1
                self._active_leaf_node_cnt += split_decision.num_splits()
                if parent is None:
                    self._tree_root = new_split
                else:
                    parent.set_child(parent_idx, new_split)
            # Manage memory
            self.enforce_tracker_limit()
    def _deactivate_learning_node(self, to_deactivate:
                                  RegressionHoeffdingTree.ActiveLearningNode,
                                  parent: RegressionHoeffdingTree.SplitNode,
                                  parent_branch: int):
        """Deactivate a learning node.

        The active leaf is replaced by the matching inactive variant for the
        configured leaf predictor; inactive leaves keep learning but never
        split.

        Parameters
        ----------
        to_deactivate: ActiveLearningNode
            The node to deactivate.
        parent: SplitNode
            The node's parent.
        parent_branch: int
            Parent node's branch index.
        """
        if self.leaf_prediction == _TARGET_MEAN:
            new_leaf = self.InactiveLearningNodeForRegression(
                to_deactivate.get_observed_class_distribution()
            )
        elif self.leaf_prediction == _PERCEPTRON:
            new_leaf = self.InactiveLearningNodePerceptron(
                to_deactivate.get_observed_class_distribution(),
                to_deactivate.perceptron_weight,
                to_deactivate.random_state
            )
        elif self.leaf_prediction == _ADAPTIVE:
            new_leaf = self.InactiveLearningNodeAdaptive(
                to_deactivate.get_observed_class_distribution(),
                to_deactivate.perceptron_weight,
                to_deactivate.random_state
            )
            # Carry over the faded errors so predictor selection survives
            # deactivation (only adaptive nodes have these attributes).
            new_leaf.fMAE_M = to_deactivate.fMAE_M
            new_leaf.fMAE_P = to_deactivate.fMAE_P
        if parent is None:
            self._tree_root = new_leaf
        else:
            parent.set_child(parent_branch, new_leaf)
        self._active_leaf_node_cnt -= 1
        self._inactive_leaf_node_cnt += 1
def _more_tags(self):
return {'multioutput': True,
'multioutput_only': True}
| 41.585291 | 118 | 0.57133 |
03e5b2542b95177719e1241413072e4dd6904afd | 11,128 | py | Python | tensorflow/python/training/ftrl_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | 6 | 2016-09-07T18:38:41.000Z | 2020-01-12T23:01:03.000Z | tensorflow/python/training/ftrl_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | 1 | 2021-04-12T03:51:59.000Z | 2021-04-12T03:51:59.000Z | tensorflow/python/training/ftrl_test.py | jdehotin/TensorFlow | a6c5f8e4e013e54fed8dfcf49fb6de365f018022 | [
"Apache-2.0"
] | 8 | 2017-06-08T09:46:06.000Z | 2021-06-20T14:03:19.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class FtrlOptimizerTest(tf.test.TestCase):
def testFtrlwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([0.0, 0.0], dtype=dtype)
var1 = tf.Variable([0.0, 0.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-2.60260963, -4.29698515]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.28432083, -0.56694895]),
v1_val)
def testFtrlwithoutRegularization2(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-2.55607247, -3.98729396]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.28232238, -0.56096673]),
v1_val)
def testFtrlWithL1(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]),
v1_val)
def testFtrlWithL1_L2(self):
for dtype in [tf.half, tf.float32]:
with self.test_session() as sess:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([4.0, 3.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.2], dtype=dtype)
grads1 = tf.constant([0.01, 0.02], dtype=dtype)
opt = tf.train.FtrlOptimizer(3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(np.array([-0.24059935, -0.46829352]),
v0_val)
self.assertAllCloseAccordingToType(np.array([-0.02406147, -0.04830509]),
v1_val)
  def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
    """Run *opt* for *steps* updates on two fixed variables and return them.

    Args:
      opt: a tf.train optimizer instance to exercise.
      dtype: dtype of the variables/gradients (tf.half or tf.float32).
      steps: number of update steps to run.
      is_sparse: when True, feed the gradients as tf.IndexedSlices (one
        updated row per variable) instead of dense tensors.

    Returns:
      Tuple ``(v0_val, v1_val)`` of the variables' values after *steps*
      updates, fetched from the default session.
    """
    if is_sparse:
      var0 = tf.Variable([[0.0], [0.0]], dtype=dtype)
      var1 = tf.Variable([[0.0], [0.0]], dtype=dtype)
      grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1], dtype=dtype),
                                tf.constant([0]),
                                tf.constant([2, 1]))
      grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1], dtype=dtype),
                                tf.constant([1]),
                                tf.constant([2, 1]))
    else:
      var0 = tf.Variable([0.0, 0.0], dtype=dtype)
      var1 = tf.Variable([0.0, 0.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.2], dtype=dtype)
      grads1 = tf.constant([0.01, 0.02], dtype=dtype)
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.initialize_all_variables().run()
    sess = tf.get_default_session()
    # Sanity-check: both variables start at zero before any update runs.
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
    else:
      self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
      self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
    # Run Ftrl for a few steps
    for _ in range(steps):
      update.run()
    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val
  # When variables are initialized to zero, FTRL-Proximal has two properties:
  # 1. Without L1/L2 but with a fixed learning rate, FTRL-Proximal is
  #    identical to GradientDescent.
  # 2. Without L1/L2 but with an adaptive learning rate, FTRL-Proximal is
  #    identical to Adagrad.
  # Based on these two properties, we test whether our implementation of
  # FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
def testEquivAdagradwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype, is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [tf.half, tf.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
tf.train.FtrlOptimizer(3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
tf.train.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
# Entry point when the test file is executed directly (rather than via bazel).
if __name__ == "__main__":
  tf.test.main()
| 41.677903 | 80 | 0.571891 |
97903c0e71455a9185571bf556e47480c76ddb72 | 3,954 | py | Python | hashr.py | joetache4/md5r | 9efdd09263ff024caae9a1ad0386be6c96dbc06b | [
"MIT"
] | null | null | null | hashr.py | joetache4/md5r | 9efdd09263ff024caae9a1ad0386be6c96dbc06b | [
"MIT"
] | null | null | null | hashr.py | joetache4/md5r | 9efdd09263ff024caae9a1ad0386be6c96dbc06b | [
"MIT"
] | null | null | null | '''
Recursively compute hashes for all files in a directory.
Hash options include MD5, SHA1, SHA256, and SHA512.
author: Joe Tacheron
'''
from pathlib import Path
from fnmatch import fnmatch
import argparse
import hashlib
class PathType(object):
    '''argparse ``type=`` helper that validates a path argument.'''

    def __init__(self, exists=True, type='file', dash_ok=False):
        '''
        exists:
            True: the path must already exist
            False: the path must not exist
            None: existence is not checked
        type: 'file', 'dir', None, or a callable returning True for a
            valid Path object (None means the kind of path is not checked)
        dash_ok: whether to allow '-' as stdin/stdout
        '''
        assert exists in (True, False, None)
        assert type in ('file', 'dir', None) or hasattr(type, '__call__')
        self._exists = exists
        self._type = type
        self._dash_ok = dash_ok

    def __call__(self, s):
        path = Path(s)
        if s == '-':
            # '-' is the conventional stand-in for stdin/stdout; it can
            # never name a directory.
            if not self._dash_ok:
                raise argparse.ArgumentTypeError('standard input/output (-) not allowed')
            if self._type == 'dir':
                raise argparse.ArgumentTypeError('standard input/output (-) not allowed as directory path')
            return path
        present = path.exists()
        if self._exists == True and not present:
            raise argparse.ArgumentTypeError(f'path does not exist: {s}')
        if self._exists == False and present:
            raise argparse.ArgumentTypeError(f'path exists: {s}')
        # Type constraints only make sense for paths that exist.
        if present:
            self._check_type(path, s)
        return path

    def _check_type(self, path, s):
        # Enforce the configured file/dir/predicate constraint on *path*.
        if self._type is None:
            return
        if self._type == 'file':
            if not path.is_file():
                raise argparse.ArgumentTypeError(f'path is not a file: {s}')
        elif self._type == 'dir':
            if not path.is_dir():
                raise argparse.ArgumentTypeError(f'path is not a directory: {s}')
        elif not self._type(path):
            raise argparse.ArgumentTypeError(f'path not valid: {s}')
def digest(f, hasher, blocksize):
    """Stream file *f* through *hasher* in *blocksize*-byte chunks.

    Args:
        f: path (str or Path) of the file to hash.
        hasher: a fresh hashlib hash object (consumed by this call).
        blocksize: number of bytes to read per update.

    Returns:
        The hexadecimal digest string.
    """
    with open(f, 'rb') as file:
        while buf := file.read(blocksize):
            hasher.update(buf)
    return hasher.hexdigest()


# Convenience wrappers, one per supported algorithm.  Plain ``def``s replace
# the previous ``lambda`` assignments (PEP 8 E731) so tracebacks and repr()
# show useful names; the per-algorithm block-size constants are unchanged.
def md5(f):
    """MD5 hex digest of file *f*."""
    return digest(f, hashlib.md5(), 128 * 512)


def sha1(f):
    """SHA1 hex digest of file *f*."""
    return digest(f, hashlib.sha1(), 160 * 512)


def sha256(f):
    """SHA256 hex digest of file *f*."""
    return digest(f, hashlib.sha256(), 256 * 512)


def sha512(f):
    """SHA512 hex digest of file *f*."""
    return digest(f, hashlib.sha512(), 512 * 512)
def scan(path, hash, exclude_files, exclude_dirs):
    """Walk *path* depth-first, printing '<digest> <path>' per file.

    *hash* is a callable mapping a Path to a hex digest.  Names matching
    any Unix-style pattern in *exclude_files* / *exclude_dirs* are
    skipped; excluded directories are pruned wholesale.
    """
    name = path.name
    if path.is_file():
        if not any(fnmatch(name, pattern) for pattern in exclude_files):
            print('{} {}'.format(hash(path), path))
    elif path.is_dir():
        if not any(fnmatch(name, pattern) for pattern in exclude_dirs):
            for child in path.iterdir():
                scan(child, hash, exclude_files, exclude_dirs)
if __name__ == '__main__':
    # Command line: positional root path, mutually exclusive algorithm
    # flags (MD5 default), and optional exclusion patterns.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'root', metavar='FILE_OR_ROOT_DIR', type=PathType(exists=True, type=None),
        help='specify the file or root directory for hashing')
    algo = parser.add_mutually_exclusive_group()
    algo.add_argument('--md5', action='store_const', const=md5, dest='hash',
                      help='use MD5 algorithm (this is the default)')
    algo.add_argument('--sha1', action='store_const', const=sha1, dest='hash',
                      help='use SHA1 algorithm')
    algo.add_argument('--sha256', action='store_const', const=sha256, dest='hash',
                      help='use SHA256 algorithm')
    algo.add_argument('--sha512', action='store_const', const=sha512, dest='hash',
                      help='use SHA512 algorithm')
    parser.set_defaults(hash=md5)
    parser.add_argument('--xf', metavar='FILE', nargs='+', default=[],
                        help='specify files not to hash (uses Unix-style wildcard matching)')
    parser.add_argument('--xd', metavar='DIR', nargs='+', default=[],
                        help='specify directories not to scan (uses Unix-style wildcard matching)')
    args = parser.parse_args()
    scan(args.root, args.hash, args.xf, args.xd)
#input('Done. Press Enter to exit.') | 35.945455 | 150 | 0.68867 |
586507f28cb91f7b0fd020a26e5cba1f2e2e68b1 | 650 | py | Python | backend/db_select.py | mario21ic/api-demo | 10c7cab2f01ba786fea8888246d0ab200477352f | [
"MIT"
] | null | null | null | backend/db_select.py | mario21ic/api-demo | 10c7cab2f01ba786fea8888246d0ab200477352f | [
"MIT"
] | null | null | null | backend/db_select.py | mario21ic/api-demo | 10c7cab2f01ba786fea8888246d0ab200477352f | [
"MIT"
] | 3 | 2019-03-14T03:03:02.000Z | 2019-11-25T00:31:20.000Z | import datetime
import mysql.connector
# Demo script: connect to MySQL, list 1999 hires, then clean up.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a secrets store before using this outside a local demo.
cnx = mysql.connector.connect(
    host='db', database='api_db', user='root', password='myclave')
cursor = cnx.cursor()

query = ("SELECT first_name, last_name, hire_date FROM employees "
         "WHERE hire_date BETWEEN %s AND %s")
hire_start = datetime.date(1999, 1, 1)
hire_end = datetime.date(1999, 12, 31)
# Parameterized query keeps the date values out of the SQL string itself.
cursor.execute(query, (hire_start, hire_end))

for (first_name, last_name, hire_date) in cursor:
    print("{}, {} was hired on {:%d %b %Y}".format(
        last_name, first_name, hire_date))

cursor.close()
cnx.close()
| 27.083333 | 66 | 0.609231 |
4d8ff6189d54d7d4e5efcfe5b7650163b7503dab | 1,440 | py | Python | Practica6/variablesglobales.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | 1 | 2021-09-30T20:05:41.000Z | 2021-09-30T20:05:41.000Z | Practica6/variablesglobales.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | null | null | null | Practica6/variablesglobales.py | JosueHernandezR/An-lisis-de-Algoritmos | 9953f2d3fee6b4cfe842fdbbea83b46b62fa123f | [
"MIT"
] | null | null | null | #Análisis de Algoritmos 3CV2
# Alan Romero Lucero
# Josué David Hernández Ramírez
# Practice 6: Divide and conquer (Analysis of Algorithms, 3CV2)
"""
Description: this module only stores the program's global variables:
    parametros_1: List storing the parameters of the points to plot for
                  the brute-force maximum-subarray algorithm.  Each
                  element is a tuple holding the array size and the time
                  complexity in its first and second element respectively.
    parametros_2: List storing the parameters of the points to plot for
                  the maximum crossing-subarray algorithm.  Each element
                  is a tuple holding the array size and the time
                  complexity in its first and second element respectively.
    parametros_3: List storing the parameters of the points to plot for
                  the recursive maximum-subarray algorithm.  Each element
                  is a tuple holding the array size and the time
                  complexity in its first and second element respectively.
    tiempo:       Variable that stores the time complexity of each
                  algorithm.
"""
parametros_3 = [ ( 0, 0 ) ]
parametros_2 = [ ]
parametros_1 = [ ]
tiempo = 0 | 48 | 89 | 0.638194 |
01d3681f355817eae7d2cd623241235669c727f4 | 2,323 | py | Python | starvine/bvcopula/tests/test_copula_cdf.py | wgurecky/StarVine | b952a88eeaff476484ba6a26420cfe4ef575d162 | [
"BSD-3-Clause"
] | 12 | 2018-10-04T06:15:13.000Z | 2020-01-08T03:32:30.000Z | starvine/bvcopula/tests/test_copula_cdf.py | wgurecky/StarVine | b952a88eeaff476484ba6a26420cfe4ef575d162 | [
"BSD-3-Clause"
] | 25 | 2017-08-29T06:28:37.000Z | 2020-10-16T23:56:57.000Z | starvine/bvcopula/tests/test_copula_cdf.py | wgurecky/StarVine | b952a88eeaff476484ba6a26420cfe4ef575d162 | [
"BSD-3-Clause"
] | 3 | 2017-04-08T20:19:09.000Z | 2020-01-09T20:01:02.000Z | ##
# \brief Test copula CDF's to ensure integral on [0,1]^2 is 1
from __future__ import print_function, division
# COPULA IMPORTS
from starvine.bvcopula.copula.t_copula import StudentTCopula
from starvine.bvcopula.copula.gauss_copula import GaussCopula
from starvine.bvcopula.copula.frank_copula import FrankCopula
from starvine.bvcopula.copula.gumbel_copula import GumbelCopula
from starvine.bvcopula.copula.clayton_copula import ClaytonCopula
from starvine.bvcopula.copula.indep_copula import IndepCopula
#
import unittest
import numpy as np
import os
pwd_ = os.getcwd()
dataDir = pwd_ + "/tests/data/"
np.random.seed(123)
class TestCopulaCDF(unittest.TestCase):
    """Each bivariate copula CDF must equal 1.0 at the (u, v) = (1, 1) corner."""

    def testCopulaCDF(self):
        # check all CDFs == 1.0 at U, V = [1.0, 1.0]
        print("------------------- COPULA CDF INTEGRAL TEST ----------------------")

    def testTCopulaCDF(self):
        copula = StudentTCopula()
        u = np.ones(1) - 1e-12
        v = np.ones(1) - 1e-12
        corner = copula.cdf(u, v, 0.7, 10)
        self.assertAlmostEqual(corner[0], 1.0)

    def testGaussCopulaCDF(self):
        copula = GaussCopula()
        u = np.ones(1) - 1e-9
        v = np.ones(1) - 1e-9
        corner = copula.cdf(u, v, 0.7)
        self.assertAlmostEqual(corner[0], 1.0)

    def testFrankCopulaCDF(self):
        copula = FrankCopula()
        corner = copula.cdf(np.ones(1), np.ones(1), 2.7)
        self.assertAlmostEqual(corner[0], 1.0)

    def testGumbelCopulaCDF(self):
        copula = GumbelCopula()
        corner = copula.cdf(np.ones(1), np.ones(1), 2.7)
        self.assertAlmostEqual(corner[0], 1.0)

    def testClaytonCopulaCDF(self):
        copula = ClaytonCopula()
        corner = copula.cdf(np.ones(1), np.ones(1), 2.7)
        self.assertAlmostEqual(corner[0], 1.0)

    def testClayton90CopulaCDF(self):
        # Rotated (90 degree) Clayton copula.
        copula = ClaytonCopula(1)
        corner = copula.cdf(np.ones(1), np.ones(1), 2.7)
        self.assertAlmostEqual(corner[0], 1.0)

    def testIndepCopulaCDF(self):
        copula = IndepCopula()
        corner = copula.cdf(np.ones(1), np.ones(1))
        self.assertAlmostEqual(corner[0], 1.0)
| 35.19697 | 84 | 0.643134 |
cb67548e06f0c14c0b9adc3e6667a1115a4871db | 560 | py | Python | python/clear.py | bmccafferty/ping-pong-led-wall | 41eed47aa50c053a8b4ad508611093b12984d48c | [
"CC0-1.0"
] | 1 | 2020-11-26T22:28:23.000Z | 2020-11-26T22:28:23.000Z | python/clear.py | bmccafferty/ping-pong-led-wall | 41eed47aa50c053a8b4ad508611093b12984d48c | [
"CC0-1.0"
] | null | null | null | python/clear.py | bmccafferty/ping-pong-led-wall | 41eed47aa50c053a8b4ad508611093b12984d48c | [
"CC0-1.0"
] | 1 | 2021-04-29T15:18:25.000Z | 2021-04-29T15:18:25.000Z | #!/usr/bin/python3
# Clear all pixels currently lit (Reset)
import time
import board
import neopixel
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18

# The number of NeoPixels
num_pixels = 256

# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB

pixels = neopixel.NeoPixel(
    pixel_pin, num_pixels, brightness=0.2, auto_write=False, pixel_order=ORDER
)

# Bug fix: ``fill`` is a method on the NeoPixel object.  The original code
# did ``pixels.fill = (0, 0, 0)``, which replaced the method with a tuple
# and never touched the LEDs.  Call it instead to blank every pixel, then
# push the buffer out (auto_write is False, so show() is required).
pixels.fill((0, 0, 0))
pixels.show()
| 23.333333 | 89 | 0.739286 |
a4ee3bcb17de74d867b34df6246d80b9da2a6176 | 2,905 | py | Python | starlette_plugins.py | klen/starlette-plugins | 5ca24ddc1ade331e30e83272fe5e5703d2fd35fb | [
"MIT"
] | 1 | 2020-10-21T21:56:28.000Z | 2020-10-21T21:56:28.000Z | starlette_plugins.py | klen/starlette-plugins | 5ca24ddc1ade331e30e83272fe5e5703d2fd35fb | [
"MIT"
] | null | null | null | starlette_plugins.py | klen/starlette-plugins | 5ca24ddc1ade331e30e83272fe5e5703d2fd35fb | [
"MIT"
] | null | null | null | __version__ = "0.0.7"
__license__ = "MIT"
import threading
setup_lock = threading.Lock()
class PluginException(Exception):
    """Raised for plugin configuration errors (e.g. a plugin without a name)."""
class PluginMiddleware:
    """ASGI middleware shim that forwards every call to its bound plugin.

    Concrete subclasses are generated at runtime with ``plugin`` set to the
    plugin instance that should handle the traffic.
    """

    __slots__ = ('app',)

    # Overridden on generated subclasses; None on this base class.
    plugin = None

    def __init__(self, app):
        self.app = app

    def __call__(self, scope, receive, send) -> None:
        handler = self.plugin
        return handler.process(scope, receive, send, app=self.app)
class PluginMeta(type):
    """Metaclass that validates plugin classes at definition time."""

    def __new__(mcs, name, bases, params):
        cls = super().__new__(mcs, name, bases, params)
        # ``bases`` is empty only for the abstract root class, which is the
        # one class allowed to omit ``name``; every subclass must set it.
        if bases and not cls.name:
            raise PluginException('Plugin `%s` doesn\'t have a name.' % cls)
        return cls
class StarlettePlugin(metaclass=PluginMeta):
    """Base class for Starlette/ASGI plugins.

    Subclasses must define ``name`` (enforced by :class:`PluginMeta`).  A
    plugin may be bound eagerly (``Plugin(app)``) or lazily by calling the
    instance, which returns a middleware class suitable for
    ``app.add_middleware(...)``.
    """

    Exception = PluginException

    name = None
    config = {}
    # Placeholders; ``middleware`` is rebound to the coroutine method below
    # once the class body finishes executing.
    middleware = on_startup = on_shutdown = None

    def __init__(self, app=None, **settings):
        self.app = app
        self.config.update(settings)
        if self.app:
            self.setup(app)

    def __call__(self, app, **settings):
        """Bind the plugin to *app* and return a middleware class for it."""
        self.app = app

        # Setup plugins registry (app.ps.<name> -> plugin instance).
        with setup_lock:
            if not hasattr(app, 'ps'):
                app.ps = type('Plugins', (object,), {})

        setattr(app.ps, self.name, self)

        # Prepare configuration: merge overrides, then freeze into a class.
        self.config.update(settings)
        self.config = type('%sConfig' % self.name.title(), (object,), self.config)

        return type('%sMiddleware' % self.name.title(), (PluginMiddleware,), {'plugin': self})

    def setup(self, app, **settings):
        """Setup middlewares."""
        Middleware = self(app, **settings)
        self.app.add_middleware(Middleware)

    def process(self, scope, receive, send, app=None):
        """Process an ASGI call, routing lifespan events separately."""
        app = app or self.app
        try:
            if scope['type'] == 'lifespan':
                return self.lifespan(scope, receive, send, app)
            return self.middleware(scope, receive, send, app)
        except Exception as exc:
            return self.exception(exc, scope, receive, send, app)

    def lifespan(self, scope, receive, send, app):
        """Process lifespan cycle, dispatching startup/shutdown hooks."""
        async def reply_receive():
            message = await receive()
            if message['type'] == 'lifespan.startup':
                await self.startup(scope)
            elif message['type'] == 'lifespan.shutdown':
                await self.shutdown(scope)
            return message

        return app(scope, reply_receive, send)

    async def middleware(self, scope, receive, send, app):
        """Default middleware: pass the request straight through to *app*.

        Bug fix: the ASGI application signature is ``app(scope, receive,
        send)``.  The previous implementation called ``app(scope, send,
        receive)`` with the two callables swapped, breaking any plugin that
        relied on this default pass-through.
        """
        return await app(scope, receive, send)

    async def startup(self, scope):
        """Default startup method."""
        pass

    async def shutdown(self, scope):
        """Default shutdown method."""
        pass

    def exception(self, exc, scope, receive, send, app):
        """Default exception handler: re-raise."""
        raise exc
| 24.82906 | 94 | 0.585542 |
789275e423d8cc0bae85b361325d5b0b242f06a9 | 3,850 | py | Python | python/se4-w3c/tests/test_native_app.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:42.000Z | 2021-11-17T22:29:42.000Z | python/se3last-w3c/tests/test_native_app.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | null | null | null | python/se3last-w3c/tests/test_native_app.py | saucelabs-training/platform-config-tests | 11dfab8f9be2fe118ed0b0fa4adebb75a5f1a64c | [
"MIT"
] | 1 | 2021-11-17T22:29:35.000Z | 2021-11-17T22:29:35.000Z | import pytest
# Sauce Labs "Swag Labs" sample-app binaries (release 2.7.1) that the
# native-app tests below install on the simulator/emulator under test.
iOS = "https://github.com/saucelabs/sample-app-mobile/releases/download/2.7.1/iOS.Simulator.SauceLabs.Mobile.Sample.app.2.7.1.zip"
android = "https://github.com/saucelabs/sample-app-mobile/releases/download/2.7.1/Android.SauceLabs.Mobile.Sample.app.2.7.1.apk"
def test_latest_android_latest_appium(helpers):
    """Newest Android emulator (11.0) with the newest Appium (1.20.2)."""
    caps = {
        'platformName': 'Android',
        'appium:app': android,
        'appium:deviceName': 'Android GoogleAPI Emulator',
        'appium:platformVersion': '11.0',
        'sauce:options': {'appiumVersion': '1.20.2'},
        'appium:appWaitActivity': 'com.swaglabsmobileapp.MainActivity',
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
def test_latest_android_earliest_appium(helpers):
    """Newest Android emulator (11.0) with the oldest Appium (1.15.0)."""
    caps = {
        'platformName': 'Android',
        'appium:app': android,
        'appium:deviceName': 'Android GoogleAPI Emulator',
        'appium:platformVersion': '11.0',
        'sauce:options': {'appiumVersion': '1.15.0'},
        'appium:appWaitActivity': 'com.swaglabsmobileapp.MainActivity',
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
def test_earliest_android_latest_appium(helpers):
    """Oldest Android emulator (5.1) with the newest Appium (1.20.2)."""
    caps = {
        'platformName': 'Android',
        'appium:app': android,
        'appium:deviceName': 'Android GoogleAPI Emulator',
        'appium:platformVersion': '5.1',
        'sauce:options': {'appiumVersion': '1.20.2'},
        'appium:appWaitActivity': 'com.swaglabsmobileapp.MainActivity',
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
def test_earliest_android_latest_appium_actual(helpers):
    """Oldest Android emulator (5.1) with Appium 1.8.0 (latest actually supported)."""
    caps = {
        'platformName': 'Android',
        'appium:app': android,
        'appium:deviceName': 'Android GoogleAPI Emulator',
        'appium:platformVersion': '5.1',
        'sauce:options': {'appiumVersion': '1.8.0'},
        'appium:appWaitActivity': 'com.swaglabsmobileapp.MainActivity',
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
def test_earliest_android_earliest_appium(helpers):
    """Oldest Android emulator (5.1) with the oldest Appium (1.8.0)."""
    caps = {
        'platformName': 'Android',
        'appium:app': android,
        'appium:deviceName': 'Android GoogleAPI Emulator',
        'appium:platformVersion': '5.1',
        'sauce:options': {'appiumVersion': '1.8.0'},
        'appium:appWaitActivity': 'com.swaglabsmobileapp.MainActivity',
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
def test_latest_ios(helpers):
    """Newest iOS simulator (14.5) with the newest Appium (1.21.0)."""
    caps = {
        'platformName': 'iOS',
        'appium:app': iOS,
        'appium:deviceName': 'iPhone Simulator',
        'appium:platformVersion': '14.5',
        'sauce:options': {'appiumVersion': '1.21.0'},
    }
    driver = helpers.start_appium_driver(caps)
    helpers.validate_app(driver)
@pytest.mark.skip(reason="Does not work with 10.3")
def test_earliest_ios(helpers):
    """Oldest listed iOS simulator (10.3) — kept skipped until supported."""
    caps = {
        'platformName': 'iOS',
        'appium:app': iOS,
        'appium:deviceName': 'iPhone Simulator',
        'appium:platformVersion': '10.3',
        'sauce:options': {'appiumVersion': '1.8.0'},
    }
    driver = helpers.start_appium_driver(caps)
helpers.validate_app(driver)
def test_earliest_ios_actual(helpers):
caps = {}
caps['platformName'] = 'iOS'
caps['appium:app'] = iOS
caps['appium:deviceName'] = 'iPhone Simulator'
caps['appium:platformVersion'] = '11.0'
caps['sauce:options'] = {}
caps['sauce:options']['appiumVersion'] = '1.8.0'
driver = helpers.start_appium_driver(caps)
helpers.validate_app(driver)
| 33.478261 | 130 | 0.683117 |
f269bac529ca4247a34f1f985b407ef282c57941 | 784 | py | Python | tests/experiment/test_rotation_matrix.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 18 | 2020-04-30T08:48:39.000Z | 2022-03-30T14:42:01.000Z | tests/experiment/test_rotation_matrix.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 78 | 2019-06-30T03:45:58.000Z | 2022-03-23T15:04:44.000Z | tests/experiment/test_rotation_matrix.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 16 | 2019-07-03T17:18:53.000Z | 2022-01-12T15:54:56.000Z | # -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, carnis_jerome@yahoo.fr
import unittest
from bcdi.experiment.rotation_matrix import RotationMatrix
def run_tests(test_class):
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(suite)
class Test(unittest.TestCase):
    """Tests related to rotation matrix instantiation."""

    def test_instantiation_missing_parameter(self):
        # RotationMatrix has required constructor arguments, so a bare
        # call must fail with TypeError.
        self.assertRaises(TypeError, RotationMatrix)
# Allow running this test module directly (outside of a pytest/CI harness).
if __name__ == "__main__":
    run_tests(Test)
| 27.034483 | 84 | 0.714286 |
ea4a4facfed697e9b6e329989be333b938308ec9 | 1,291 | py | Python | script/convert.py | wjc852456/pytorch-quant | e4096ce9ae59cd9029b543edff631f1a6d00008a | [
"MIT"
] | 22 | 2019-05-10T18:14:35.000Z | 2022-02-13T20:26:32.000Z | script/convert.py | wjgaas/pytorch-quant | 1a1610dff0327932a16a1e176a739d9e3bdc4e61 | [
"MIT"
] | 3 | 2019-09-25T06:35:53.000Z | 2019-11-13T07:08:13.000Z | script/convert.py | wjgaas/pytorch-quant | 1a1610dff0327932a16a1e176a739d9e3bdc4e61 | [
"MIT"
] | 11 | 2018-09-27T03:53:17.000Z | 2022-01-21T09:25:18.000Z | import sys
sys.path.append("..")
import os
import numpy as np
import tqdm
from utee import misc
import argparse
import cv2
# Mirror of the pickled, JPEG-compressed ImageNet validation set this
# script expects as input.
imagenet_urls = [
    'http://ml.cs.tsinghua.edu.cn/~chenxi/dataset/val224_compressed.pkl'
]

parser = argparse.ArgumentParser(description='Extract the ILSVRC2012 val dataset')
parser.add_argument('--in_file', default='./val224_compressed.pkl', help='input file path')
parser.add_argument('--out_root', default='~/dataset', help='output file path')
args = parser.parse_args()

# The compressed pickle holds the JPEG strings plus their labels; the
# asserts guarantee the full 50k-image validation split is present.
d = misc.load_pickle(args.in_file)
assert len(d['data']) == 50000, len(d['data'])
assert len(d['target']) == 50000, len(d['target'])

data224 = []
data299 = []
# Bug fix: the progress bar previously passed ``total=1000`` even though
# the asserts above guarantee 50000 samples, so tqdm reported a bogus
# total/ETA.  Use the real length instead.
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=len(d['data'])):
    img224 = misc.str2img(img)
    #img299 = cv2.resize(img224, (299, 299))
    data224.append(img224)
    #data299.append(img299)

# Pack as NCHW (transpose from NHWC) before pickling.
data_dict224 = dict(
    data = np.array(data224).transpose(0, 3, 1, 2),
    target = d['target']
)
#data_dict299 = dict(
#    data = np.array(data299).transpose(0, 3, 1, 2),
#    target = d['target']
#)

if not os.path.exists(args.out_root):
    os.makedirs(args.out_root)
misc.dump_pickle(data_dict224, os.path.join(args.out_root, 'val224.pkl'))
#misc.dump_pickle(data_dict299, os.path.join(args.out_root, 'val299.pkl'))
| 26.346939 | 91 | 0.694036 |
0c3f7cd7ab83ff6bf1d9df31744618215544db52 | 1,722 | py | Python | vendor/google/cloud/WebSecurityScanner/synth.py | dof-005/JobPortal-Laravel-Vue | 3049d7aa45b87f9d2926203394f523a8a09584cb | [
"MIT"
] | 6 | 2020-10-14T12:52:01.000Z | 2021-07-30T18:53:22.000Z | vendor/google/cloud/WebSecurityScanner/synth.py | dof-005/JobPortal-Laravel-Vue | 3049d7aa45b87f9d2926203394f523a8a09584cb | [
"MIT"
] | null | null | null | vendor/google/cloud/WebSecurityScanner/synth.py | dof-005/JobPortal-Laravel-Vue | 3049d7aa45b87f9d2926203394f523a8a09584cb | [
"MIT"
] | 4 | 2020-12-19T19:34:27.000Z | 2022-02-28T17:28:49.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
# Generate the PHP client for the Web Security Scanner v1beta API from its
# artman/GAPIC configuration, then rearrange the generated tree.
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
library = gapic.php_library(
    service='websecurityscanner',
    version='v1beta',
    config_path=f'/google/cloud/websecurityscanner/artman_websecurityscanner_v1beta.yaml',
    artman_output_name=f'google-cloud-websecurityscanner-v1beta')
# copy all src
s.move(library / f'src/V1beta')
# copy proto files to src also
s.move(library / f'proto/src/Google/Cloud/WebSecurityScanner', f'src/')
s.move(library / f'tests/')
# copy GPBMetadata file to metadata
s.move(library / f'proto/src/GPBMetadata/Google/Cloud/Websecurityscanner', f'metadata/')
# fix year
# Normalize the copyright year stamped by the generator in sources and tests.
s.replace(
    'src/**/**/*.php',
    r'Copyright \d{4}',
    r'Copyright 2019')
s.replace(
    'tests/**/**/*Test.php',
    r'Copyright \d{4}',
    r'Copyright 2019')
# Change the wording for the deprecation warning.
s.replace(
    'src/*/*_*.php',
    r'will be removed in the next major release',
    'will be removed in a future release')
| 30.210526 | 90 | 0.730546 |
4ae8d95a505a9505c2c12f681d2f143e95c3a854 | 32,197 | py | Python | src/sentry/interfaces/stacktrace.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/interfaces/stacktrace.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/interfaces/stacktrace.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | """
sentry.interfaces.stacktrace
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Stacktrace', )
import re
import six
import posixpath
from itertools import islice, chain
from django.conf import settings
from django.utils.translation import ugettext as _
from six.moves.urllib.parse import urlparse
from sentry.app import env
from sentry.interfaces.base import Interface, InterfaceValidationError, prune_empty_keys
from sentry.interfaces.schemas import validate_and_default_interface
from sentry.models import UserOption
from sentry.utils.safe import trim, trim_dict
from sentry.web.helpers import render_to_string
_ruby_anon_func = re.compile(r'_\d{2,}')
_filename_version_re = re.compile(
r"""(?:
v?(?:\d+\.)*\d+| # version numbers, v1, 1.0.0
[a-f0-9]{7,8}| # short sha
[a-f0-9]{32}| # md5
[a-f0-9]{40} # sha1
)/""", re.X | re.I
)
# Java Spring specific anonymous classes.
# see: http://mydailyjava.blogspot.co.at/2013/11/cglib-missing-manual.html
_java_cglib_enhancer_re = re.compile(r'''(\$\$[\w_]+?CGLIB\$\$)[a-fA-F0-9]+(_[0-9]+)?''', re.X)
# Handle Javassist auto-generated classes and filenames:
# com.example.api.entry.EntriesResource_$$_javassist_74
# com.example.api.entry.EntriesResource_$$_javassist_seam_74
# EntriesResource_$$_javassist_seam_74.java
_java_assist_enhancer_re = re.compile(r'''(\$\$_javassist)(?:_seam)?(?:_[0-9]+)?''', re.X)
# Clojure anon functions are compiled down to myapp.mymodule$fn__12345
_clojure_enhancer_re = re.compile(r'''(\$fn__)\d+''', re.X)
# fields that need to be the same between frames for them to be considered
# recursive calls
RECURSION_COMPARISON_FIELDS = [
'abs_path',
'package',
'module',
'filename',
'function',
'lineno',
'colno',
]
def max_addr(cur, addr):
    """Return the running maximum hex-digit width.

    ``cur`` is the widest digit count seen so far; ``addr`` is a '0x…'
    string or None (None leaves the maximum unchanged).
    """
    if addr is None:
        return cur
    # The digit count excludes the two-character '0x' prefix.
    return max(cur, len(addr) - 2)
def pad_hex_addr(addr, length):
    """Zero-pad the digits of '0x…' *addr* to *length* hex digits.

    Either argument being None returns *addr* unchanged.
    """
    if None in (addr, length):
        return addr
    digits = addr[2:]
    return '0x{}'.format(digits.rjust(length, '0'))
def trim_package(pkg):
    """Reduce a binary image path to a short display name.

    Keeps only the last path component and strips a trailing library
    extension (.dylib/.so/.a).  Falsy input collapses to '?'.
    """
    if not pkg:
        return '?'
    basename = pkg.split('/')[-1]
    for ext in ('.dylib', '.so', '.a'):
        if basename.endswith(ext):
            return basename[:-len(ext)]
    return basename
def to_hex_addr(addr):
    """Normalize an address (int, decimal string, or '0x…' hex string) to '0x%x' form.

    None passes through unchanged.  Raises ValueError for unsupported
    types and for normalized values longer than 24 characters.
    """
    if addr is None:
        return None
    if isinstance(addr, six.integer_types):
        value = addr
    elif isinstance(addr, six.string_types):
        # A '0x' prefix marks hexadecimal; otherwise the digits are decimal.
        value = int(addr[2:], 16) if addr[:2] == '0x' else int(addr)
    else:
        raise ValueError('Unsupported address format %r' % (addr, ))
    rv = '0x%x' % value
    if len(rv) > 24:
        raise ValueError('Address too long %r' % (rv, ))
    return rv
def get_context(lineno, context_line, pre_context=None, post_context=None, filename=None):
    """Build ``(line_number, source_line)`` pairs around *lineno*.

    Args:
        lineno: 1-based line number of ``context_line`` (int or int-like);
            None yields an empty context.
        context_line: the source line at *lineno* (may be None when pre/post
            context exists).
        pre_context: lines immediately before *lineno*, in order.
        post_context: lines immediately after *lineno*, in order.
        filename: unused; kept for backward compatibility with callers that
            still pass it.

    Returns:
        List of ``(lineno, line)`` tuples covering the pre-context, the
        context line and the post-context with consecutive line numbers.
    """
    if lineno is None:
        return []
    if context_line is None and not (pre_context or post_context):
        return []

    lineno = int(lineno)
    context = []
    # Number the first pre-context line so the run ends exactly at *lineno*.
    at_lineno = lineno - len(pre_context or [])
    for line in pre_context or ():
        context.append((at_lineno, line))
        at_lineno += 1
    context.append((at_lineno, context_line))
    at_lineno += 1
    for line in post_context or ():
        context.append((at_lineno, line))
        at_lineno += 1
    # Note: the previous implementation computed ``start_lineno`` twice,
    # clamped it to zero without ever using it again, and rewrote a local
    # ``filename`` right before returning; all of that was dead code and
    # has been removed without changing the returned value.
    return context
def is_newest_frame_first(event):
    """Decide stacktrace display order for *event*.

    Python (and unknown-platform) events default to oldest-first; every
    other platform defaults to newest-first.  An authenticated user's
    ``stacktrace_order`` option overrides the default ('1' forces
    oldest-first, '2' forces newest-first).
    """
    newest_first = event.platform not in ('python', None)
    # Per-user override, read from the current request if one is active.
    if env.request and env.request.user.is_authenticated():
        display = UserOption.objects.get_value(
            user=env.request.user,
            key='stacktrace_order',
            default=None,
        )
        if display == '1':
            newest_first = False
        elif display == '2':
            newest_first = True
    return newest_first
def is_url(filename):
    """Return True when the frame filename is actually a URL, not a file path."""
    url_prefixes = ('file:', 'http:', 'https:', 'applewebdata:')
    return filename.startswith(url_prefixes)
def remove_function_outliers(function):
    """
    Attempt to normalize functions by removing common platform outliers.

    - Ruby generates (random?) integers for various anonymous style functions
      such as in erb and the active_support library.
    - Block functions have metadata that we don't care about.
    """
    if function.startswith('block '):
        return 'block'
    # Collapse trailing numeric suffixes (e.g. ``foo_12345``) into a stable
    # ``foo_<anon>`` placeholder.
    return _ruby_anon_func.sub('_<anon>', function)
def remove_filename_outliers(filename, platform=None):
    """
    Attempt to normalize filenames by removing common platform outliers.

    - Sometimes filename paths contain build numbers
    """
    # On cocoa we generally only want to use the last path component as
    # the filename. The reason for this is that the chances are very high
    # that full filenames contain information we do want to strip but
    # currently can't (for instance because the information we get from
    # the dwarf files does not contain prefix information) and that might
    # contain things like /Users/foo/Dropbox/...
    if platform == 'cocoa':
        return posixpath.basename(filename)
    if platform == 'java':
        filename = _java_assist_enhancer_re.sub(r'\1<auto>', filename)
    # strip embedded version/build-number path components
    return _filename_version_re.sub('<version>/', filename)
def remove_module_outliers(module, platform=None):
    """Remove things that augment the module but really should not."""
    # only java modules carry known generated-name noise
    if platform != 'java':
        return module
    # reflection accessors get a generated numeric suffix per call site
    if module.startswith('sun.reflect.GeneratedMethodAccessor'):
        return 'sun.reflect.GeneratedMethodAccessor'
    module = _java_cglib_enhancer_re.sub(r'\1<auto>', module)
    module = _java_assist_enhancer_re.sub(r'\1<auto>', module)
    return _clojure_enhancer_re.sub(r'\1<auto>', module)
def slim_frame_data(frames, frame_allowance=settings.SENTRY_MAX_STACKTRACE_FRAMES):
    """
    Removes various excess metadata from middle frames which go beyond
    ``frame_allowance``.

    Mutates the frames in place (clearing ``vars`` and the pre/post source
    context on trimmed frames) and returns ``None``.  System frames are
    trimmed before in-app frames.
    """
    frames_len = 0
    app_frames = []
    system_frames = []
    for frame in frames:
        frames_len += 1
        if frame is not None and frame.in_app:
            app_frames.append(frame)
        else:
            system_frames.append(frame)

    if frames_len <= frame_allowance:
        return

    remaining = frames_len - frame_allowance
    app_count = len(app_frames)
    system_allowance = max(frame_allowance - app_count, 0)
    if system_allowance:
        # Floor division: true division yields a float on Python 3, and a
        # float slice index below would raise TypeError.
        half_max = system_allowance // 2
        # prioritize trimming system frames
        for frame in system_frames[half_max:-half_max]:
            if frame is None:
                # None placeholders carry no metadata to strip
                continue
            frame.vars = None
            frame.pre_context = None
            frame.post_context = None
            remaining -= 1
    else:
        for frame in system_frames:
            if frame is None:
                continue
            frame.vars = None
            frame.pre_context = None
            frame.post_context = None
            remaining -= 1

    if not remaining:
        return

    app_allowance = app_count - remaining
    half_max = app_allowance // 2
    for frame in app_frames[half_max:-half_max]:
        frame.vars = None
        frame.pre_context = None
        frame.post_context = None
def validate_bool(value, required=True):
    """Validate that *value* is a boolean (or also ``None`` when not required).

    Returns the value unchanged on success.  Raises ``AssertionError`` (the
    same type the previous ``assert`` produced, so callers are unaffected)
    via an explicit ``raise`` so validation stays active under ``python -O``.
    """
    allowed = (True, False) if required else (True, False, None)
    if value not in allowed:
        raise AssertionError('%r is not a valid boolean value' % (value, ))
    return value
def handle_nan(value):
    "Remove nan values that can't be json encoded"
    # only floats can hold inf/-inf/nan
    if not isinstance(value, float):
        return value
    if value == float('inf'):
        return '<inf>'
    if value == float('-inf'):
        return '<-inf>'
    # NaN is the only float that is not equal to itself
    if value != value:
        return '<nan>'
    return value
def is_recursion(frame1, frame2):
    "Returns a boolean indicating whether frames are recursive calls."
    # recursive iff every comparison field matches between the two frames
    return all(
        getattr(frame1, field, None) == getattr(frame2, field, None)
        for field in RECURSION_COMPARISON_FIELDS
    )
class Frame(Interface):
    # A single stack frame: source location (abs_path/filename/lineno/colno),
    # symbolic identity (module/function/symbol/package), native addresses,
    # plus optional source context and captured local variables.

    @classmethod
    def to_python(cls, data, raw=False):
        """Normalize client-submitted frame ``data`` into a Frame.

        Drops legacy placeholder values ('?', ''), reconciles
        abs_path/filename (skipped when ``raw``), trims oversized fields,
        and coerces lineno/colno to ints.  Raises InterfaceValidationError
        on invalid payloads.
        """
        is_valid, errors = validate_and_default_interface(data, cls.path)
        if not is_valid:
            raise InterfaceValidationError("Invalid stack frame data.")
        abs_path = data.get('abs_path')
        filename = data.get('filename')
        symbol = data.get('symbol')
        function = data.get('function')
        module = data.get('module')
        package = data.get('package')
        # For legacy reasons
        if function in ('?', ''):
            function = None
        # For consistency reasons
        if symbol in ('?', ''):
            symbol = None
        # Some of this processing should only be done for non raw frames
        if not raw:
            # absolute path takes priority over filename
            # (in the end both will get set)
            if not abs_path:
                abs_path = filename
                filename = None
            if not filename and abs_path:
                if is_url(abs_path):
                    urlparts = urlparse(abs_path)
                    if urlparts.path:
                        filename = urlparts.path
                    else:
                        filename = abs_path
                else:
                    filename = abs_path
        platform = data.get('platform')
        # 'vars' may arrive as a list/tuple; coerce to an index-keyed dict
        context_locals = data.get('vars') or {}
        if isinstance(context_locals, (list, tuple)):
            context_locals = dict(enumerate(context_locals))
        elif not isinstance(context_locals, dict):
            context_locals = {}
        context_locals = trim_dict(context_locals, object_hook=handle_nan)
        # extra data is used purely by internal systems,
        # so we dont trim it
        extra_data = data.get('data') or {}
        if isinstance(extra_data, (list, tuple)):
            extra_data = dict(enumerate(extra_data))
        # XXX: handle lines which were sent as 'null'
        context_line = trim(data.get('context_line'), 256)
        if context_line is not None:
            pre_context = data.get('pre_context', None)
            if pre_context:
                pre_context = [c or '' for c in pre_context]
            post_context = data.get('post_context', None)
            if post_context:
                post_context = [c or '' for c in post_context]
        else:
            # without a context line the surrounding lines are meaningless
            pre_context, post_context = None, None
        in_app = validate_bool(data.get('in_app'), False)
        kwargs = {
            'abs_path': trim(abs_path, 2048),
            'filename': trim(filename, 256),
            'platform': platform,
            'module': trim(module, 256),
            'function': trim(function, 256),
            'package': package,
            'image_addr': to_hex_addr(data.get('image_addr')),
            'symbol': trim(symbol, 256),
            'symbol_addr': to_hex_addr(data.get('symbol_addr')),
            'instruction_addr': to_hex_addr(data.get('instruction_addr')),
            'trust': trim(data.get('trust'), 16),
            'in_app': in_app,
            'context_line': context_line,
            # TODO(dcramer): trim pre/post_context
            'pre_context': pre_context,
            'post_context': post_context,
            'vars': context_locals or None,
            'data': extra_data or None,
            'errors': data.get('errors'),
        }
        if data.get('lineno') is not None:
            lineno = int(data['lineno'])
            if lineno < 0:
                # negative line numbers are nonsensical; drop them
                lineno = None
            kwargs['lineno'] = lineno
        else:
            kwargs['lineno'] = None
        if data.get('colno') is not None:
            kwargs['colno'] = int(data['colno'])
        else:
            kwargs['colno'] = None
        return cls(**kwargs)

    def to_json(self):
        """Serialize the frame, pruning empty/None fields from the result."""
        return prune_empty_keys({
            'abs_path': self.abs_path or None,
            'filename': self.filename or None,
            'platform': self.platform or None,
            'module': self.module or None,
            'function': self.function or None,
            'package': self.package or None,
            'image_addr': self.image_addr,
            'symbol': self.symbol,
            'symbol_addr': self.symbol_addr,
            'instruction_addr': self.instruction_addr,
            'trust': self.trust,
            'in_app': self.in_app,
            'context_line': self.context_line or None,
            'pre_context': self.pre_context or None,
            'post_context': self.post_context or None,
            'vars': self.vars or None,
            'data': self.data or None,
            'errors': self.errors or None,
            'lineno': self.lineno,
            'colno': self.colno
        })

    def get_hash(self, platform=None):
        """
        The hash of the frame varies depending on the data available.

        Our ideal scenario is the module name in addition to the line of
        context. However, in several scenarios we opt for other approaches due
        to platform constraints.

        This is one of the few areas in Sentry that isn't platform-agnostic.

        Returns a list of hashable components (possibly empty, meaning the
        frame should not contribute to grouping).
        """
        platform = self.platform or platform
        output = []
        # Safari throws [native code] frames in for calls like ``forEach``
        # whereas Chrome ignores these. Let's remove it from the hashing algo
        # so that they're more likely to group together
        if self.filename == '<anonymous>':
            hashable_filename = None
        elif self.filename:
            hashable_filename = remove_filename_outliers(self.filename, platform)
        else:
            hashable_filename = None
        if self.filename == '[native code]':
            return output
        if self.module:
            if self.is_unhashable_module(platform):
                output.append('<module>')
            else:
                output.append(remove_module_outliers(self.module, platform))
        elif hashable_filename and not self.is_url() and not self.is_caused_by():
            output.append(hashable_filename)
        if self.context_line is None:
            can_use_context = False
        elif len(self.context_line) > 120:
            # overly long lines are usually minified and not stable
            can_use_context = False
        elif self.is_url() and not self.function:
            # the context is too risky to use here as it could be something
            # coming from an HTML page or it could be minified/unparseable
            # code, so lets defer to other lesser heuristics (like lineno)
            can_use_context = False
        elif self.function and self.is_unhashable_function():
            # NOTE(review): this branch and the else below both yield True;
            # the is_unhashable_function() split is currently redundant.
            can_use_context = True
        else:
            can_use_context = True
        # XXX: hack around what appear to be non-useful lines of context
        if can_use_context:
            output.append(self.context_line)
        elif not output:
            # If we were unable to achieve any context at this point
            # (likely due to a bad JavaScript error) we should just
            # bail on recording this frame
            return output
        elif self.symbol:
            output.append(self.symbol)
        elif self.function:
            if self.is_unhashable_function():
                output.append('<function>')
            else:
                output.append(remove_function_outliers(self.function))
        elif self.lineno is not None:
            output.append(self.lineno)
        return output

    def get_api_context(self, is_public=False, pad_addr=None):
        """Shape the frame for the web API (camelCase keys, padded addresses).

        Local variables are included only for non-public access; sourcemap
        metadata from ``self.data`` is merged in when present.
        """
        data = {
            'filename': self.filename,
            'absPath': self.abs_path,
            'module': self.module,
            'package': self.package,
            'platform': self.platform,
            'instructionAddr': pad_hex_addr(self.instruction_addr, pad_addr),
            'symbolAddr': pad_hex_addr(self.symbol_addr, pad_addr),
            'function': self.function,
            'symbol': self.symbol,
            'context': get_context(
                lineno=self.lineno,
                context_line=self.context_line,
                pre_context=self.pre_context,
                post_context=self.post_context,
                filename=self.filename or self.module,
            ),
            'lineNo': self.lineno,
            'colNo': self.colno,
            'inApp': self.in_app,
            'trust': self.trust,
            'errors': self.errors,
        }
        if not is_public:
            data['vars'] = self.vars
        # TODO(dcramer): abstract out this API
        if self.data:
            data.update(
                {
                    'map': self.data['sourcemap'].rsplit('/', 1)[-1],
                    'origFunction': self.data.get('orig_function', '?'),
                    'origAbsPath': self.data.get('orig_abs_path', '?'),
                    'origFilename': self.data.get('orig_filename', '?'),
                    'origLineNo': self.data.get('orig_lineno', '?'),
                    'origColNo': self.data.get('orig_colno', '?'),
                }
            )
            if is_url(self.data['sourcemap']):
                data['mapUrl'] = self.data['sourcemap']
        return data

    def get_meta_context(self, meta, is_public=False):
        """Shape a frame's ``meta`` annotation dict like get_api_context.

        Returns None when there is no meta for this frame.
        """
        if not meta:
            return
        return {
            'filename': meta.get('filename'),
            'absPath': meta.get('abs_path'),
            'module': meta.get('module'),
            'package': meta.get('package'),
            'platform': meta.get('platform'),
            'instructionAddr': meta.get('instruction_addr'),
            'symbolAddr': meta.get('symbol_addr'),
            'function': meta.get('function'),
            'symbol': meta.get('symbol'),
            'context': get_context(
                lineno=meta.get('lineno'),
                context_line=meta.get('context_line'),
                pre_context=meta.get('pre_context'),
                post_context=meta.get('post_context'),
                filename=meta.get('filename') if self.filename else meta.get('module'),
            ),
            'lineNo': meta.get('lineno'),
            'colNo': meta.get('colno'),
            'inApp': meta.get('in_app'),
            'trust': meta.get('trust'),
            'errors': meta.get('errors'),
        }

    def is_url(self):
        """Return True if this frame's abs_path points at a URL."""
        if not self.abs_path:
            return False
        # URLs can be generated such that they are:
        # blob:http://example.com/7f7aaadf-a006-4217-9ed5-5fbf8585c6c0
        # https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL
        if self.abs_path.startswith('blob:'):
            return True
        return is_url(self.abs_path)

    def is_caused_by(self):
        """Return True for synthetic 'Caused by:' frames (Java chained exceptions)."""
        # XXX(dcramer): dont compute hash using frames containing the 'Caused by'
        # text as it contains an exception value which may contain dynamic
        # values (see raven-java#125)
        return self.filename.startswith('Caused by: ')

    def is_unhashable_module(self, platform):
        """Return True when the module name is generated noise and unsafe to hash."""
        # Fix for the case where module is a partial copy of the URL
        # and should not be hashed
        if (platform == 'javascript' and '/' in self.module
                and self.abs_path and self.abs_path.endswith(self.module)):
            return True
        elif platform == 'java' and '$$Lambda$' in self.module:
            # generated lambda class names differ per JVM run
            return True
        return False

    def is_unhashable_function(self):
        """Return True for generated/anonymous function names unsafe to hash."""
        # TODO(dcramer): lambda$ is Java specific
        # TODO(dcramer): [Anonymous is PHP specific (used for things like SQL
        # queries and JSON data)
        return self.function.startswith(('lambda$', '[Anonymous'))

    def to_string(self, event):
        """Render this frame as text using the platform-specific template."""
        if event.platform is not None:
            choices = [event.platform]
        else:
            choices = []
        choices.append('default')
        templates = ['sentry/partial/frames/%s.txt' % choice for choice in choices]
        return render_to_string(
            templates, {
                'abs_path': self.abs_path,
                'filename': self.filename,
                'function': self.function,
                'module': self.module,
                'lineno': self.lineno,
                'colno': self.colno,
                'context_line': self.context_line,
            }
        ).strip('\n')

    def get_culprit_string(self, platform=None):
        """Return a short human-readable location string for this frame."""
        # If this frame has a platform, we use it instead of the one that
        # was passed in (as that one comes from the exception which might
        # not necessarily be the same platform).
        if self.platform is not None:
            platform = self.platform
        if platform in ('objc', 'cocoa', 'native'):
            return self.function or '?'
        fileloc = self.module or self.filename
        if not fileloc:
            return ''
        elif platform in ('javascript', 'node'):
            # function and fileloc might be unicode here, so let it coerce
            # to a unicode string if needed.
            return '%s(%s)' % (self.function or '?', fileloc)
        return '%s in %s' % (fileloc, self.function or '?', )
class Stacktrace(Interface):
    """
    A stacktrace contains a list of frames, each with various bits (most optional)
    describing the context of that frame. Frames should be sorted from oldest
    to newest.

    The stacktrace contains an element, ``frames``, which is a list of hashes. Each
    hash must contain **at least** the ``filename`` attribute. The rest of the values
    are optional, but recommended.

    Additionally, if the list of frames is large, you can explicitly tell the
    system that you've omitted a range of frames. The ``frames_omitted`` must
    be a single tuple two values: start and end. For example, if you only
    removed the 8th frame, the value would be (8, 9), meaning it started at the
    8th frame, and went until the 9th (the number of frames omitted is
    end-start). The values should be based on a one-index.

    The list of frames should be ordered by the oldest call first.

    Each frame must contain the following attributes:

    ``filename``
      The relative filepath to the call

    OR

    ``function``
      The name of the function being called

    OR

    ``module``
      Platform-specific module path (e.g. stacktrace)

    The following additional attributes are supported:

    ``lineno``
      The line number of the call
    ``colno``
      The column number of the call
    ``abs_path``
      The absolute path to filename
    ``context_line``
      Source code in filename at lineno
    ``pre_context``
      A list of source code lines before context_line (in order) -- usually [lineno - 5:lineno]
    ``post_context``
      A list of source code lines after context_line (in order) -- usually [lineno + 1:lineno + 5]
    ``in_app``
      Signifies whether this frame is related to the execution of the relevant
      code in this stacktrace. For example, the frames that might power the
      framework's webserver of your app are probably not relevant, however calls
      to the framework's library once you start handling code likely are. See
      notes below on implicity ``in_app`` behavior.
    ``vars``
      A mapping of variables which were available within this frame (usually context-locals).
    ``package``
      Name of the package or object file that the frame is contained in. This
      for instance can be the name of a DLL, .NET Assembly, jar file, object
      file etc.

    >>> {
    >>>     "frames": [{
    >>>         "abs_path": "/real/file/name.py"
    >>>         "filename": "file/name.py",
    >>>         "function": "myfunction",
    >>>         "vars": {
    >>>             "key": "value"
    >>>         },
    >>>         "pre_context": [
    >>>             "line1",
    >>>             "line2"
    >>>         ],
    >>>         "context_line": "line3",
    >>>         "lineno": 3,
    >>>         "in_app": true,
    >>>         "post_context": [
    >>>             "line4",
    >>>             "line5"
    >>>         ],
    >>>     }],
    >>>     "frames_omitted": [13, 56]
    >>> }

    Implicity ``in_app`` behavior exists when the value is not specified on all
    frames within a stacktrace (or collectively within an exception if this is
    part of a chain).

    If **any frame** is marked with ``in_app=True`` or ``in_app=False``:

    - Set ``in_app=False`` where ``in_app is None``

    If **all frames** are marked identical values for ``in_app``:

    - Set ``in_app=False`` on all frames

    .. note:: This interface can be passed as the 'stacktrace' key in addition
              to the full interface path.
    """
    # relative ordering weight among interfaces when rendering an event
    score = 1950

    def __iter__(self):
        """Iterate over the contained Frame objects."""
        return iter(self.frames)

    @classmethod
    def to_python(cls, data, slim_frames=True, raw=False):
        """Build a Stacktrace from client-submitted ``data``.

        Enforces the hard frame limit (keeping the last frame for
        debugging), converts each entry via Frame.to_python, and optionally
        slims excess frame metadata.  Raises InterfaceValidationError on
        invalid payloads.
        """
        is_valid, errors = validate_and_default_interface(data, cls.path)
        if not is_valid:
            raise InterfaceValidationError("Invalid stack frame data.")
        # Trim down the frame list to a hard limit. Leave the last frame in place in case
        # it's useful for debugging.
        frameiter = data.get('frames') or []
        if len(frameiter) > settings.SENTRY_STACKTRACE_FRAMES_HARD_LIMIT:
            frameiter = chain(
                islice(data['frames'], settings.SENTRY_STACKTRACE_FRAMES_HARD_LIMIT - 1), (data['frames'][-1],))
        frame_list = []
        for f in frameiter:
            if f is None:
                continue
            # XXX(dcramer): handle PHP sending an empty array for a frame
            frame_list.append(Frame.to_python(f or {}, raw=raw))
        kwargs = {
            'frames': frame_list,
        }
        kwargs['registers'] = None
        if data.get('registers') and isinstance(data['registers'], dict):
            kwargs['registers'] = data.get('registers')
        kwargs['frames_omitted'] = data.get('frames_omitted') or None
        instance = cls(**kwargs)
        if slim_frames:
            slim_frame_data(instance)
        return instance

    def get_has_system_frames(self):
        """Return True if any frame is marked in_app (i.e. a split exists)."""
        # This is a simplified logic from how the normalizer works.
        # Because this always works on normalized data we do not have to
        # consider the "all frames are in_app" case. The normalizer lives
        # in stacktraces.normalize_in_app which will take care of that.
        return any(frame.in_app for frame in self.frames)

    def get_longest_address(self):
        """Return the maximum instruction/symbol address across all frames."""
        rv = None
        for frame in self.frames:
            rv = max_addr(rv, frame.instruction_addr)
            rv = max_addr(rv, frame.symbol_addr)
        return rv

    def get_api_context(self, is_public=False):
        """Shape the stacktrace for the web API (camelCase keys)."""
        longest_addr = self.get_longest_address()
        frame_list = [
            f.get_api_context(is_public=is_public, pad_addr=longest_addr) for f in self.frames
        ]
        return {
            'frames': frame_list,
            'framesOmitted': self.frames_omitted,
            'registers': self.registers,
            'hasSystemFrames': self.get_has_system_frames(),
        }

    def get_api_meta(self, meta, is_public=False):
        """Shape the per-frame ``meta`` annotations to match get_api_context."""
        if not meta:
            return meta
        frame_meta = {}
        for index, value in six.iteritems(meta.get('frames', {})):
            frame = self.frames[int(index)]
            # NOTE(review): Frame defines get_meta_context, not get_api_meta;
            # presumably the Interface base class provides get_api_meta --
            # verify, otherwise this raises AttributeError.
            frame_meta[index] = frame.get_api_meta(value, is_public=is_public)
        return {
            '': meta.get(''),
            'frames': frame_meta,
            'framesOmitted': meta.get('frames_omitted'),
            'registers': meta.get('registers'),
        }

    def to_json(self):
        """Serialize the stacktrace, pruning empty fields."""
        return prune_empty_keys({
            'frames': [f and f.to_json() for f in self.frames] or None,
            'frames_omitted': self.frames_omitted,
            'registers': self.registers,
        })

    def compute_hashes(self, platform):
        """Return the grouping hashes: system-frame hash plus (if different)
        the app-frames-only hash."""
        system_hash = self.get_hash(platform, system_frames=True)
        if not system_hash:
            return []
        app_hash = self.get_hash(platform, system_frames=False)
        if system_hash == app_hash or not app_hash:
            return [system_hash]
        return [system_hash, app_hash]

    def get_hash(self, platform=None, system_frames=True):
        """Build the list of hashable components for grouping.

        Returns an empty list when the stacktrace is not trustworthy enough
        to group on.  Recursive call sequences are squashed so recursion
        depth does not affect grouping.
        """
        frames = self.frames
        # TODO(dcramer): this should apply only to platform=javascript
        # Browser JS will often throw errors (from inlined code in an HTML page)
        # which contain only a single frame, no function name, and have the HTML
        # document as the filename. In this case the hash is often not usable as
        # the context cannot be trusted and the URL is dynamic (this also means
        # the line number cannot be trusted).
        stack_invalid = (len(frames) == 1 and not frames[0].function and frames[0].is_url())
        if stack_invalid:
            return []
        if not system_frames:
            total_frames = len(frames)
            frames = [f for f in frames if f.in_app] or frames
            # if app frames make up less than 10% of the stacktrace discard
            # the hash as invalid
            if len(frames) / float(total_frames) < 0.10:
                return []
        if not frames:
            return []
        output = []
        # stacktraces that only differ by the number of recursive calls should
        # hash the same, so we squash recursive calls by comparing each frame
        # to the previous frame
        output.extend(frames[0].get_hash(platform))
        prev_frame = frames[0]
        for frame in frames[1:]:
            if not is_recursion(frame, prev_frame):
                output.extend(frame.get_hash(platform))
            prev_frame = frame
        return output

    def to_string(self, event, is_public=False, **kwargs):
        """Render a short textual stacktrace (app frames only, max 10)."""
        return self.get_stacktrace(event, system_frames=False, max_frames=10)

    def get_stacktrace(
        self, event, system_frames=True, newest_first=None, max_frames=None, header=True
    ):
        """Render the stacktrace as text.

        Frame order follows ``newest_first`` (defaulting to the event's
        platform/user preference); ``max_frames`` truncates on the side away
        from the newest frame, with an elision note for hidden frames.
        """
        if newest_first is None:
            newest_first = is_newest_frame_first(event)
        result = []
        if header:
            if newest_first:
                result.append(_('Stacktrace (most recent call first):'))
            else:
                result.append(_('Stacktrace (most recent call last):'))
            result.append('')
        frames = self.frames
        num_frames = len(frames)
        if not system_frames:
            frames = [f for f in frames if f.in_app is not False]
            # fall back to all frames when nothing is marked in-app
            if not frames:
                frames = self.frames
        if newest_first:
            frames = frames[::-1]
        if max_frames:
            visible_frames = max_frames
            if newest_first:
                start, stop = None, max_frames
            else:
                start, stop = -max_frames, None
        else:
            visible_frames = len(frames)
            start, stop = None, None
        if not newest_first and visible_frames < num_frames:
            result.extend(
                (
                    '(%d additional frame(s) were not displayed)' % (num_frames - visible_frames, ),
                    '...'
                )
            )
        for frame in frames[start:stop]:
            result.append(frame.to_string(event))
        if newest_first and visible_frames < num_frames:
            result.extend(
                (
                    '...',
                    '(%d additional frame(s) were not displayed)' % (num_frames - visible_frames, )
                )
            )
        return '\n'.join(result)

    def get_culprit_string(self, platform=None):
        """Return the culprit: the newest in-app frame's location, falling
        back to the newest frame overall."""
        default = None
        for frame in reversed(self.frames):
            if frame.in_app:
                culprit = frame.get_culprit_string(platform=platform)
                if culprit:
                    return culprit
            elif default is None:
                default = frame.get_culprit_string(platform=platform)
        return default
| 34.361793 | 112 | 0.589465 |
00d2a0eec9c5915ce0b5b7910efd4a79e80bfb0e | 7,240 | py | Python | metrics.py | mikelmh025/self-conditioned-gan | 51713879893b34879a106d6ac964de653c6191e0 | [
"MIT"
] | 145 | 2020-06-18T23:53:42.000Z | 2022-03-29T12:07:16.000Z | metrics.py | mikelmh025/self-conditioned-gan | 51713879893b34879a106d6ac964de653c6191e0 | [
"MIT"
] | 9 | 2020-06-29T06:19:58.000Z | 2022-01-25T13:24:31.000Z | metrics.py | mikelmh025/self-conditioned-gan | 51713879893b34879a106d6ac964de653c6191e0 | [
"MIT"
] | 18 | 2020-06-19T01:41:20.000Z | 2021-10-11T06:19:52.000Z | import argparse
import os
import json
from tqdm import tqdm
import numpy as np
import torch
from gan_training.config import load_config
from seeded_sampler import SeededSampler
# Command-line interface: which experiments to evaluate and which metrics
# to compute for them.
parser = argparse.ArgumentParser('Computes numbers used in paper and caches them to a result files. Examples include FID, IS, reverse-KL, # modes, FSD, cluster NMI, Purity.')
parser.add_argument('paths', nargs='+', type=str, help='list of configs for each experiment')
parser.add_argument('--it', type=int, default=-1, help='If set, computes numbers only for that iteration')
parser.add_argument('--every', type=int, default=-1, help='skips some checkpoints and only computes those whose iteration number are divisible by every')
parser.add_argument('--fid', action='store_true', help='compute FID metric')
parser.add_argument('--inception', action='store_true', help='compute IS metric')
parser.add_argument('--modes', action='store_true', help='compute # modes and reverse-KL metric')
parser.add_argument('--fsd', action='store_true', help='compute FSD metric')
parser.add_argument('--cluster_metrics', action='store_true', help='compute clustering metrics (NMI, purity)')
parser.add_argument('--device', type=int, default=1, help='device to run the metrics on (can run into OOM issues if same as main device)')
args = parser.parse_args()
device = args.device
# worklist of paths to explore (directories are expanded during the walk)
dirs = list(args.paths)
# number of samples to draw per checkpoint, and the sampling batch size
N = 50000
BS = 100
# dataset names recognized in experiment paths
datasets = ['imagenet', 'cifar', 'stacked_mnist', 'places']
# ground-truth image archives used by the FSD metric
dataset_to_img = {
    'places': 'output/places_gt_imgs.npz',
    'imagenet': 'output/imagenet_gt_imgs.npz'}
def load_results(results_dir):
    """Load the six metric result JSON files under *results_dir*.

    Missing files are first seeded with an empty JSON object so reads
    always succeed.  Returns the parsed dicts in a fixed order:
    fid, is, kl, nmodes, fsd, cluster.
    """
    names = (
        'fid_results.json',
        'is_results.json',
        'kl_results.json',
        'nmodes_results.json',
        'fsd_results.json',
        'cluster_metrics.json',
    )
    results = []
    for name in names:
        path = os.path.join(results_dir, name)
        if not os.path.exists(path):
            # seed a fresh empty result file so the read below succeeds
            with open(path, 'w') as f:
                f.write(json.dumps({}))
        with open(path) as f:
            results.append(json.load(f))
    return results
def get_dataset_from_path(path):
    """Return the first known dataset whose name occurs in *path*.

    Prints the inferred name; returns None when nothing matches.
    """
    matched = next((name for name in datasets if name in path), None)
    if matched is not None:
        print('Inferred dataset:', matched)
    return matched
def pt_to_np(imgs):
    '''normalizes pytorch image in [-1, 1] to [0, 255]'''
    # NCHW -> NHWC, then rescale in place: x * 0.5 + 0.5 maps [-1, 1] to
    # [0, 1], * 255 to [0, 255]; clamp guards out-of-range inputs.
    channels_last = imgs.permute(0, 2, 3, 1)
    rescaled = channels_last.mul_(0.5).add_(0.5).mul_(255)
    return rescaled.clamp_(0, 255).numpy()
def sample(sampler):
    """Draw N fake images from *sampler*, BS at a time, as a numpy array
    in [0, 255] (NHWC)."""
    with torch.no_grad():
        collected = []
        n_batches = N // BS + 1
        for _ in tqdm(range(n_batches)):
            batch = sampler.sample(BS)[0].detach().cpu()
            collected.extend([x.detach().cpu() for x in batch])
        # over-sampled by up to one batch; keep exactly N
        stacked = torch.stack(collected[:N], dim=0)
    return pt_to_np(stacked)
root = './'
# Depth-first walk over the requested paths: directories are expanded onto
# the worklist, *.yaml files are treated as experiment configs.
while len(dirs) > 0:
    path = dirs.pop()
    if os.path.isdir(path): # search down tree for config files
        for d1 in os.listdir(path):
            dirs.append(os.path.join(path, d1))
    else:
        if path.endswith('.yaml'):
            config = load_config(path, default_path='configs/default.yaml')
            outdir = config['training']['out_dir']
            # nothing to evaluate: no training output and no pretrained model
            if not os.path.exists(outdir) and config['pretrained'] == {}:
                print('Skipping', path, 'outdir', outdir)
                continue
            results_dir = os.path.join(outdir, 'results')
            checkpoint_dir = os.path.join(outdir, 'chkpts')
            os.makedirs(results_dir, exist_ok=True)
            # cached per-metric result dicts, keyed by iteration (as str)
            fid_results, is_results, kl_results, nmodes_results, fsd_results, cluster_results = load_results(results_dir)
            checkpoint_files = os.listdir(checkpoint_dir) if os.path.exists(checkpoint_dir) else []
            if config['pretrained'] != {}:
                checkpoint_files = checkpoint_files + ['pretrained']
            for checkpoint in checkpoint_files:
                # evaluate every model_*.pt snapshot plus the pretrained entry
                if (checkpoint.endswith('.pt') and checkpoint != 'model.pt') or checkpoint == 'pretrained':
                    print('Computing for', checkpoint)
                    if 'model' in checkpoint:
                        # infer iteration number from checkpoint file w/o loading it
                        if 'model_' in checkpoint:
                            it = int(checkpoint.split('model_')[1].split('.pt')[0])
                        else:
                            continue
                        if args.every != 0 and it % args.every != 0:
                            continue
                        # iteration 0 is often useless, skip it
                        if it == 0 or args.it != -1 and it != args.it:
                            continue
                    elif checkpoint == 'pretrained':
                        it = 'pretrained'
                    # NOTE(review): a *.pt file without 'model' in its name
                    # would reach this line with 'it' unbound/stale -- the
                    # checkpoint naming scheme presumably prevents that.
                    it = str(it)
                    clusterer_path = os.path.join(root, checkpoint_dir, f'clusterer{it}.pkl')
                    # don't save samples for each iteration for disk space
                    samples_path = os.path.join(outdir, 'results', 'samples.npz')
                    # result dicts that must already contain this iteration
                    # for sampling to be skipped
                    targets = []
                    if args.inception:
                        targets = targets + [is_results]
                    if args.fid:
                        targets = targets + [fid_results]
                    if args.modes:
                        targets = targets + [kl_results, nmodes_results]
                    if args.fsd:
                        targets = targets + [fsd_results]
                    if all([it in result for result in targets]):
                        print('Already generated', it, path)
                    else:
                        sampler = SeededSampler(path,
                                                model_path=os.path.join(root, checkpoint_dir, checkpoint),
                                                clusterer_path=clusterer_path,
                                                pretrained=config['pretrained'])
                        samples = sample(sampler)
                        # NOTE(review): dataset_name is only bound on this
                        # branch; the metric commands below that use it are
                        # guarded by the same 'it not in ...' checks.
                        dataset_name = get_dataset_from_path(path)
                        np.savez(samples_path, fake=samples, real=dataset_name)
                    # each metric runs as a subprocess against the cached samples
                    arguments = f'--samples {samples_path} --it {it} --results_dir {results_dir}'
                    if args.fid and it not in fid_results:
                        os.system(f'CUDA_VISIBLE_DEVICES={device} python gan_training/metrics/fid.py {arguments}')
                    if args.inception and it not in is_results:
                        os.system(f'CUDA_VISIBLE_DEVICES={device} python gan_training/metrics/tf_is/inception_score.py {arguments}')
                    if args.modes and (it not in kl_results or it not in nmodes_results):
                        os.system(f'CUDA_VISIBLE_DEVICES={device} python utils/get_empirical_distribution.py {arguments} --dataset {dataset_name}')
                    if args.cluster_metrics and it not in cluster_results:
                        os.system(f'CUDA_VISIBLE_DEVICES={device} python cluster_metrics.py {path} --model_it {it}')
                    if args.fsd and it not in fsd_results:
                        gt_path = dataset_to_img[dataset_name]
                        os.system(f'CUDA_VISIBLE_DEVICES={device} python -m seeing.fsd {gt_path} {samples_path} --it {it} --results_dir {results_dir}')
33b332e600af4a4e3d16ef99bf8cfc98dbcef4a1 | 1,346 | py | Python | interpret.py | amirfarhat/give-py | 4d0285e412d86cc72ca578c282253d7735210e42 | [
"MIT"
] | null | null | null | interpret.py | amirfarhat/give-py | 4d0285e412d86cc72ca578c282253d7735210e42 | [
"MIT"
] | null | null | null | interpret.py | amirfarhat/give-py | 4d0285e412d86cc72ca578c282253d7735210e42 | [
"MIT"
] | null | null | null | import sys
# syntax to swap out for python3
SYNTAX_KEYWORD_MAP = {
    # synonyms for def
    'make' : 'def',
    # synonyms for return
    'give' : 'return',
    # synonyms for print
    'show' : 'print',
    # synonyms for append
    '.also' : '.append',
    # synonyms for import
    'bring' : 'import',
    'want' : 'import',
    # synonyms for assert
    'insist' : 'assert',
    # give syntax we want to eliminate
    ' take ': '',
    ' take' : ''
}

def main():
    """Translate the .give file named on the command line into python3.

    Rewrites every give keyword to its python equivalent using
    SYNTAX_KEYWORD_MAP (in insertion order, so the longer ' take '
    pattern is applied before ' take') and writes the result to
    _temp.py.  Exits with status 1 on bad usage, 0 on success.
    """
    # exactly one argument expected: the .give file to translate
    # (sys.exit is used instead of the site-provided exit() so the
    # script also works under `python -S`)
    if len(sys.argv) != 2:
        print("Usage: interpret.py <file.give>")
        sys.exit(1)
    # sanitize .give filename
    to_run = sys.argv[1].strip()
    # file must end with .give
    if not to_run.endswith('.give'):
        print("Mast ind .give pendejo")
        sys.exit(1)
    # open code file and interpret give to python
    print("Converting {} to python3...".format(to_run))
    with open(to_run, 'r') as code_file:
        code = code_file.read()
    # replace give keywords with python keywords (plain textual
    # substitution; dict order matters)
    for old, new in SYNTAX_KEYWORD_MAP.items():
        code = code.replace(old, new)
    # make a .py file of the give code
    with open('_temp.py', 'w+') as temp:
        temp.write(code)
    # successfully converted .give to .py
    print("Successfully converting {} to python3".format(to_run))
    sys.exit(0)
main() | 22.813559 | 65 | 0.560921 |
d45644f580e906bb284d19da2d40cd0de09daf94 | 2,301 | py | Python | tests/integration/modules/test_localemod.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 2 | 2020-11-02T22:08:26.000Z | 2020-11-14T13:44:46.000Z | tests/integration/modules/test_localemod.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 3 | 2021-03-31T19:54:10.000Z | 2021-12-13T20:47:12.000Z | tests/integration/modules/test_localemod.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 2 | 2020-11-04T06:32:02.000Z | 2020-11-06T11:01:18.000Z | import pytest
import salt.utils.platform
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, requires_salt_modules, slowTest
from tests.support.unit import skipIf
def _find_new_locale(current_locale):
for locale in ["en_US.UTF-8", "de_DE.UTF-8", "fr_FR.UTF-8"]:
if locale != current_locale:
return locale
@skipIf(salt.utils.platform.is_windows(), "minion is windows")
@skipIf(salt.utils.platform.is_darwin(), "locale method is not supported on mac")
@skipIf(
    salt.utils.platform.is_freebsd(),
    "locale method is supported only within login classes or environment variables",
)
@requires_salt_modules("locale")
@pytest.mark.windows_whitelisted
class LocaleModuleTest(ModuleCase):
    """Integration tests for the salt ``locale`` execution module.

    Runs against a live minion via ``run_function``; the gen/set tests are
    destructive because they change the machine's locale configuration.
    """

    def test_get_locale(self):
        """locale.get_locale returns something other than the unsupported-platform error."""
        locale = self.run_function("locale.get_locale")
        self.assertNotIn("Unsupported platform!", locale)

    @destructiveTest
    @slowTest
    def test_gen_locale(self):
        """locale.gen_locale can generate a locale different from the current one."""
        # Make sure charmaps are available on test system before attempting
        # call gen_locale. We log this error to the user in the function, but
        # we don't want to fail this test if this is missing on the test system.
        char_maps = self.run_function("cmd.run_all", ["locale -m"])
        if char_maps["stdout"] == "":
            self.skipTest("locale charmaps not available. Skipping test.")
        if char_maps["retcode"] and char_maps["stderr"]:
            self.skipTest(
                "{}. Cannot generate locale. Skipping test.".format(char_maps["stderr"])
            )
        locale = self.run_function("locale.get_locale")
        new_locale = _find_new_locale(locale)
        ret = self.run_function("locale.gen_locale", [new_locale])
        self.assertTrue(ret)

    @destructiveTest
    @slowTest
    def test_set_locale(self):
        """locale.set_locale switches the system locale (and restores it afterwards)."""
        original_locale = self.run_function("locale.get_locale")
        locale_to_set = _find_new_locale(original_locale)
        self.run_function("locale.gen_locale", [locale_to_set])
        ret = self.run_function("locale.set_locale", [locale_to_set])
        new_locale = self.run_function("locale.get_locale")
        self.assertTrue(ret)
        self.assertEqual(locale_to_set, new_locale)
        # restore the machine's original locale
        self.run_function("locale.set_locale", [original_locale])
8054f33277eb509921b95ff2b360be646e075446 | 10,862 | py | Python | clevercsv/detect_type.py | baldurmen/CleverCSV | a7c7c812f2dc220b8f45f3409daac6e933bc44a2 | [
"MIT"
] | 989 | 2019-02-22T12:14:17.000Z | 2022-03-28T01:33:20.000Z | clevercsv/detect_type.py | baldurmen/CleverCSV | a7c7c812f2dc220b8f45f3409daac6e933bc44a2 | [
"MIT"
] | 27 | 2019-12-22T00:09:28.000Z | 2022-03-30T22:45:50.000Z | clevercsv/detect_type.py | baldurmen/CleverCSV | a7c7c812f2dc220b8f45f3409daac6e933bc44a2 | [
"MIT"
] | 55 | 2019-10-22T13:09:53.000Z | 2022-01-03T04:28:26.000Z | # -*- coding: utf-8 -*-
"""
Code for computing the type score.
Author: Gertjan van den Burg
"""
import json
import regex
from .cparser_util import parse_string
# Minimum value returned by type_score() so the score is never exactly zero.
DEFAULT_EPS_TYPE = 1e-10
# Used this site: https://unicode-search.net/unicode-namesearch.pl
# Specials allowed in unicode_alphanum regex if is_quoted = False
SPECIALS_ALLOWED = [
    "-",
    "_",
    # Periods
    "\u002e",
    "\u06d4",
    "\u3002",
    "\ufe52",
    "\uff0e",
    "\uff61",
    # Parentheses
    "\u0028",
    "\u0029",
    "\u27ee",
    "\u27ef",
    "\uff08",
    "\uff09",
    # Question marks
    "\u003F",
    "\u00BF",
    "\u037E",
    "\u055E",
    "\u061F",
    "\u1367",
    "\u1945",
    "\u2047",
    "\u2048",
    "\u2049",
    "\u2CFA",
    "\u2CFB",
    "\u2E2E",
    "\uA60F",
    "\uA6F7",
    "\uFE16",
    "\uFE56",
    "\uFF1F",
    chr(69955), # chakma question mark
    chr(125279), # adlam initial question mark
    # Exclamation marks
    "\u0021",
    "\u00A1",
    "\u01C3",
    "\u055C",
    "\u07F9",
    "\u109F",
    "\u1944",
    "\u203C",
    "\u2048",
    "\u2049",
    "\uAA77",
    "\uFE15",
    "\uFE57",
    "\uFF01",
    chr(125278), # adlam initial exclamation mark
]
# Additional specials allowed in unicode_alphanum_quoted regex
# (commas in several scripts become legal inside a quoted cell).
QUOTED_SPECIALS_ALLOWED = [
    ",",
    "\u060C",
    "\u1363",
    "\u1802",
    "\u1808",
    "\uFF0C",
    "\uFE50",
]
# Compiled lazily in TypeDetector._compile_regexes(); keys are the type names
# used by the is_* test methods. Patterns use the third-party `regex` module
# (\p{..} Unicode property classes are not supported by stdlib `re`).
# NOTE(review): "time_HH" is byte-identical to "time_HHMM" — looks
# unintentional; confirm against upstream before changing.
PATTERNS = {
    "number_1": "^(?=[+-\.\d])[+-]?(?:0|[1-9]\d*)?(((?P<dot>((?<=\d)\.|\.(?=\d)))?(?(dot)(?P<yes_dot>\d*(\d*[eE][+-]?\d+)?)|(?P<no_dot>((?<=\d)[eE][+-]?\d+)?)))|((?P<comma>,)?(?(comma)(?P<yes_comma>\d+(\d+[eE][+-]?\d+)?)|(?P<no_comma>((?<=\d)[eE][+-]?\d+)?))))$",
    "number_2": "[+-]?(?:[1-9]|[1-9]\d{0,2})(?:\,\d{3})+\.\d*",
    "number_3": "[+-]?(?:[1-9]|[1-9]\d{0,2})(?:\.\d{3})+\,\d*",
    "url": "((https?|ftp):\/\/(?!\-))?(((?:[\p{L}\p{N}-]+\.)+([a-z]{2,}|local)(\.[a-z]{2,3})?)|localhost(\:\d{1,5})?|(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(\:\d{1,5})?))(\/[\p{L}\p{N}_\/()~?=&%\-\#\.:]*)?(\.[a-z]+)?",
    "email": r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)",
    "ipv4": "(?:\d{1,3}\.){3}\d{1,3}",
    "unicode_alphanum": "(\p{N}?\p{L}+[\p{N}\p{L}\ "
    + regex.escape("".join(SPECIALS_ALLOWED))
    + "]*|\p{L}?[\p{N}\p{L}\ "
    + regex.escape("".join(SPECIALS_ALLOWED))
    + "]+)",
    "unicode_alphanum_quoted": "(\p{N}?\p{L}+[\p{N}\p{L}\ "
    + regex.escape(
        "".join(SPECIALS_ALLOWED) + "".join(QUOTED_SPECIALS_ALLOWED)
    )
    + "]*|\p{L}?[\p{N}\p{L}\ "
    + regex.escape(
        "".join(SPECIALS_ALLOWED) + "".join(QUOTED_SPECIALS_ALLOWED)
    )
    + "]+)",
    "time_hhmmss": "(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])",
    "time_hhmm": "(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])",
    "time_HHMM": "(0[0-9]|1[0-9]|2[0-3])([0-5][0-9])",
    "time_HH": "(0[0-9]|1[0-9]|2[0-3])([0-5][0-9])",
    "time_hmm": "([0-9]|1[0-9]|2[0-3]):([0-5][0-9])",
    "time_hhmmsszz": "(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])[+-]([0-1][0-9]):([0-5][0-9])",
    "currency": "\p{Sc}\s?(.*)",
    "unix_path": "[~.]?(?:\/[a-zA-Z0-9\.\-\_]+)+\/?",
    "date": "((0[1-9]|1[0-2])((0[1-9]|[12]\d|3[01])([12]\d{3}|\d{2})|(?P<sep1>[-\/. ])(0?[1-9]|[12]\d|3[01])(?P=sep1)([12]\d{3}|\d{2}))|(0[1-9]|[12]\d|3[01])((0[1-9]|1[0-2])([12]\d{3}|\d{2})|(?P<sep2>[-\/. ])(0?[1-9]|1[0-2])(?P=sep2)([12]\d{3}|\d{2}))|([12]\d{3}|\d{2})((?P<sep3>[-\/. ])(0?[1-9]|1[0-2])(?P=sep3)(0?[1-9]|[12]\d|3[01])|年(0?[1-9]|1[0-2])月(0?[1-9]|[12]\d|3[01])日|년(0?[1-9]|1[0-2])월(0?[1-9]|[12]\d|3[01])일|(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01]))|(([1-9]|1[0-2])(?P<sep4>[-\/. ])(0?[1-9]|[12]\d|3[01])(?P=sep4)([12]\d{3}|\d{2})|([1-9]|[12]\d|3[01])(?P<sep5>[-\/. ])(0?[1-9]|1[0-2])(?P=sep5)([12]\d{3}|\d{2})))",
}
class TypeDetector(object):
    """Detects the type of a CSV cell by matching it against PATTERNS.

    The first matching test in ``_type_tests`` wins, so test order matters
    (e.g. "empty" is checked before everything else).
    """

    def __init__(self, strip_whitespace=True):
        # strip_whitespace: if True, cells are stripped before type checks.
        self.patterns = PATTERNS.copy()
        self.strip_whitespace = strip_whitespace
        self._compile_regexes()
        self._register_type_tests()
    def _compile_regexes(self):
        # Compile every pattern once; _run_regex then uses fullmatch.
        for key, value in self.patterns.items():
            self.patterns[key] = regex.compile(value)
    def _register_type_tests(self):
        # Ordered list of (type name, predicate). Order defines precedence.
        self._type_tests = [
            ("empty", self.is_empty),
            ("url", self.is_url),
            ("email", self.is_email),
            ("ipv4", self.is_ipv4),
            ("number", self.is_number),
            ("time", self.is_time),
            ("percentage", self.is_percentage),
            ("currency", self.is_currency),
            ("unix_path", self.is_unix_path),
            ("nan", self.is_nan),
            ("date", self.is_date),
            ("datetime", self.is_datetime),
            ("unicode_alphanum", self.is_unicode_alphanum),
            ("bytearray", self.is_bytearray),
            ("json", self.is_json_obj),
        ]
    def list_known_types(self):
        """Return the names of all registered type tests, in test order."""
        return [tt[0] for tt in self._type_tests]
    def is_known_type(self, cell, is_quoted=False):
        """True if detect_type() recognizes the cell as any known type."""
        # (reads as "detect_type(...) is not None" — `not x is None`)
        return not self.detect_type(cell, is_quoted=is_quoted) is None
    def detect_type(self, cell, is_quoted=False):
        """Return the name of the first matching type, or None."""
        cell = cell.strip() if self.strip_whitespace else cell
        for name, func in self._type_tests:
            if func(cell, is_quoted=is_quoted):
                return name
        return None
    def _run_regex(self, cell, patname):
        # Full-match the named compiled pattern against the (stripped) cell.
        cell = cell.strip() if self.strip_whitespace else cell
        pat = self.patterns.get(patname, None)
        match = pat.fullmatch(cell)
        return match is not None
    def is_number(self, cell, **kwargs):
        """True for plain, comma-grouped, or dot-grouped numerics."""
        if cell == "":
            return False
        if self._run_regex(cell, "number_1"):
            return True
        if self._run_regex(cell, "number_2"):
            return True
        if self._run_regex(cell, "number_3"):
            return True
        return False
    def is_ipv4(self, cell, **kwargs):
        """True for dotted-quad strings (octet ranges are not validated)."""
        return self._run_regex(cell, "ipv4")
    def is_url(self, cell, **kwargs):
        return self._run_regex(cell, "url")
    def is_email(self, cell, **kwargs):
        return self._run_regex(cell, "email")
    def is_unicode_alphanum(self, cell, is_quoted=False, **kwargs):
        # Quoted cells may additionally contain comma-like characters.
        if is_quoted:
            return self._run_regex(cell, "unicode_alphanum_quoted")
        return self._run_regex(cell, "unicode_alphanum")
    def is_date(self, cell, **kwargs):
        # This function assumes the cell is not a number.
        cell = cell.strip() if self.strip_whitespace else cell
        if not cell:
            return False
        # Cheap pre-filter: all date patterns start with a digit.
        if not cell[0].isdigit():
            return False
        return self._run_regex(cell, "date")
    def is_time(self, cell, **kwargs):
        """True for h:mm, hh:mm, hh:mm:ss, or hh:mm:ss±hh:mm forms."""
        cell = cell.strip() if self.strip_whitespace else cell
        if not cell:
            return False
        if not cell[0].isdigit():
            return False
        return (
            self._run_regex(cell, "time_hmm")
            or self._run_regex(cell, "time_hhmm")
            or self._run_regex(cell, "time_hhmmss")
            or self._run_regex(cell, "time_hhmmsszz")
        )
    def is_empty(self, cell, **kwargs):
        return cell == ""
    def is_percentage(self, cell, **kwargs):
        """True for a number followed by a percent sign."""
        return cell.endswith("%") and self.is_number(cell.rstrip("%"))
    def is_currency(self, cell, **kwargs):
        """True for a currency symbol (\\p{Sc}) followed by a number."""
        pat = self.patterns.get("currency", None)
        m = pat.fullmatch(cell)
        if m is None:
            return False
        grp = m.group(1)
        if not self.is_number(grp):
            return False
        return True
    def is_datetime(self, cell, **kwargs):
        # Takes care of cells with '[date] [time]' and '[date]T[time]' (iso)
        if not cell:
            return False
        if not cell[0].isdigit():
            return False
        if " " in cell:
            parts = cell.split(" ")
            if len(parts) > 2:
                return False
            return self.is_date(parts[0]) and self.is_time(parts[1])
        elif "T" in cell:
            parts = cell.split("T")
            if len(parts) > 2:
                return False
            isdate = self.is_date(parts[0])
            if not isdate:
                return False
            # [date]T[time] or [date]T[time]Z
            if parts[1].endswith("Z") and self.is_time(parts[1][:-1]):
                return True
            if self.is_time(parts[1]):
                return True
            # [date]T[time][+-][time]
            if "+" in parts[1]:
                subparts = parts[1].split("+")
                istime1 = self.is_time(subparts[0])
                istime2 = self.is_time(subparts[1])
                if not istime1:
                    return False
                if istime2:
                    return True
                # Offsets may omit the colon (HHMM) or the minutes (HH).
                if self._run_regex(subparts[1], "time_HHMM"):
                    return True
                if self._run_regex(subparts[1], "time_HH"):
                    return True
            elif "-" in parts[1]:
                subparts = parts[1].split("-")
                istime1 = self.is_time(subparts[0])
                istime2 = self.is_time(subparts[1])
                if not istime1:
                    return False
                if istime2:
                    return True
                if self._run_regex(subparts[1], "time_HHMM"):
                    return True
                if self._run_regex(subparts[1], "time_HH"):
                    return True
        return False
    def is_nan(self, cell, **kwargs):
        """True for common not-a-number markers (case-insensitive)."""
        if cell.lower() in ["n/a", "na", "nan"]:
            return True
        return False
    def is_unix_path(self, cell, **kwargs):
        return self._run_regex(cell, "unix_path")
    def is_bytearray(self, cell: str, **kwargs) -> bool:
        # Matches the repr() form of a Python bytearray, e.g. bytearray(b'..').
        return cell.startswith("bytearray(b") and cell.endswith(")")
    def is_json_obj(self, cell: str, **kwargs) -> bool:
        """True if the cell parses as a JSON object literal ({...})."""
        if not (cell.startswith("{") and cell.endswith("}")):
            return False
        try:
            _ = json.loads(cell)
        except json.JSONDecodeError:
            return False
        return True
def gen_known_type(cells):
    """Lazily yield, for each cell in *cells*, whether its type is known.

    A single TypeDetector instance is shared across all cells so the
    regexes are compiled only once.
    """
    detector = TypeDetector()
    yield from (detector.is_known_type(value) for value in cells)
def type_score(data, dialect, eps=DEFAULT_EPS_TYPE):
    """Compute the type score: the ratio of cells with a known type.

    Parameters
    ----------
    data: str
        the data as a single string
    dialect: SimpleDialect
        the dialect to use
    eps: float
        the minimum value of the type score

    Returns
    -------
    float
        max(eps, known cells / total cells); ``eps`` if there are no cells.
    """
    detector = TypeDetector()
    cell_count = 0
    typed_count = 0
    for record in parse_string(data, dialect, return_quoted=True):
        for value, quoted in record:
            cell_count += 1
            if detector.is_known_type(value, is_quoted=quoted):
                typed_count += 1
    if cell_count == 0:
        return eps
    return max(eps, typed_count / cell_count)
| 31.853372 | 621 | 0.509114 |
37e90da79928915f3423bb4d366932e0343e80e1 | 8,701 | py | Python | autobahntestsuite/autobahntestsuite/wampcase/wampcase3_1_x_x.py | rishabh-bector/autobahn-testsuite | 57030060630c10b22be44774973eaa61987b716c | [
"Apache-2.0"
] | 595 | 2015-10-20T09:01:18.000Z | 2022-03-28T08:48:27.000Z | autobahntestsuite/autobahntestsuite/wampcase/wampcase3_1_x_x.py | rishabh-bector/autobahn-testsuite | 57030060630c10b22be44774973eaa61987b716c | [
"Apache-2.0"
] | 73 | 2015-12-03T14:21:56.000Z | 2022-02-05T01:53:05.000Z | autobahntestsuite/autobahntestsuite/wampcase/wampcase3_1_x_x.py | rishabh-bector/autobahn-testsuite | 57030060630c10b22be44774973eaa61987b716c | [
"Apache-2.0"
] | 65 | 2015-11-04T15:58:37.000Z | 2022-02-09T03:49:24.000Z | ###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
# Only the generated list of test cases is part of this module's public API.
__all__ = ['Cases']
## The set of cases we construct and export from this module.
## Everything else is private.
Cases = []
#### BEGIN OF CONFIG
#### END OF CONFIG
import json, time
from zope.interface import implementer
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred
from autobahn.twisted.websocket import connectWS
from autobahn.wamp import WampClientFactory, WampCraClientProtocol
from autobahntestsuite.testrun import TestResult
from autobahntestsuite.util import AttributeBag, perf_counter
from autobahntestsuite.interfaces import ITestCase
class WampCase3_1_x_x_Protocol(WampCraClientProtocol):
def onSessionOpen(self):
if self.test.testee.auth:
d = self.authenticate(**self.test.testee.auth)
d.addCallbacks(self.onAuthSuccess, self.onAuthError)
else:
self.main()
def onAuthSuccess(self, permissions):
self.main()
def onAuthError(self, e):
uri, desc, details = e.value.args
print "Authentication Error!", uri, desc, details
def main(self):
self.factory.onReady(self)
class WampCase3_1_x_x_Factory(WampClientFactory):
    """Factory that tracks the single protocol instance it builds and
    reports readiness / disconnection through the supplied callbacks."""

    protocol = WampCase3_1_x_x_Protocol

    def __init__(self, test, onReady, onGone):
        WampClientFactory.__init__(self, test.testee.url)
        self.test = test
        self.onReady = onReady
        self.onGone = onGone
        # Filled in by buildProtocol(); None until a connection is made.
        self.proto = None

    def buildProtocol(self, addr):
        instance = self.protocol()
        instance.factory = self
        instance.test = self.test
        self.proto = instance
        return instance

    def clientConnectionLost(self, connector, reason):
        self.onGone(self.proto)

    def clientConnectionFailed(self, connector, reason):
        self.onGone(self.proto)
class WampCase3_1_x_x_Params(AttributeBag):
    """
    Test parameter set for configuring instances of WampCase2_*_*.

    peers: a list with one item per WAMP session run during the test, where each item contains a list of topics each peer _subscribes_ to. The publisher that publishes during the test is always the first item in the list.

    publicationTopic, excludeMe, exclude, eligible: parameters controlling how events are published during the test.

    eventPayloads: a list of payloads each tested as event payload to the test at hand.

    expectedReceivers: a list of session indices, where each index references a WAMP session created for the list in `peers`.
    """
    # Attribute names recognized by the AttributeBag base class.
    ATTRIBUTES = ['peers',
                  'publicationTopic',
                  'excludeMe',
                  'exclude',
                  'eligible',
                  'eventPayloads',
                  'expectedReceivers']
@implementer(ITestCase)
class WampCase3_1_x_x_Base:
    """Base test case: RPC-calls the echo endpoint with a range of integer
    payloads and compares the echoed values against the values sent."""
    DESCRIPTION = "Undefined."
    EXPECTATION = "Undefined."
    def __init__(self, testee):
        # testee: description of the WAMP implementation under test.
        self.testee = testee
        self.client = None
        self.result = TestResult()
        self.result.received = {}
        self.result.expected = {}
        self.result.log = []
    def run(self):
        self.result.started = perf_counter()
        def shutdown():
            # Close the WAMP session; triggers the factory's onGone -> done().
            if self.client:
                self.client.proto.sendClose()
        def test(proto):
            #res = yield self.call("http://api.testsuite.wamp.ws/case/3.1.1#1", 23)
            ## after having published everything the test had specified,
            ## we need to _wait_ for events on all our WAMP sessions to
            ## compare with our expectation. by default, we wait 3x the
            ## specified/default RTT
            def perform(i, p):
                # Fire one echo RPC; record the float result keyed by index i.
                d = proto.call("http://api.testsuite.wamp.ws/case/3.1.1#1", float(p))
                def got(res):
                    self.result.received[i] = float(res)
                d.addCallback(got)
            # Integer payloads around power-of-two boundaries (positive and
            # negative); larger values are disabled in the commented lines.
            payloads = []
            payloads.extend([0])
            payloads.extend([2**7-1, 2**8-1, 2**15-1, 2**16-1, 2**24])
            #payloads.extend([2**7-1, 2**8-1, 2**15-1, 2**16-1, 2**24, 2**31-1, 2**32-1, 2**53])
            #payloads.extend([2**53+1, 2**63-1, 2**64-1])
            #payloads.extend([-2**7, -2**15, -2**24, -2**31, -2**53])
            payloads.extend([-2**7, -2**15, -2**24])
            #payloads.extend([-2**63])
            i = 0
            for p in payloads:
                self.result.expected[i] = float(p)
                perform(i, p)
                i += 1
            # Give all outstanding calls time to complete before closing.
            wait = 3 * self.testee.options.get("rtt", 0.2)
            reactor.callLater(wait, shutdown)
        def launch(proto):
            ## FIXME: explain why the following needed, since
            ## without the almost zero delay (which triggers a
            ## reactor loop), the code will not work as expected!
            #test() # <= does NOT work
            reactor.callLater(0.00001, test, proto)
        def error(err):
            ## FIXME
            print "ERROR", err
            shutdown()
            self.finished.errback(err)
        def done(proto):
            # Connection gone: compare received vs expected (via JSON dumps,
            # which also compares key order/encoding) and report the result.
            self.result.ended = perf_counter()
            passed = json.dumps(self.result.received) == json.dumps(self.result.expected)
            if not passed:
                print "EXPECTED", self.result.expected
                print "RECEIVED", self.result.received
            self.result.passed = passed
            self.finished.callback(self.result)
        self.client = WampCase3_1_x_x_Factory(self, launch, done)
        connectWS(self.client)
        self.finished = Deferred()
        return self.finished
# Concrete case 3.1.1.1: inherits the full behavior from the base class.
class WampCase3_1_1_1(WampCase3_1_x_x_Base):
    pass
# The list of cases exported from this module (see __all__ above).
Cases = [WampCase3_1_1_1]
# NOTE(review): this generator references SETTINGS, PAYLOADS and
# WampCase2_2_x_x_Params, none of which are defined in this module, and its
# only call site (bottom of file) is commented out — it appears to be dead
# code carried over from the WampCase2 module; confirm before reviving.
def generate_WampCase3_1_x_x_classes2():
    ## dynamically create case classes
    ##
    res = []
    jc = 1
    for setting in SETTINGS:
        ic = 1
        for payload in PAYLOADS:
            # Bundle one (setting, payload) combination into a parameter bag.
            params = WampCase2_2_x_x_Params(peers = setting[0],
                                            publicationTopic = setting[1],
                                            excludeMe = setting[2],
                                            exclude = setting[3],
                                            eligible = setting[4],
                                            eventPayloads = payload,
                                            expectedReceivers = setting[5])
            pl = len(params.eventPayloads)
            plc = "s" if pl else ""
            # Human-readable list of per-peer subscriptions ("-" = none).
            s = []
            i = 0
            for p in params.peers:
                if len(p) > 0:
                    s.append("%d: %s" % (i, ' & '.join(p)))
                else:
                    s.append("%d: %s" % (i, '-'))
                i += 1
            s = ', '.join(s)
            # Human-readable list of publication options ("-" = none set).
            o = []
            if params.excludeMe is not None:
                o.append("excludeMe = %s" % params.excludeMe)
            if params.exclude is not None:
                o.append("exclude = %s" % params.exclude)
            if params.eligible is not None:
                o.append("eligible = %s" % params.eligible)
            if len(o) > 0:
                o = ', '.join(o)
            else:
                o = "-"
            description = """The test connects %d WAMP clients to the testee, subscribes \
the sessions to topics %s and \
then publishes %d event%s to the topic %s with payload%s %s from the first session. \
The test sets the following publication options: %s.
""" % (len(params.peers),
       s,
       pl,
       plc,
       params.publicationTopic,
       plc,
       ', '.join(['"' + str(x) + '"' for x in params.eventPayloads]),
       o)
            expectation = """We expect the testee to dispatch the events to us on \
the sessions %s""" % (params.expectedReceivers,)
            klassname = "WampCase3_1_%d_%d" % (jc, ic)
            # Create the case class dynamically, reusing the base's methods.
            Klass = type(klassname,
                         (object, WampCase3_1_x_x_Base, ),
                         {
                            "__init__": WampCase3_1_x_x_Base.__init__,
                            "run": WampCase3_1_x_x_Base.run,
                            "description": description,
                            "expectation": expectation,
                            "params": params
                         })
            res.append(Klass)
            ic += 1
        jc += 1
    return res
#Cases.extend(generate_WampCase3_1_x_x_classes())
| 30.211806 | 220 | 0.580048 |
1d4583f28811a490ee5161c593b14c27acf88c50 | 4,539 | py | Python | GraphSAGE/eval/eval.py | chinthakarukshan/jasminegraph | 14d2934a3be71024343e02570fb3bc1d92e73de8 | [
"Apache-2.0"
] | 10 | 2019-02-12T16:13:18.000Z | 2021-05-23T04:57:54.000Z | GraphSAGE/eval/eval.py | chinthakarukshan/jasminegraph | 14d2934a3be71024343e02570fb3bc1d92e73de8 | [
"Apache-2.0"
] | 23 | 2019-02-03T10:52:44.000Z | 2021-09-11T17:13:36.000Z | GraphSAGE/eval/eval.py | chinthakarukshan/jasminegraph | 14d2934a3be71024343e02570fb3bc1d92e73de8 | [
"Apache-2.0"
] | 15 | 2018-11-25T10:47:34.000Z | 2020-10-03T16:48:30.000Z | import tensorflow as tf
import numpy as np
import networkx as nx
import json
import random
from networkx.readwrite import json_graph
# Command-line flags (TensorFlow 1.x flag API); FLAGS is read module-wide.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('graph_id','3', 'specify the graphID')
flags.DEFINE_string('path', '/var/tmp/jasminegraph-localstore/jasminegraph-local_trained_model_store/3', 'name of the object file that stores the training data. must be specified.')
flags.DEFINE_integer('max_degree', 100, 'maximum node degree.')
flags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples')
def construct_deg(G, id2idx):
    """Build a fixed-width adjacency table and degree vector for graph G.

    adj has one row per node plus a final padding row; every row holds
    exactly FLAGS.max_degree neighbor indices, down-sampled without
    replacement when a node has more neighbors and up-sampled with
    replacement when it has fewer. Nodes with no neighbors keep the
    padding index (== len(id2idx)) in their row.
    """
    num_nodes = len(id2idx)
    max_deg = FLAGS.max_degree
    adj = num_nodes * np.ones((num_nodes + 1, max_deg))
    deg = np.zeros((num_nodes,))
    for node in G.nodes():
        row = id2idx[str(node)]
        nbrs = np.array([id2idx[str(nb)] for nb in G.neighbors(node)])
        deg[row] = len(nbrs)
        if len(nbrs) == 0:
            continue
        if len(nbrs) > max_deg:
            nbrs = np.random.choice(nbrs, max_deg, replace=False)
        elif len(nbrs) < max_deg:
            nbrs = np.random.choice(nbrs, max_deg, replace=True)
        adj[row, :] = nbrs
    return adj, deg
def batch_feed_dict(batch_edges, id2idx):
    """Translate (node1, node2) edges into two parallel index lists.

    id2idx maps str(node id) -> integer index; returns (sources, targets).
    """
    sources, targets = [], []
    for left, right in batch_edges:
        sources.append(id2idx[str(left)])
        targets.append(id2idx[str(right)])
    return sources, targets
def test_feed_dict(edge_list, id2idx, size=None):
    """Build index lists for *edge_list* (all edges, or a random sample).

    When size is None, all edges are used in order. Otherwise a random
    sample of `size` edges is drawn (without replacement).

    Bug fix: previously the slice was `ind[:len(ind)]`, which ignored the
    requested `size` and returned every edge (merely shuffled); the slice
    is now `ind[:size]` so the sample size is honored.
    """
    if size is None:
        return batch_feed_dict(edge_list, id2idx)
    else:
        ind = np.random.permutation(len(edge_list))
        test_edges = [edge_list[i] for i in ind[:size]]
        return batch_feed_dict(test_edges, id2idx)
#get test edge files
def eval(graph,test_edges,node_idx_map,idx_node_map,graph_embeddings):
    """Rank each held-out edge against negative samples and print
    MRR and hit@{1,3,10,50} for the learned embeddings.

    NOTE(review): shadows the builtin `eval`; also rebinds the builtin
    `list` inside the ranking loop — consider renaming both.
    """
    adj, deg = construct_deg(graph,node_idx_map)
    batch1,batch2=test_feed_dict(test_edges,node_idx_map)
    # For every test edge, draw neg_sample_size node indices that are NOT
    # connected to the source node (and are distinct within the sample).
    neg_samples = []
    for i in range(len(batch1)):
        neg =[]
        while(len(neg)<FLAGS.neg_sample_size):
            # len(adj)-1 is the padding row, so sample in [0, len(adj)-2].
            x = random.randint(0,len(adj)-2)
            if(graph.has_edge(idx_node_map[str(batch1[i])],idx_node_map[str(x)])==False and x not in neg):
                neg.append(x)
        neg_samples.append(neg)
    # Gather embeddings for edge sources, edge targets, and negatives.
    output1 =[]
    output2 =[]
    neg_out = []
    for i in range(len(batch1)):
        output1.append(graph_embeddings[batch1[i]])
    for j in range(len(batch2)):
        output2.append(graph_embeddings[batch2[j]])
    for k in range(len(neg_samples)):
        neg_out_part =[]
        for m in range(len(neg_samples[k])):
            neg_out_part.append(graph_embeddings[neg_samples[k][m]])
        neg_out.append(neg_out_part)
    # Affinity = dot product between source and (target | negative) vectors.
    aff = np.sum(np.array(output1)*np.array(output2),axis=1)
    neg_aff = [np.sum(np.array(output1[i])*neg_out[i],axis=1) for i in range(len(output1))]
    hit_1 = 0
    hit_3 = 0
    hit_10 = 0
    hit_50 = 0
    mrr = 0
    for x in range(len(aff)):
        # Rank of the true edge among its negatives (descending affinity).
        list = np.append(neg_aff[x],aff[x]).tolist()
        list.sort()
        list= list[::-1]
        rank =(list.index(aff[x])+1)
        reciprocal = 1/rank
        mrr+=reciprocal
        if(rank==1):
            hit_1+=1
        if(rank<=3):
            hit_3+=1
        if(rank<=10):
            hit_10+=1
        if(rank<=50):
            hit_50+=1
    # Normalize the accumulated counts over the number of test edges.
    mrr = mrr/(len(aff))
    hit_1 = hit_1/(len(aff))
    hit_3 = hit_3/(len(aff))
    hit_10 = hit_10/(len(aff))
    hit_50 = hit_50/(len(aff))
    print("MRR  = "+ str(mrr))
    print("hit@1  = "+ str(hit_1))
    print("hit@3 = "+ str(hit_3))
    print("hit@10  = "+ str(hit_10))
    print("hit@50  = "+ str(hit_50))
def load(path, graphID):
    """Load the graph, held-out test edges, index maps and node embeddings.

    Reads, from `path`:
      <graphID>                      - training edge list
      <graphID>_TEST_EDGE_SET.txt    - held-out edges for evaluation
      <graphID>-embeddings.json      - node id -> embedding row index
      <graphID>-idxtoid.json         - embedding row index -> node id
      <graphID>-embeddings.npy       - embedding matrix
    """
    G = nx.read_edgelist(path + "/" + graphID, nodetype=int)
    # test edge list
    test_edges_graph = nx.read_edgelist(path + "/" + graphID + "_TEST_EDGE_SET.txt", nodetype=int)
    test_edges = [e for e in test_edges_graph.edges()]
    # load embeddings
    # Fix: use context managers so the JSON file handles are closed promptly
    # (previously `json.load(open(...))` leaked the descriptors).
    with open(path + "/" + graphID + '-embeddings.json') as f:
        node_idx_map = json.load(f)
    with open(path + "/" + graphID + '-idxtoid.json') as f:
        idx_node_map = json.load(f)
    graph_embeddings = np.load(path + "/" + graphID + '-embeddings.npy')
    return G, test_edges, node_idx_map, idx_node_map, graph_embeddings
def main(argv=None):
    """Entry point: load the stored artifacts and run the ranking evaluation."""
    print("Loading training data..")
    graph, test_edges, node_idx_map, idx_node_map, embeddings = load(
        FLAGS.path, FLAGS.graph_id)
    print("Done loading training data..")
    eval(graph, test_edges, node_idx_map, idx_node_map, embeddings)

if __name__ == '__main__':
    main()
# python -m eval
| 34.648855 | 181 | 0.644195 |
21b7771b6cb48069d1c5fc6d6378ffae51c03fe8 | 4,639 | py | Python | siliconcompiler/tools/netgen/count_lvs.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 424 | 2021-12-04T15:45:12.000Z | 2022-03-31T20:27:55.000Z | siliconcompiler/tools/netgen/count_lvs.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 105 | 2021-12-03T21:25:29.000Z | 2022-03-31T22:36:59.000Z | siliconcompiler/tools/netgen/count_lvs.py | mfkiwl/siliconcompiler | 49a16d9a07c526821afe1ce2f2d77394e439ca05 | [
"Apache-2.0"
] | 38 | 2021-12-04T21:26:20.000Z | 2022-03-21T02:39:29.000Z | #!/usr/bin/python3
# SPDX-FileCopyrightText: 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
#
#---------------------------------------------------------
# LVS failure check
#
# This is a Python script that parses the comp.json
# output from netgen and reports on the number of
# errors in the top-level netlist.
#
#---------------------------------------------------------
# Written by Tim Edwards
# efabless, inc.
# Pulled from qflow GUI as standalone script Aug 20, 2018
#---------------------------------------------------------
import os
import re
import sys
import json
import argparse
def count_LVS_failures(filename):
    """Parse netgen's comp.json output and tally LVS mismatches.

    Returns a list:
      [total, unmatched nets, unmatched devices, unmatched pins,
       property failures, net count difference, device count difference]

    All checks except property failures apply only to the last (top) cell,
    because failing sub-cells are flattened and re-compared there.
    """
    with open(filename, 'r') as datafile:
        celldata = json.load(datafile)

    total = 0
    net_mismatch = 0
    dev_mismatch = 0
    pin_mismatch = 0
    prop_errors = 0
    net_count_diff = 0
    dev_count_diff = 0

    last = len(celldata) - 1
    for index, record in enumerate(celldata):
        if index == last:
            # --- top-cell-only checks ---
            if 'devices' in record:
                # Pair up device entries from both netlists and accumulate
                # the absolute count differences per device type.
                left, right = record['devices'][0], record['devices'][1]
                for c1dev, c2dev in zip(left, right):
                    delta = abs(c1dev[1] - c2dev[1])
                    total += delta
                    dev_count_diff += delta
            if 'nets' in record:
                delta = abs(record['nets'][0] - record['nets'][1])
                total += delta
                net_count_diff += delta
            if 'badnets' in record:
                count = len(record['badnets'])
                total += count
                net_mismatch += count
            if 'badelements' in record:
                count = len(record['badelements'])
                total += count
                dev_mismatch += count
            if 'pins' in record:
                # Compare pin names case-insensitively and ignore a trailing
                # '!' so global vs. local names (gnd vs. gnd!) don't flag.
                for pina, pinb in zip(record['pins'][0], record['pins'][1]):
                    norm_a = re.sub('!$', '', pina.lower())
                    norm_b = re.sub('!$', '', pinb.lower())
                    if norm_a != norm_b:
                        total += 1
                        pin_mismatch += 1
        # Property errors are counted for every cell, not just the top.
        if 'properties' in record:
            count = len(record['properties'])
            total += count
            prop_errors += count

    return [total, net_mismatch, dev_mismatch, pin_mismatch, prop_errors,
            net_count_diff, dev_count_diff]
if __name__ == '__main__':
    # CLI entry: report the LVS mismatch counts for a netgen comp.json file.
    parser = argparse.ArgumentParser(description='Parses netgen lvs')
    parser.add_argument('--file', '-f', required=True)
    args = parser.parse_args()

    failures = count_LVS_failures(args.file)
    total = failures[0]
    if total > 0:
        # (fix: removed `failed = True`, which was assigned but never read)
        print('LVS reports:')
        print('    net count difference = ' + str(failures[5]))
        print('    device count difference = ' + str(failures[6]))
        print('    unmatched nets = ' + str(failures[1]))
        print('    unmatched devices = ' + str(failures[2]))
        print('    unmatched pins = ' + str(failures[3]))
        print('    property failures = ' + str(failures[4]))
    else:
        print('LVS reports no net, device, pin, or property mismatches.')
    print('')
    print('Total errors = ' + str(total))
| 34.879699 | 87 | 0.547101 |
a7faf5c556bca16b4cc56eefd55a52e3871019a8 | 504 | py | Python | subsync/suboffset.py | hswmartin/subsync-1 | d9798a5d47e1a921c178b8922b7e30374b186e12 | [
"MIT"
] | 1 | 2020-03-29T09:24:24.000Z | 2020-03-29T09:24:24.000Z | subsync/suboffset.py | xiaopinggai-webrtc/subsync | 691c387c808bcf81a2afd4ca8b666df6346ff634 | [
"MIT"
] | null | null | null | subsync/suboffset.py | xiaopinggai-webrtc/subsync | 691c387c808bcf81a2afd4ca8b666df6346ff634 | [
"MIT"
] | 1 | 2019-09-14T17:56:00.000Z | 2019-09-14T17:56:00.000Z | #!/usr/bin/env python
import logging
import sys
from sklearn.pipeline import Pipeline
from .subtitle_parsers import SrtParser, SrtOffseter
# Configure root logging once at import time; per-module logger below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Shift an SRT subtitle file by a fixed offset.

    argv: [1] input srt path, [2] output srt path, [3] offset in seconds.
    """
    offset_seconds = float(sys.argv[3])
    pipeline = Pipeline([
        ('parse', SrtParser()),
        ('offset', SrtOffseter(offset_seconds)),
    ])
    pipeline.fit_transform(sys.argv[1])
    # The offseter (last step) writes the shifted subtitles out.
    pipeline.steps[-1][1].write_file(sys.argv[2])
    return 0

if __name__ == "__main__":
    sys.exit(main())
| 19.384615 | 52 | 0.668651 |
571a930edb82c9112daae9c08e64ae03eb47dff5 | 3,616 | py | Python | benchmarking/bench_format.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | null | null | null | benchmarking/bench_format.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | 1 | 2018-12-17T01:47:00.000Z | 2018-12-17T01:47:00.000Z | benchmarking/bench_format.py | troxel/TemplateRex-Python | 69982c34bcdff3787ce0681e22a3c47a1f40c79d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Benchmark against Jinja2 template engine to gauge the effect of
various coding changes.
"""
import sys
from os.path import join, dirname, abspath
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
from datetime import datetime
from timeit import Timer
import re
# Template and lookup data shared by the two benchmark variants below.
str_in = "{yo} {yo1} Ths is a test {name} here {that} and {more} and yet {morethis} double on {day} which is {beer} with {ing}"
#str_in = str_in + " " + str_in + " " + str_in + " " + str_in
contxt = {'name':"Joe",'that':"howdy",'more':"good stuff",'morethis':"yet more",'day':"Friday",'beer':"IPA",'ing':"hops",'yo':"",'yo1':""}
contxt_copy = contxt.copy()
# NOTE(review): 'yo' appears twice in this literal — the second entry wins;
# confirm whether one of them was meant to be another key.
contxt_default = {'name':"",'that':"",'more':"",'morethis':"",'yo':"",'day':"",'beer':"",'ing':"",'yo':"",'yo1':""}
key_lst = ['yo','yo1','name','that','more','morethis','yo','day','beer','ing','name1']
# Matches a {placeholder} and captures its name (used by sub_var).
id_re = re.compile(r'{([^}]+)}', re.DOTALL)
def sub_var():
    """Render str_in via regex substitution of {name} placeholders.

    Unknown placeholders are replaced with the empty string.
    """
    def _lookup(match):
        try:
            return str(contxt[match.group(1)])
        except:
            return ""
    return id_re.sub(_lookup, str_in)
######################
# Really really slow
#import string
#class BlankFormatter(string.Formatter):
# def __init__(self, default=''):
# self.default=default
# def get_value(self, key, args, kwds):
# if isinstance(key, str):
# return kwds.get(key, self.default)
# else:
# Formatter.get_value(key, args, kwds)
#fmt=BlankFormatter()
#rtn = fmt.format(str_in,**contxt)
#########################
######################
# Not bad speed wise
# Ratio 2.6901708174699337 vs ~5
# doesn't work with python2
#class format_dict(dict):
# def __missing__(self, key):
# return "..."
#spec_dict = format_dict(contxt)
#rtn = str_in.format(**spec_dict)
######################
######################
# Ratio 2.207058711959082
# doesn't work with python2
#from collections import defaultdict
# def dfl():
# return ""
# d = defaultdict(dfl, contxt)
# rtn = str_in.format(**d)
######################
def fmt_var():
    """Render str_in via str.format, tolerating missing keys.

    Fast path formats with contxt directly; on any failure (e.g. a KeyError
    for a placeholder absent from contxt) it falls back to merging contxt
    into the preset defaults dict. Note the fallback mutates the module
    global contxt_default, matching the benchmarked variant's behavior.
    """
    try:
        return str_in.format(**contxt)
    except:
        contxt_default.update(contxt)
        return str_in.format(**contxt_default)
# Sanity check: both variants should print the same rendered string.
print(sub_var())
print(fmt_var())
if __name__ == '__main__':
    tm = {}
    sys.stdout.write('Benchmark:\n')
    # Time each variant 1750 times and report the per-call average.
    for test in 'sub_var','fmt_var':
        t = Timer(setup='from __main__ import %s as bench' % test,
                  stmt='bench()')
        sys.stdout.write(' >> %-20s<running>' % test)
        sys.stdout.flush()
        tm[test] = t.timeit(number=1750)
        sys.stdout.write('\r %-20s%.9f seconds\n' % (test, tm[test] / 1750))
    print("Ratio",tm['sub_var']/tm['fmt_var'])
    # NOTE(review): test_jinja and test_trex are not defined in this file —
    # running with -p will raise NameError; confirm where they should come from.
    if '-p' in sys.argv:
        print('Jinja profile')
        p = Profile()
        p.runcall(test_jinja)
        stats = Stats(p)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
        print('trex profile')
        p = Profile()
        p.runcall(test_trex)
        stats = Stats(p)
        stats.sort_stats('time', 'calls')
        stats.print_stats()
| 28.472441 | 138 | 0.550332 |
6c5fba03fb705aeec1385acf7d805b94a54c4f1c | 97,367 | py | Python | scripts/layer_chassis_generator.py | AndrewFobelAMD/Vulkan-ValidationLayers | 46093fc92ada4d876c13023bdece1a25829597e0 | [
"Apache-2.0"
] | null | null | null | scripts/layer_chassis_generator.py | AndrewFobelAMD/Vulkan-ValidationLayers | 46093fc92ada4d876c13023bdece1a25829597e0 | [
"Apache-2.0"
] | null | null | null | scripts/layer_chassis_generator.py | AndrewFobelAMD/Vulkan-ValidationLayers | 46093fc92ada4d876c13023bdece1a25829597e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2020 Valve Corporation
# Copyright (c) 2015-2020 LunarG, Inc.
# Copyright (c) 2015-2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
# Author: Mark Lobodzinski <mark@lunarg.com>
#
# This script generates the dispatch portion of a factory layer which intercepts
# all Vulkan functions. The resultant factory layer allows rapid development of
# layers and interceptors.
import os,re,sys
from generator import *
from common_codegen import *
# LayerFactoryGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by LayerFactoryOutputGenerator objects during factory
# layer generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class LayerChassisGeneratorOptions(GeneratorOptions):
    """Options container for layer-chassis generation.

    Extends GeneratorOptions with the formatting and code-emission settings
    consumed during chassis-layer generation: copyright prefix text,
    function-pointer typedef emission, inclusion-guard generation,
    calling-convention macro strings, and prototype indentation/alignment.

    ``helper_file_type`` and ``expandEnumerants`` are accepted for interface
    parity with sibling generator option classes but are not retained,
    mirroring the behavior relied on by callers.
    """
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 helper_file_type = '',
                 expandEnumerants = True):
        # Hand the spec-registry selection options straight through to the
        # base class.
        super().__init__(conventions, filename, directory, apiname, profile,
                         versions, emitversions, defaultExtensions,
                         addExtensions, removeExtensions, emitExtensions,
                         sortProcedure)
        # Record the chassis-specific emission settings on the instance.
        self.__dict__.update(
            prefixText=prefixText,
            genFuncPointers=genFuncPointers,
            protectFile=protectFile,
            protectFeature=protectFeature,
            apicall=apicall,
            apientry=apientry,
            apientryp=apientryp,
            indentFuncProto=indentFuncProto,
            indentFuncPointer=indentFuncPointer,
            alignFuncParam=alignFuncParam,
        )
# LayerChassisOutputGenerator - subclass of OutputGenerator.
# Generates a LayerFactory layer that intercepts all API entrypoints
# This is intended to be used as a starting point for creating custom layers
#
# ---- methods ----
# LayerChassisOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class LayerChassisOutputGenerator(OutputGenerator):
"""Generate specified API interfaces in a specific style, such as a C header"""
# This is an ordered list of sections in the header file.
TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',
'group', 'bitmask', 'funcpointer', 'struct']
ALL_SECTIONS = TYPE_SECTIONS + ['command']
manual_functions = [
        # Include functions here to be intercepted w/ manually implemented function bodies
'vkGetDeviceProcAddr',
'vkGetInstanceProcAddr',
'vkGetPhysicalDeviceProcAddr',
'vkCreateDevice',
'vkDestroyDevice',
'vkCreateInstance',
'vkDestroyInstance',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
# Functions that are handled explicitly due to chassis architecture violations
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreatePipelineLayout',
'vkCreateShaderModule',
'vkAllocateDescriptorSets',
'vkCreateBuffer',
# ValidationCache functions do not get dispatched
'vkCreateValidationCacheEXT',
'vkDestroyValidationCacheEXT',
'vkMergeValidationCachesEXT',
'vkGetValidationCacheDataEXT',
'vkGetPhysicalDeviceToolPropertiesEXT',
]
alt_ret_codes = [
# Include functions here which must tolerate VK_INCOMPLETE as a return code
'vkEnumeratePhysicalDevices',
'vkEnumeratePhysicalDeviceGroupsKHR',
'vkGetValidationCacheDataEXT',
'vkGetPipelineCacheData',
'vkGetShaderInfoAMD',
'vkGetPhysicalDeviceDisplayPropertiesKHR',
'vkGetPhysicalDeviceDisplayProperties2KHR',
'vkGetPhysicalDeviceDisplayPlanePropertiesKHR',
'vkGetDisplayPlaneSupportedDisplaysKHR',
'vkGetDisplayModePropertiesKHR',
'vkGetDisplayModeProperties2KHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkGetPhysicalDevicePresentRectanglesKHR',
'vkGetPastPresentationTimingGOOGLE',
'vkGetSwapchainImagesKHR',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetPhysicalDeviceCalibrateableTimeDomainsEXT',
]
pre_dispatch_debug_utils_functions = {
'vkDebugMarkerSetObjectNameEXT' : 'layer_data->report_data->DebugReportSetMarkerObjectName(pNameInfo);',
'vkSetDebugUtilsObjectNameEXT' : 'layer_data->report_data->DebugReportSetUtilsObjectName(pNameInfo);',
'vkQueueBeginDebugUtilsLabelEXT' : 'BeginQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
'vkQueueInsertDebugUtilsLabelEXT' : 'InsertQueueDebugUtilsLabel(layer_data->report_data, queue, pLabelInfo);',
}
post_dispatch_debug_utils_functions = {
'vkQueueEndDebugUtilsLabelEXT' : 'EndQueueDebugUtilsLabel(layer_data->report_data, queue);',
'vkCreateDebugReportCallbackEXT' : 'layer_create_report_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pCallback);',
'vkDestroyDebugReportCallbackEXT' : 'layer_destroy_callback(layer_data->report_data, callback, pAllocator);',
'vkCreateDebugUtilsMessengerEXT' : 'layer_create_messenger_callback(layer_data->report_data, false, pCreateInfo, pAllocator, pMessenger);',
'vkDestroyDebugUtilsMessengerEXT' : 'layer_destroy_callback(layer_data->report_data, messenger, pAllocator);',
}
precallvalidate_loop = "for (auto intercept : layer_data->object_dispatch) {"
precallrecord_loop = precallvalidate_loop
postcallrecord_loop = "for (auto intercept : layer_data->object_dispatch) {"
inline_custom_header_preamble = """
#define NOMINMAX
#include <atomic>
#include <mutex>
#include <cinttypes>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
#include <memory>
#include "vk_loader_platform.h"
#include "vulkan/vulkan.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_object_types.h"
#include "vulkan/vk_layer.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vulkan/vk_layer.h"
#include "vk_dispatch_table_helper.h"
#include "vk_extension_helper.h"
#include "vk_safe_struct.h"
#include "vk_typemap_helper.h"
extern std::atomic<uint64_t> global_unique_id;
// To avoid re-hashing unique ids on each use, we precompute the hash and store the
// hash's LSBs in the high 24 bits.
struct HashedUint64 {
static const int HASHED_UINT64_SHIFT = 40;
size_t operator()(const uint64_t &t) const { return t >> HASHED_UINT64_SHIFT; }
static uint64_t hash(uint64_t id) {
uint64_t h = (uint64_t)std::hash<uint64_t>()(id);
id |= h << HASHED_UINT64_SHIFT;
return id;
}
};
extern vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(
VkInstance instance,
const char* funcName);
"""
inline_custom_header_class_definition = """
// Layer object type identifiers
enum LayerObjectTypeId {
LayerObjectTypeInstance, // Container for an instance dispatch object
LayerObjectTypeDevice, // Container for a device dispatch object
LayerObjectTypeThreading, // Instance or device threading layer object
LayerObjectTypeParameterValidation, // Instance or device parameter validation layer object
LayerObjectTypeObjectTracker, // Instance or device object tracker layer object
LayerObjectTypeCoreValidation, // Instance or device core validation layer object
LayerObjectTypeBestPractices, // Instance or device best practices layer object
LayerObjectTypeGpuAssisted, // Instance or device gpu assisted validation layer object
LayerObjectTypeMaxEnum, // Max enum count
};
struct TEMPLATE_STATE {
VkDescriptorUpdateTemplateKHR desc_update_template;
safe_VkDescriptorUpdateTemplateCreateInfo create_info;
bool destroyed;
TEMPLATE_STATE(VkDescriptorUpdateTemplateKHR update_template, safe_VkDescriptorUpdateTemplateCreateInfo *pCreateInfo)
: desc_update_template(update_template), create_info(*pCreateInfo), destroyed(false) {}
};
class LAYER_PHYS_DEV_PROPERTIES {
public:
VkPhysicalDeviceProperties properties;
std::vector<VkQueueFamilyProperties> queue_family_properties;
};
typedef enum ValidationCheckDisables {
VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE,
VALIDATION_CHECK_DISABLE_OBJECT_IN_USE,
VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET,
VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE,
VALIDATION_CHECK_DISABLE_QUERY_VALIDATION,
VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION,
} ValidationCheckDisables;
// CHECK_DISABLED struct is a container for bools that can block validation checks from being performed.
// These bools are all "false" by default meaning that all checks are enabled. Enum values can be specified
// via the vk_layer_setting.txt config file or at CreateInstance time via the VK_EXT_validation_features extension
// that can selectively disable checks.
struct CHECK_DISABLED {
bool command_buffer_state; // Skip command buffer state validation
bool object_in_use; // Skip all object in_use checking
bool idle_descriptor_set; // Skip check to verify that descriptor set is not in-use
bool push_constant_range; // Skip push constant range checks
bool query_validation; // Disable all core validation query-related checks
bool image_layout_validation; // Disable image layout validation
bool object_tracking; // Disable object lifetime validation
bool core_checks; // Disable core validation checks
bool thread_safety; // Disable thread safety validation
bool stateless_checks; // Disable stateless validation checks
bool handle_wrapping; // Disable unique handles/handle wrapping
bool shader_validation; // Skip validation for shaders
void SetAll(bool value) { std::fill(&command_buffer_state, &shader_validation + 1, value); }
};
struct CHECK_ENABLED {
bool gpu_validation;
bool gpu_validation_reserve_binding_slot;
bool best_practices;
void SetAll(bool value) { std::fill(&gpu_validation, &gpu_validation_reserve_binding_slot + 1, value); }
};
// Layer chassis validation object base class definition
class ValidationObject {
public:
uint32_t api_version;
debug_report_data* report_data = nullptr;
VkLayerInstanceDispatchTable instance_dispatch_table;
VkLayerDispatchTable device_dispatch_table;
InstanceExtensions instance_extensions;
DeviceExtensions device_extensions = {};
CHECK_DISABLED disabled = {};
CHECK_ENABLED enabled = {};
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
LAYER_PHYS_DEV_PROPERTIES phys_dev_properties = {};
std::vector<ValidationObject*> object_dispatch;
LayerObjectTypeId container_type;
std::string layer_name = "CHASSIS";
// Constructor
ValidationObject(){};
// Destructor
virtual ~ValidationObject() {};
ReadWriteLock validation_object_mutex;
virtual read_lock_guard_t read_lock() {
return read_lock_guard_t(validation_object_mutex);
}
virtual write_lock_guard_t write_lock() {
return write_lock_guard_t(validation_object_mutex);
}
ValidationObject* GetValidationObject(std::vector<ValidationObject*>& object_dispatch, LayerObjectTypeId object_type) {
for (auto validation_object : object_dispatch) {
if (validation_object->container_type == object_type) {
return validation_object;
}
}
return nullptr;
};
// Debug Logging Templates
template <typename HANDLE_T>
bool LogError(HANDLE_T src_object, const std::string &vuid_text, const char *format, ...) const {
std::unique_lock<std::mutex> lock(report_data->debug_output_mutex);
// Avoid logging cost if msg is to be ignored
if (!(report_data->active_severities & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) ||
!(report_data->active_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT)) {
return false;
}
va_list argptr;
va_start(argptr, format);
char *str;
if (-1 == vasprintf(&str, format, argptr)) {
str = nullptr;
}
va_end(argptr);
return LogMsgLocked(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkHandleInfo<HANDLE_T>::kDebugReportObjectType,
HandleToUint64(src_object), vuid_text, str);
};
template <typename HANDLE_T>
bool LogWarning(HANDLE_T src_object, const std::string &vuid_text, const char *format, ...) const {
std::unique_lock<std::mutex> lock(report_data->debug_output_mutex);
// Avoid logging cost if msg is to be ignored
if (!(report_data->active_severities & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) ||
!(report_data->active_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT)) {
return false;
}
va_list argptr;
va_start(argptr, format);
char *str;
if (-1 == vasprintf(&str, format, argptr)) {
str = nullptr;
}
va_end(argptr);
return LogMsgLocked(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkHandleInfo<HANDLE_T>::kDebugReportObjectType,
HandleToUint64(src_object), vuid_text, str);
};
template <typename HANDLE_T>
bool LogPerformanceWarning(HANDLE_T src_object, const std::string &vuid_text, const char *format, ...) const {
std::unique_lock<std::mutex> lock(report_data->debug_output_mutex);
// Avoid logging cost if msg is to be ignored
if (!(report_data->active_severities & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) ||
!(report_data->active_types & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT)) {
return false;
}
va_list argptr;
va_start(argptr, format);
char *str;
if (-1 == vasprintf(&str, format, argptr)) {
str = nullptr;
}
va_end(argptr);
return LogMsgLocked(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkHandleInfo<HANDLE_T>::kDebugReportObjectType,
HandleToUint64(src_object), vuid_text, str);
};
template <typename HANDLE_T>
bool LogInfo(HANDLE_T src_object, const std::string &vuid_text, const char *format, ...) const {
std::unique_lock<std::mutex> lock(report_data->debug_output_mutex);
// Avoid logging cost if msg is to be ignored
if (!(report_data->active_severities & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) ||
!(report_data->active_types & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT)) {
return false;
}
va_list argptr;
va_start(argptr, format);
char *str;
if (-1 == vasprintf(&str, format, argptr)) {
str = nullptr;
}
va_end(argptr);
return LogMsgLocked(report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VkHandleInfo<HANDLE_T>::kDebugReportObjectType,
HandleToUint64(src_object), vuid_text, str);
};
// Handle Wrapping Data
// Reverse map display handles
vl_concurrent_unordered_map<VkDisplayKHR, uint64_t, 0> display_id_reverse_mapping;
// Wrapping Descriptor Template Update structures requires access to the template createinfo structs
std::unordered_map<uint64_t, std::unique_ptr<TEMPLATE_STATE>> desc_template_createinfo_map;
struct SubpassesUsageStates {
std::unordered_set<uint32_t> subpasses_using_color_attachment;
std::unordered_set<uint32_t> subpasses_using_depthstencil_attachment;
};
// Uses unwrapped handles
std::unordered_map<VkRenderPass, SubpassesUsageStates> renderpasses_states;
// Map of wrapped swapchain handles to arrays of wrapped swapchain image IDs
// Each swapchain has an immutable list of wrapped swapchain image IDs -- always return these IDs if they exist
std::unordered_map<VkSwapchainKHR, std::vector<VkImage>> swapchain_wrapped_image_handle_map;
// Map of wrapped descriptor pools to set of wrapped descriptor sets allocated from each pool
std::unordered_map<VkDescriptorPool, std::unordered_set<VkDescriptorSet>> pool_descriptor_sets_map;
// Unwrap a handle.
template <typename HandleType>
HandleType Unwrap(HandleType wrappedHandle) {
auto iter = unique_id_mapping.find(reinterpret_cast<uint64_t const &>(wrappedHandle));
if (iter == unique_id_mapping.end())
return (HandleType)0;
return (HandleType)iter->second;
}
// Wrap a newly created handle with a new unique ID, and return the new ID.
template <typename HandleType>
HandleType WrapNew(HandleType newlyCreatedHandle) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
return (HandleType)unique_id;
}
// Specialized handling for VkDisplayKHR. Adds an entry to enable reverse-lookup.
VkDisplayKHR WrapDisplay(VkDisplayKHR newlyCreatedHandle, ValidationObject *map_data) {
auto unique_id = global_unique_id++;
unique_id = HashedUint64::hash(unique_id);
unique_id_mapping.insert_or_assign(unique_id, reinterpret_cast<uint64_t const &>(newlyCreatedHandle));
map_data->display_id_reverse_mapping.insert_or_assign(newlyCreatedHandle, unique_id);
return (VkDisplayKHR)unique_id;
}
// VkDisplayKHR objects don't have a single point of creation, so we need to see if one already exists in the map before
// creating another.
VkDisplayKHR MaybeWrapDisplay(VkDisplayKHR handle, ValidationObject *map_data) {
// See if this display is already known
auto it = map_data->display_id_reverse_mapping.find(handle);
if (it != map_data->display_id_reverse_mapping.end()) return (VkDisplayKHR)it->second;
// Unknown, so wrap
return WrapDisplay(handle, map_data);
}
// Pre/post hook point declarations
"""
inline_copyright_message = """
// This file is ***GENERATED***. Do Not Edit.
// See layer_chassis_generator.py for modifications.
/* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (c) 2015-2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
*/"""
inline_custom_source_preamble = """
#include <string.h>
#include <mutex>
#define VALIDATION_ERROR_MAP_IMPL
#include "chassis.h"
#include "layer_chassis_dispatch.h"
small_unordered_map<void*, ValidationObject*, 2> layer_data_map;
// Global unique object identifier.
std::atomic<uint64_t> global_unique_id(1ULL);
// Map uniqueID to actual object handle. Accesses to the map itself are
// internally synchronized.
vl_concurrent_unordered_map<uint64_t, uint64_t, 4, HashedUint64> unique_id_mapping;
bool wrap_handles = true;
#define OBJECT_LAYER_NAME "VK_LAYER_KHRONOS_validation"
#define OBJECT_LAYER_DESCRIPTION "khronos_validation"
// Include layer validation object definitions
#include "best_practices.h"
#include "core_validation.h"
#include "command_counter.h"
#include "gpu_validation.h"
#include "object_lifetime_validation.h"
#include "stateless_validation.h"
#include "thread_safety.h"
namespace vulkan_layer_chassis {
using std::unordered_map;
static const VkLayerProperties global_layer = {
OBJECT_LAYER_NAME, VK_LAYER_API_VERSION, 1, "LunarG validation Layer",
};
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
{VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
{VK_EXT_DEBUG_MARKER_EXTENSION_NAME, VK_EXT_DEBUG_MARKER_SPEC_VERSION},
{VK_EXT_TOOLING_INFO_EXTENSION_NAME, VK_EXT_TOOLING_INFO_SPEC_VERSION}
};
typedef struct {
bool is_instance_api;
void* funcptr;
} function_data;
extern const std::unordered_map<std::string, function_data> name_to_funcptr_map;
// Manually written functions
// Check enabled instance extensions against supported instance extension whitelist
static void InstanceExtensionWhitelist(ValidationObject *layer_data, const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized instance extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Check enabled device extensions against supported device extension whitelist
static void DeviceExtensionWhitelist(ValidationObject *layer_data, const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
// Check for recognized device extensions
if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
log_msg(layer_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUIDUndefined,
"Device Extension %s is not supported by this layer. Using this extension may adversely affect validation "
"results and/or produce undefined behavior.",
pCreateInfo->ppEnabledExtensionNames[i]);
}
}
}
// Process validation features, flags and settings specified through extensions, a layer settings file, or environment variables
static const std::unordered_map<std::string, VkValidationFeatureDisableEXT> VkValFeatureDisableLookup = {
{"VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT", VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT", VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT", VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT", VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT", VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT", VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT},
{"VK_VALIDATION_FEATURE_DISABLE_ALL_EXT", VK_VALIDATION_FEATURE_DISABLE_ALL_EXT},
};
static const std::unordered_map<std::string, VkValidationFeatureEnableEXT> VkValFeatureEnableLookup = {
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT", VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT},
{"VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT", VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT},
};
static const std::unordered_map<std::string, ValidationCheckDisables> ValidationDisableLookup = {
{"VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE", VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE},
{"VALIDATION_CHECK_DISABLE_OBJECT_IN_USE", VALIDATION_CHECK_DISABLE_OBJECT_IN_USE},
{"VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET", VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET},
{"VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE", VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE},
{"VALIDATION_CHECK_DISABLE_QUERY_VALIDATION", VALIDATION_CHECK_DISABLE_QUERY_VALIDATION},
{"VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION", VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION},
};
// Set the local disable flag for the appropriate VALIDATION_CHECK_DISABLE enum
void SetValidationDisable(CHECK_DISABLED* disable_data, const ValidationCheckDisables disable_id) {
switch (disable_id) {
case VALIDATION_CHECK_DISABLE_COMMAND_BUFFER_STATE:
disable_data->command_buffer_state = true;
break;
case VALIDATION_CHECK_DISABLE_OBJECT_IN_USE:
disable_data->object_in_use = true;
break;
case VALIDATION_CHECK_DISABLE_IDLE_DESCRIPTOR_SET:
disable_data->idle_descriptor_set = true;
break;
case VALIDATION_CHECK_DISABLE_PUSH_CONSTANT_RANGE:
disable_data->push_constant_range = true;
break;
case VALIDATION_CHECK_DISABLE_QUERY_VALIDATION:
disable_data->query_validation = true;
break;
case VALIDATION_CHECK_DISABLE_IMAGE_LAYOUT_VALIDATION:
disable_data->image_layout_validation = true;
break;
default:
assert(true);
}
}
// Set the local disable flag for a single VK_VALIDATION_FEATURE_DISABLE_* flag
void SetValidationFeatureDisable(CHECK_DISABLED* disable_data, const VkValidationFeatureDisableEXT feature_disable) {
switch (feature_disable) {
case VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT:
disable_data->shader_validation = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT:
disable_data->thread_safety = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT:
disable_data->stateless_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT:
disable_data->object_tracking = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT:
disable_data->core_checks = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT:
disable_data->handle_wrapping = true;
break;
case VK_VALIDATION_FEATURE_DISABLE_ALL_EXT:
// Set all disabled flags to true
disable_data->SetAll(true);
break;
default:
break;
}
}
// Set the local enable flag for a single VK_VALIDATION_FEATURE_ENABLE_* flag
void SetValidationFeatureEnable(CHECK_ENABLED *enable_data, const VkValidationFeatureEnableEXT feature_enable) {
switch (feature_enable) {
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT:
enable_data->gpu_validation = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT:
enable_data->gpu_validation_reserve_binding_slot = true;
break;
case VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT:
enable_data->best_practices = true;
break;
default:
break;
}
}
// Set the local disable flag for settings specified through the VK_EXT_validation_flags extension
void SetValidationFlags(CHECK_DISABLED* disables, const VkValidationFlagsEXT* val_flags_struct) {
for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
switch (val_flags_struct->pDisabledValidationChecks[i]) {
case VK_VALIDATION_CHECK_SHADERS_EXT:
disables->shader_validation = true;
break;
case VK_VALIDATION_CHECK_ALL_EXT:
// Set all disabled flags to true
disables->SetAll(true);
break;
default:
break;
}
}
}
// Process Validation Features flags specified through the ValidationFeature extension
void SetValidationFeatures(CHECK_DISABLED *disable_data, CHECK_ENABLED *enable_data,
const VkValidationFeaturesEXT *val_features_struct) {
for (uint32_t i = 0; i < val_features_struct->disabledValidationFeatureCount; ++i) {
SetValidationFeatureDisable(disable_data, val_features_struct->pDisabledValidationFeatures[i]);
}
for (uint32_t i = 0; i < val_features_struct->enabledValidationFeatureCount; ++i) {
SetValidationFeatureEnable(enable_data, val_features_struct->pEnabledValidationFeatures[i]);
}
}
// Given a string representation of a list of enable enum values, call the appropriate setter function
void SetLocalEnableSetting(std::string list_of_enables, std::string delimiter, CHECK_ENABLED* enables) {
size_t pos = 0;
std::string token;
while (list_of_enables.length() != 0) {
pos = list_of_enables.find(delimiter);
if (pos != std::string::npos) {
token = list_of_enables.substr(0, pos);
} else {
pos = list_of_enables.length() - delimiter.length();
token = list_of_enables;
}
if (token.find("VK_VALIDATION_FEATURE_ENABLE_") != std::string::npos) {
auto result = VkValFeatureEnableLookup.find(token);
if (result != VkValFeatureEnableLookup.end()) {
SetValidationFeatureEnable(enables, result->second);
}
}
list_of_enables.erase(0, pos + delimiter.length());
}
}
// Given a string representation of a list of disable enum values, call the appropriate setter function
void SetLocalDisableSetting(std::string list_of_disables, std::string delimiter, CHECK_DISABLED* disables) {
size_t pos = 0;
std::string token;
while (list_of_disables.length() != 0) {
pos = list_of_disables.find(delimiter);
if (pos != std::string::npos) {
token = list_of_disables.substr(0, pos);
} else {
pos = list_of_disables.length() - delimiter.length();
token = list_of_disables;
}
if (token.find("VK_VALIDATION_FEATURE_DISABLE_") != std::string::npos) {
auto result = VkValFeatureDisableLookup.find(token);
if (result != VkValFeatureDisableLookup.end()) {
SetValidationFeatureDisable(disables, result->second);
}
}
if (token.find("VALIDATION_CHECK_DISABLE_") != std::string::npos) {
auto result = ValidationDisableLookup.find(token);
if (result != ValidationDisableLookup.end()) {
SetValidationDisable(disables, result->second);
}
}
list_of_disables.erase(0, pos + delimiter.length());
}
}
// Process enables and disables set though the vk_layer_settings.txt config file or through an environment variable
void ProcessConfigAndEnvSettings(const char* layer_description, CHECK_ENABLED* enables, CHECK_DISABLED* disables) {
std::string enable_key = layer_description;
std::string disable_key = layer_description;
enable_key.append(".enables");
disable_key.append(".disables");
std::string list_of_config_enables = getLayerOption(enable_key.c_str());
std::string list_of_env_enables = GetLayerEnvVar("VK_LAYER_ENABLES");
std::string list_of_config_disables = getLayerOption(disable_key.c_str());
std::string list_of_env_disables = GetLayerEnvVar("VK_LAYER_DISABLES");
#if defined(_WIN32)
std::string env_delimiter = ";";
#else
std::string env_delimiter = ":";
#endif
SetLocalEnableSetting(list_of_config_enables, ",", enables);
SetLocalEnableSetting(list_of_env_enables, env_delimiter, enables);
SetLocalDisableSetting(list_of_config_disables, ",", disables);
SetLocalDisableSetting(list_of_env_disables, env_delimiter, disables);
}
// Non-code-generated chassis API functions
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ApiParentExtensionEnabled(funcName, &layer_data->device_extensions)) {
return nullptr;
}
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
if (item->second.is_instance_api) {
return nullptr;
} else {
return reinterpret_cast<PFN_vkVoidFunction>(item->second.funcptr);
}
}
auto &table = layer_data->device_dispatch_table;
if (!table.GetDeviceProcAddr) return nullptr;
return table.GetDeviceProcAddr(device, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second.funcptr);
}
auto layer_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
auto &table = layer_data->instance_dispatch_table;
if (!table.GetInstanceProcAddr) return nullptr;
return table.GetInstanceProcAddr(instance, funcName);
}
// Layer implementation of vk_layerGetPhysicalDeviceProcAddr.
// Same lookup strategy as GetInstanceProcAddr: prefer this layer's intercept,
// otherwise forward to the next layer in the chain.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second.funcptr);
    }
    auto layer_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    auto &table = layer_data->instance_dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}
// Report this layer as the single instance layer; does not dispatch further.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
// Report this layer as the single device layer; physicalDevice is unused
// because the answer is the same for every physical device.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
// Report the instance extensions implemented by this layer.  Only answers
// queries addressed to this layer by name; all other layer names yield
// VK_ERROR_LAYER_NOT_PRESENT per the layer interface contract.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(ARRAY_SIZE(instance_extensions), instance_extensions, pCount, pProperties);
    return VK_ERROR_LAYER_NOT_PRESENT;
}
// Report the device extensions implemented by this layer when queried by this
// layer's name; otherwise forward the query down the chain so driver/other-layer
// extensions are reported.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(ARRAY_SIZE(device_extensions), device_extensions, pCount, pProperties);
    assert(physicalDevice);
    auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    return layer_data->instance_dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, pLayerName, pCount, pProperties);
}
// Layer implementation of vkCreateInstance.
// Bootstraps the validation framework: resolves the down-chain create call,
// gathers enable/disable settings (extension structs, config file, env vars),
// constructs one validation object per enabled checker, creates the instance
// down the chain, then wires all state into the per-instance layer data.
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo* chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
    // Advance the link info so the next layer sees its own entry.
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
    // Clamp the validation API version to the highest version this layer knows (1.0/1.1/1.2).
    uint32_t specified_version = (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0);
    uint32_t api_version;
    if (specified_version < VK_API_VERSION_1_1)
        api_version = VK_API_VERSION_1_0;
    else if (specified_version < VK_API_VERSION_1_2)
        api_version = VK_API_VERSION_1_1;
    else
        api_version = VK_API_VERSION_1_2;
    // Debug-report state must exist before instance creation so messages
    // emitted during creation reach any callbacks passed on the pNext chain.
    auto report_data = new debug_report_data{};
    report_data->instance_pnext_chain = SafePnextCopy(pCreateInfo->pNext);
    ActivateInstanceDebugCallbacks(report_data);
    // Collect enables/disables: VK_EXT_validation_features, then the older
    // VK_EXT_validation_flags, then config-file and environment settings.
    CHECK_ENABLED local_enables {};
    CHECK_DISABLED local_disables {};
    const auto *validation_features_ext = lvl_find_in_chain<VkValidationFeaturesEXT>(pCreateInfo->pNext);
    if (validation_features_ext) {
        SetValidationFeatures(&local_disables, &local_enables, validation_features_ext);
    }
    const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
    if (validation_flags_ext) {
        SetValidationFlags(&local_disables, validation_flags_ext);
    }
    ProcessConfigAndEnvSettings(OBJECT_LAYER_DESCRIPTION, &local_enables, &local_disables);
    // Create temporary dispatch vector for pre-calls until instance is created
    std::vector<ValidationObject*> local_object_dispatch;
    // Add VOs to dispatch vector. Order here will be the validation dispatch order!
    // NOTE: every checker object is constructed unconditionally; only the
    // enabled ones are appended to the dispatch vector.
    auto thread_checker = new ThreadSafety(nullptr);
    if (!local_disables.thread_safety) {
        local_object_dispatch.emplace_back(thread_checker);
    }
    thread_checker->container_type = LayerObjectTypeThreading;
    thread_checker->api_version = api_version;
    thread_checker->report_data = report_data;
    auto parameter_validation = new StatelessValidation;
    if (!local_disables.stateless_checks) {
        local_object_dispatch.emplace_back(parameter_validation);
    }
    parameter_validation->container_type = LayerObjectTypeParameterValidation;
    parameter_validation->api_version = api_version;
    parameter_validation->report_data = report_data;
    auto object_tracker = new ObjectLifetimes;
    if (!local_disables.object_tracking) {
        local_object_dispatch.emplace_back(object_tracker);
    }
    object_tracker->container_type = LayerObjectTypeObjectTracker;
    object_tracker->api_version = api_version;
    object_tracker->report_data = report_data;
    auto core_checks = new CoreChecks;
    if (!local_disables.core_checks) {
        local_object_dispatch.emplace_back(core_checks);
    }
    core_checks->container_type = LayerObjectTypeCoreValidation;
    core_checks->api_version = api_version;
    core_checks->report_data = report_data;
    auto best_practices = new BestPractices;
    if (local_enables.best_practices) {
        local_object_dispatch.emplace_back(best_practices);
    }
    best_practices->container_type = LayerObjectTypeBestPractices;
    best_practices->api_version = api_version;
    best_practices->report_data = report_data;
    auto gpu_assisted = new GpuAssisted;
    if (local_enables.gpu_validation) {
        local_object_dispatch.emplace_back(gpu_assisted);
    }
    gpu_assisted->container_type = LayerObjectTypeGpuAssisted;
    gpu_assisted->api_version = api_version;
    gpu_assisted->report_data = report_data;
    // If handle wrapping is disabled via the ValidationFeatures extension, override build flag
    if (local_disables.handle_wrapping) {
        wrap_handles = false;
    }
    // Init dispatch array and call registration functions
    for (auto intercept : local_object_dispatch) {
        (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateInstance(pCreateInfo, pAllocator, pInstance);
    }
    for (auto intercept : local_object_dispatch) {
        intercept->PreCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance);
    }
    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) return result;
    // Instance created -- populate the chassis framework object for it.
    auto framework = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    framework->object_dispatch = local_object_dispatch;
    framework->container_type = LayerObjectTypeInstance;
    framework->disabled = local_disables;
    framework->enabled = local_enables;
    framework->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &framework->instance_dispatch_table, fpGetInstanceProcAddr);
    framework->report_data = report_data;
    framework->api_version = api_version;
    framework->instance_extensions.InitFromInstanceCreateInfo(specified_version, pCreateInfo);
    layer_debug_messenger_actions(framework->report_data, pAllocator, OBJECT_LAYER_DESCRIPTION);
    // Propagate per-instance framework state into each checker object.
    object_tracker->instance_dispatch_table = framework->instance_dispatch_table;
    object_tracker->enabled = framework->enabled;
    object_tracker->disabled = framework->disabled;
    thread_checker->instance_dispatch_table = framework->instance_dispatch_table;
    thread_checker->enabled = framework->enabled;
    thread_checker->disabled = framework->disabled;
    parameter_validation->instance_dispatch_table = framework->instance_dispatch_table;
    parameter_validation->enabled = framework->enabled;
    parameter_validation->disabled = framework->disabled;
    core_checks->instance_dispatch_table = framework->instance_dispatch_table;
    core_checks->instance = *pInstance;
    core_checks->enabled = framework->enabled;
    core_checks->disabled = framework->disabled;
    core_checks->instance_state = core_checks;
    best_practices->instance_dispatch_table = framework->instance_dispatch_table;
    best_practices->enabled = framework->enabled;
    best_practices->disabled = framework->disabled;
    gpu_assisted->instance_dispatch_table = framework->instance_dispatch_table;
    gpu_assisted->enabled = framework->enabled;
    gpu_assisted->disabled = framework->disabled;
    for (auto intercept : framework->object_dispatch) {
        intercept->PostCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance, result);
    }
    InstanceExtensionWhitelist(framework, pCreateInfo, *pInstance);
    // Creation-time callbacks are no longer needed; normal messengers take over.
    DeactivateInstanceDebugCallbacks(report_data);
    return result;
}
// Layer implementation of vkDestroyInstance.
// Runs the generated pre-validate/pre-record loops, destroys the instance down
// the chain, runs post-record, then frees all per-instance layer state.
// NOTE(template): the triple-quote splices below are Python generator seams
// that expand to the standard intercept loops in the generated file.
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    auto layer_data = GetLayerDataPtr(key, layer_data_map);
    // Re-activate creation-time callbacks so teardown messages are delivered.
    ActivateInstanceDebugCallbacks(layer_data->report_data);
    """ + precallvalidate_loop + """
        auto lock = intercept->read_lock();
        (const_cast<const ValidationObject*>(intercept))->PreCallValidateDestroyInstance(instance, pAllocator);
    }
    """ + precallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallRecordDestroyInstance(instance, pAllocator);
    }
    layer_data->instance_dispatch_table.DestroyInstance(instance, pAllocator);
    """ + postcallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PostCallRecordDestroyInstance(instance, pAllocator);
    }
    DeactivateInstanceDebugCallbacks(layer_data->report_data);
    FreePnextChain(layer_data->report_data->instance_pnext_chain);
    layer_debug_utils_destroy_instance(layer_data->report_data);
    // Checker objects were allocated with new in CreateInstance; delete them here.
    for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
        delete *item;
    }
    FreeLayerDataPtr(key, layer_data_map);
}
// Layer implementation of vkCreateDevice.
// Computes the effective API version (min of app request and driver support),
// runs the instance-level intercepts, creates the device down the chain with a
// possibly-modified create info, then builds the per-device validation object
// dispatch vector mirroring the instance-level checker set.
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
    auto instance_interceptor = GetLayerDataPtr(get_dispatch_key(gpu), layer_data_map);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_interceptor->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
    // Get physical device limits for device
    VkPhysicalDeviceProperties device_properties = {};
    instance_interceptor->instance_dispatch_table.GetPhysicalDeviceProperties(gpu, &device_properties);
    // Setup the validation tables based on the application API version from the instance and the capabilities of the device driver
    uint32_t effective_api_version = std::min(device_properties.apiVersion, instance_interceptor->api_version);
    DeviceExtensions device_extensions = {};
    device_extensions.InitFromDeviceCreateInfo(&instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
    for (auto item : instance_interceptor->object_dispatch) {
        item->device_extensions = device_extensions;
    }
    // Deep copy the create info so PreCallRecord intercepts (e.g. GPU-AV) can modify it.
    safe_VkDeviceCreateInfo modified_create_info(pCreateInfo);
    bool skip = false;
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, &modified_create_info);
    }
    VkResult result = fpCreateDevice(gpu, reinterpret_cast<VkDeviceCreateInfo *>(&modified_create_info), pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }
    auto device_interceptor = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_interceptor->container_type = LayerObjectTypeDevice;
    // Save local info in device object
    device_interceptor->phys_dev_properties.properties = device_properties;
    device_interceptor->api_version = device_interceptor->device_extensions.InitFromDeviceCreateInfo(
        &instance_interceptor->instance_extensions, effective_api_version, pCreateInfo);
    device_interceptor->device_extensions = device_extensions;
    layer_init_device_dispatch_table(*pDevice, &device_interceptor->device_dispatch_table, fpGetDeviceProcAddr);
    device_interceptor->device = *pDevice;
    device_interceptor->physical_device = gpu;
    device_interceptor->instance = instance_interceptor->instance;
    device_interceptor->report_data = instance_interceptor->report_data;
    // Note that this defines the order in which the layer validation objects are called
    // Each device-level checker is linked to its instance-level counterpart and
    // appended only when not disabled (or, for opt-in checkers, when enabled).
    auto thread_safety = new ThreadSafety(reinterpret_cast<ThreadSafety *>(instance_interceptor->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeThreading)));
    thread_safety->container_type = LayerObjectTypeThreading;
    if (!instance_interceptor->disabled.thread_safety) {
        device_interceptor->object_dispatch.emplace_back(thread_safety);
    }
    auto stateless_validation = new StatelessValidation;
    stateless_validation->container_type = LayerObjectTypeParameterValidation;
    if (!instance_interceptor->disabled.stateless_checks) {
        device_interceptor->object_dispatch.emplace_back(stateless_validation);
    }
    auto object_tracker = new ObjectLifetimes;
    object_tracker->container_type = LayerObjectTypeObjectTracker;
    if (!instance_interceptor->disabled.object_tracking) {
        device_interceptor->object_dispatch.emplace_back(object_tracker);
    }
    auto core_checks = new CoreChecks;
    core_checks->container_type = LayerObjectTypeCoreValidation;
    core_checks->instance_state = reinterpret_cast<CoreChecks *>(
        core_checks->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeCoreValidation));
    if (!instance_interceptor->disabled.core_checks) {
        // Only enable the command counters when needed.
        if (device_extensions.vk_khr_performance_query) {
            auto command_counter = new CommandCounter(core_checks);
            command_counter->container_type = LayerObjectTypeDevice;
            device_interceptor->object_dispatch.emplace_back(command_counter);
        }
        device_interceptor->object_dispatch.emplace_back(core_checks);
    }
    auto best_practices = new BestPractices;
    best_practices->container_type = LayerObjectTypeBestPractices;
    best_practices->instance_state = reinterpret_cast<BestPractices *>(
        best_practices->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeBestPractices));
    if (instance_interceptor->enabled.best_practices) {
        device_interceptor->object_dispatch.emplace_back(best_practices);
    }
    auto gpu_assisted = new GpuAssisted;
    gpu_assisted->container_type = LayerObjectTypeGpuAssisted;
    gpu_assisted->instance_state = reinterpret_cast<GpuAssisted *>(
        gpu_assisted->GetValidationObject(instance_interceptor->object_dispatch, LayerObjectTypeGpuAssisted));
    if (instance_interceptor->enabled.gpu_validation) {
        device_interceptor->object_dispatch.emplace_back(gpu_assisted);
    }
    // Set per-intercept common data items
    for (auto dev_intercept : device_interceptor->object_dispatch) {
        dev_intercept->device = *pDevice;
        dev_intercept->physical_device = gpu;
        dev_intercept->instance = instance_interceptor->instance;
        dev_intercept->report_data = device_interceptor->report_data;
        dev_intercept->device_dispatch_table = device_interceptor->device_dispatch_table;
        dev_intercept->api_version = device_interceptor->api_version;
        dev_intercept->disabled = instance_interceptor->disabled;
        dev_intercept->enabled = instance_interceptor->enabled;
        dev_intercept->instance_dispatch_table = instance_interceptor->instance_dispatch_table;
        dev_intercept->instance_extensions = instance_interceptor->instance_extensions;
        dev_intercept->device_extensions = device_interceptor->device_extensions;
    }
    for (auto intercept : instance_interceptor->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
    }
    DeviceExtensionWhitelist(device_interceptor, pCreateInfo, *pDevice);
    return result;
}
// Layer implementation of vkDestroyDevice.
// Mirrors DestroyInstance: pre-validate/pre-record loops, destroy down the
// chain, post-record, then free the per-device checker objects and layer data.
// NOTE(template): the triple-quote splices below are Python generator seams
// that expand to the standard intercept loops in the generated file.
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    auto layer_data = GetLayerDataPtr(key, layer_data_map);
    """ + precallvalidate_loop + """
        auto lock = intercept->read_lock();
        (const_cast<const ValidationObject*>(intercept))->PreCallValidateDestroyDevice(device, pAllocator);
    }
    """ + precallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PreCallRecordDestroyDevice(device, pAllocator);
    }
    layer_data->device_dispatch_table.DestroyDevice(device, pAllocator);
    """ + postcallrecord_loop + """
        auto lock = intercept->write_lock();
        intercept->PostCallRecordDestroyDevice(device, pAllocator);
    }
    // Checker objects were allocated with new in CreateDevice; delete them here.
    for (auto item = layer_data->object_dispatch.begin(); item != layer_data->object_dispatch.end(); item++) {
        delete *item;
    }
    FreeLayerDataPtr(key, layer_data_map);
}
// Special-case APIs for which core_validation needs custom parameter lists and/or modifies parameters

// vkCreateGraphicsPipelines with an extra per-checker state parameter
// (cgpl_state) threaded through the intercepts.  An intercept may substitute
// its own pCreateInfos (e.g. instrumented shaders); the substituted array from
// the GPU-assisted slot, when present, is what gets dispatched.
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // One state slot per validation-object type, indexed by container_type.
    create_graphics_pipeline_api_state cgpl_state[LayerObjectTypeMaxEnum]{};
    for (auto intercept : layer_data->object_dispatch) {
        cgpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(cgpl_state[intercept->container_type]));
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(cgpl_state[intercept->container_type]));
    }
    // Dispatch GPU-AV's replacement create infos when it produced one.
    auto usepCreateInfos = (!cgpl_state[LayerObjectTypeGpuAssisted].pCreateInfos) ? pCreateInfos : cgpl_state[LayerObjectTypeGpuAssisted].pCreateInfos;
    VkResult result = DispatchCreateGraphicsPipelines(device, pipelineCache, createInfoCount, usepCreateInfos, pAllocator, pPipelines);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(cgpl_state[intercept->container_type]));
    }
    return result;
}
// This API saves some core_validation pipeline state on the stack for performance purposes

// vkCreateComputePipelines with per-checker state (ccpl_state), following the
// same pattern as CreateGraphicsPipelines above.
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkComputePipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // One state slot per validation-object type, indexed by container_type.
    create_compute_pipeline_api_state ccpl_state[LayerObjectTypeMaxEnum]{};
    for (auto intercept : layer_data->object_dispatch) {
        ccpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &(ccpl_state[intercept->container_type]));
    }
    // Dispatch GPU-AV's replacement create infos when it produced one.
    auto usepCreateInfos = (!ccpl_state[LayerObjectTypeGpuAssisted].pCreateInfos) ? pCreateInfos : ccpl_state[LayerObjectTypeGpuAssisted].pCreateInfos;
    VkResult result = DispatchCreateComputePipelines(device, pipelineCache, createInfoCount, usepCreateInfos, pAllocator, pPipelines);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result, &(ccpl_state[intercept->container_type]));
    }
    return result;
}
// vkCreateRayTracingPipelinesNV with per-checker state (crtpl_state).  Unlike
// the graphics/compute variants above, the original pCreateInfos is always
// dispatched -- no GPU-AV substitution here.
VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV(
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // One state slot per validation-object type, indexed by container_type.
    create_ray_tracing_pipeline_api_state crtpl_state[LayerObjectTypeMaxEnum]{};
    for (auto intercept : layer_data->object_dispatch) {
        crtpl_state[intercept->container_type].pCreateInfos = pCreateInfos;
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos,
                                                                                   pAllocator, pPipelines, &(crtpl_state[intercept->container_type]));
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
                                                            pPipelines, &(crtpl_state[intercept->container_type]));
    }
    VkResult result = DispatchCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
                                                             pPipelines, result, &(crtpl_state[intercept->container_type]));
    }
    return result;
}
// This API needs the ability to modify a down-chain parameter

// vkCreatePipelineLayout: PreCallRecord intercepts may edit
// cpl_state.modified_create_info (seeded from the caller's pCreateInfo), and
// that modified copy is what gets dispatched down the chain.
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(
    VkDevice device,
    const VkPipelineLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkPipelineLayout* pPipelineLayout) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    create_pipeline_layout_api_state cpl_state{};
    cpl_state.modified_create_info = *pCreateInfo;
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, &cpl_state);
    }
    VkResult result = DispatchCreatePipelineLayout(device, &cpl_state.modified_create_info, pAllocator, pPipelineLayout);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, result);
    }
    return result;
}
// This API needs some local stack data for performance reasons and also may modify a parameter

// vkCreateShaderModule: csm_state.instrumented_create_info starts as a copy of
// the caller's pCreateInfo and may be replaced by an intercept (e.g. GPU-AV
// shader instrumentation) before being dispatched.
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(
    VkDevice device,
    const VkShaderModuleCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkShaderModule* pShaderModule) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    create_shader_module_api_state csm_state{};
    csm_state.instrumented_create_info = *pCreateInfo;
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, &csm_state);
    }
    VkResult result = DispatchCreateShaderModule(device, &csm_state.instrumented_create_info, pAllocator, pShaderModule);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result, &csm_state);
    }
    return result;
}
// vkAllocateDescriptorSets: carries a descriptorset AllocateDescriptorSetsData
// object through validate and post-record so layout analysis done during
// validation can be reused after the allocation succeeds.
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(
    VkDevice device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    cvdescriptorset::AllocateDescriptorSetsData ads_state(pAllocateInfo->descriptorSetCount);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, &ads_state);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    }
    VkResult result = DispatchAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result, &ads_state);
    }
    return result;
}
// This API needs the ability to modify a down-chain parameter

// vkCreateBuffer: PreCallRecord intercepts may edit
// cb_state.modified_create_info (seeded from the caller's pCreateInfo), and
// that modified copy is what gets dispatched down the chain.
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(
    VkDevice device,
    const VkBufferCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkBuffer* pBuffer) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    create_buffer_api_state cb_state{};
    cb_state.modified_create_info = *pCreateInfo;
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, &cb_state);
    }
    VkResult result = DispatchCreateBuffer(device, &cb_state.modified_create_info, pAllocator, pBuffer);
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, result);
    }
    return result;
}
// Handle tooling queries manually as this is a request for layer information

// vkGetPhysicalDeviceToolPropertiesEXT: this layer reports itself as a tool.
// When the caller supplied an output array, the layer's own entry is written
// into slot 0 and the remaining array/count is passed down the chain; the
// count is then restored (+1) to include the layer's entry in the total.
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceToolPropertiesEXT(
    VkPhysicalDevice physicalDevice,
    uint32_t* pToolCount,
    VkPhysicalDeviceToolPropertiesEXT* pToolProperties) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    bool skip = false;
    static const VkPhysicalDeviceToolPropertiesEXT khronos_layer_tool_props = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT,
        nullptr,
        "Khronos Validation Layer",
        STRINGIFY(VK_HEADER_VERSION),
        VK_TOOL_PURPOSE_VALIDATION_BIT_EXT | VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT | VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT | VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT,
        "Khronos Validation Layer",
        OBJECT_LAYER_NAME
    };
    auto original_pToolProperties = pToolProperties;
    if (pToolProperties != nullptr) {
        // Consume slot 0 for this layer; advance the pointer/count for the chain.
        *pToolProperties = khronos_layer_tool_props;
        pToolProperties = ((*pToolCount > 1) ? &pToolProperties[1] : nullptr);
        (*pToolCount)--;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->read_lock();
        skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidateGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PreCallRecordGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties);
    }
    VkResult result = DispatchGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties);
    if (original_pToolProperties != nullptr) {
        pToolProperties = original_pToolProperties;
    }
    // Re-include this layer's own tool entry in the reported count.
    (*pToolCount)++;
    for (auto intercept : layer_data->object_dispatch) {
        auto lock = intercept->write_lock();
        intercept->PostCallRecordGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties, result);
    }
    return result;
}
// ValidationCache APIs do not dispatch

// vkCreateValidationCacheEXT: implemented entirely by the core-validation
// object (no down-chain dispatch).  Returns VK_SUCCESS untouched when core
// validation is disabled.
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(
    VkDevice device,
    const VkValidationCacheCreateInfoEXT* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkValidationCacheEXT* pValidationCache) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        result = validation_data->CoreLayerCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
    }
    return result;
}
// vkDestroyValidationCacheEXT: handled solely by the core-validation object;
// a no-op when core validation is disabled.
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(
    VkDevice device,
    VkValidationCacheEXT validationCache,
    const VkAllocationCallbacks* pAllocator) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        validation_data->CoreLayerDestroyValidationCacheEXT(device, validationCache, pAllocator);
    }
}
// vkMergeValidationCachesEXT: handled solely by the core-validation object;
// returns VK_SUCCESS untouched when core validation is disabled.
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(
    VkDevice device,
    VkValidationCacheEXT dstCache,
    uint32_t srcCacheCount,
    const VkValidationCacheEXT* pSrcCaches) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        result = validation_data->CoreLayerMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
    }
    return result;
}
// vkGetValidationCacheDataEXT: handled solely by the core-validation object;
// returns VK_SUCCESS untouched when core validation is disabled.
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(
    VkDevice device,
    VkValidationCacheEXT validationCache,
    size_t* pDataSize,
    void* pData) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_SUCCESS;
    ValidationObject *validation_data = layer_data->GetValidationObject(layer_data->object_dispatch, LayerObjectTypeCoreValidation);
    if (validation_data) {
        auto lock = validation_data->write_lock();
        result = validation_data->CoreLayerGetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
    }
    return result;
}
}"""
# C++ fragment injected into the generated header: extra virtual hooks on the
# ValidationObject base class.  Several create-APIs get an additional opaque
# state parameter (cgpl_state/ccpl_state/csm_state/...) whose default
# implementation simply forwards to the plain overload.  This text is emitted
# verbatim into generated C++ -- do not edit casually.
inline_custom_validation_class_definitions = """
virtual VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache) { return VK_SUCCESS; };
virtual void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator) {};
virtual VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches) { return VK_SUCCESS; };
virtual VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData) { return VK_SUCCESS; };
// Allow additional state parameter for CreateGraphicsPipelines
virtual bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) const {
return PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state) {
PreCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* cgpl_state) {
PostCallRecordCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateComputePipelines
virtual bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) const {
return PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow additional state parameter for CreateRayTracingPipelinesNV
virtual bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state) const {
return PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PreCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state) {
PreCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
};
virtual void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* pipe_state) {
PostCallRecordCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, result);
};
// Allow modification of a down-chain parameter for CreatePipelineLayout
virtual void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, void *cpl_state) {
PreCallRecordCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
};
// Enable the CreateShaderModule API to take an extra argument for state preservation and paramter modification
virtual bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) const {
return PreCallValidateCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state) {
PreCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
};
virtual void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, VkResult result, void* csm_state) {
PostCallRecordCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, result);
};
// Allow AllocateDescriptorSets to use some local stack storage for performance purposes
virtual bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, void* ads_state) const {
return PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
};
virtual void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state) {
PostCallRecordAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, result);
};
// Allow modification of a down-chain parameter for CreateBuffer
virtual void PreCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer, void *cb_state) {
PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
};
// Modify a parameter to CreateDevice
virtual void PreCallRecordCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice, safe_VkDeviceCreateInfo *modified_create_info) {
PreCallRecordCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
};
"""
# C++ fragment appended to the generated source file: the loader-layer
# interface v0 entry points, all thin wrappers around vulkan_layer_chassis.
# Emitted verbatim -- do not edit casually.
inline_custom_source_postamble = """
// loader-layer interface v0, just wrappers since there is only a layer
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return vulkan_layer_chassis::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return vulkan_layer_chassis::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return vulkan_layer_chassis::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return vulkan_layer_chassis::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
const char *funcName) {
return vulkan_layer_chassis::GetPhysicalDeviceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
}
return VK_SUCCESS;
}"""
def __init__(self,
             errFile = sys.stderr,
             warnFile = sys.stderr,
             diagFile = sys.stdout):
    """Initialize the chassis generator: per-section text accumulators plus
    the growing base-class definition string for the layer factory."""
    OutputGenerator.__init__(self, errFile, warnFile, diagFile)
    # Internal state - accumulators for different inner block text
    self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
    self.intercepts = []  # lines of the name -> function_data map emitted in endFile()
    self.layer_factory = '' # String containing base layer factory class definition
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
    """Return True when the <param> XML element carries a 'len' attribute."""
    return 'len' in param.attrib
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
    """Return True when any <type> child of the <param> element is followed
    by tail text containing a '*' (i.e. the C declaration is a pointer)."""
    return any(child.tag == 'type' and child.tail is not None and '*' in child.tail
               for child in param)
#
#
def beginFile(self, genOpts):
    """Start a new output file: emit the copyright banner, a '#pragma once'
    guard when generating the header, and the matching preamble text.

    NOTE(review): source indentation was lost; the placement of self.newline()
    relative to the '#pragma once' branch is reconstructed -- confirm against
    the original generator.
    """
    OutputGenerator.beginFile(self, genOpts)
    # Output Copyright
    write(self.inline_copyright_message, file=self.outFile)
    # Multiple inclusion protection
    # self.header drives header-vs-source behavior for the rest of this file.
    self.header = False
    if (self.genOpts.filename and 'h' == self.genOpts.filename[-1]):
        self.header = True
        write('#pragma once', file=self.outFile)
    self.newline()
    if self.header:
        write(self.inline_custom_header_preamble, file=self.outFile)
    else:
        write(self.inline_custom_source_preamble, file=self.outFile)
    # Seed the ValidationObject base-class text; genCmd() appends to it.
    self.layer_factory += self.inline_custom_header_class_definition
#
#
def endFile(self):
    """Finish the current file.

    Source file: write the intercept map and close the C++ namespace, then
    append the custom source postamble.  Header file: finish the layer
    factory class definition and emit it.
    """
    # Finish C++ namespace and multiple inclusion protection
    self.newline()
    if not self.header:
        # Record intercepted procedures
        write('// Map of intercepted ApiName to its associated function data', file=self.outFile)
        write('#ifdef _MSC_VER', file=self.outFile)
        write('#pragma warning( suppress: 6262 ) // VS analysis: this uses more than 16 kiB, which is fine here at global scope', file=self.outFile)
        write('#endif', file=self.outFile)
        write('const std::unordered_map<std::string, function_data> name_to_funcptr_map = {', file=self.outFile)
        write('\n'.join(self.intercepts), file=self.outFile)
        write('};\n', file=self.outFile)
        self.newline()
        write('} // namespace vulkan_layer_chassis', file=self.outFile)
    if self.header:
        self.newline()
        # Output Layer Factory Class Definitions
        self.layer_factory += self.inline_custom_validation_class_definitions
        self.layer_factory += '};\n\n'
        self.layer_factory += 'extern small_unordered_map<void*, ValidationObject*, 2> layer_data_map;'
        write(self.layer_factory, file=self.outFile)
    else:
        write(self.inline_custom_source_postamble, file=self.outFile)
    # Finish processing in superclass
    OutputGenerator.endFile(self)
def beginFeature(self, interface, emit):
    """Start a feature (extension/version): record its preprocessor guard and
    reset the per-feature section accumulators."""
    # Start processing in superclass
    OutputGenerator.beginFeature(self, interface, emit)
    # Get feature extra protect
    self.featureExtraProtect = GetFeatureProtect(interface)
    # Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this
    # feature. They're only printed in endFeature().
    self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
def endFeature(self):
    """Flush the accumulated sections for this feature, wrapped in the
    feature's #ifdef guard when one exists."""
    # Actually write the interface to the output file.
    if (self.emit):
        self.newline()
        # If type declarations are needed by other features based on this one, it may be necessary to suppress the ExtraProtect,
        # or move it below the 'for section...' loop.
        if (self.featureExtraProtect != None):
            write('#ifdef', self.featureExtraProtect, file=self.outFile)
        for section in self.TYPE_SECTIONS:
            contents = self.sections[section]
            if contents:
                write('\n'.join(contents), file=self.outFile)
                self.newline()
        if (self.sections['command']):
            write('\n'.join(self.sections['command']), end=u'', file=self.outFile)
            self.newline()
        if (self.featureExtraProtect != None):
            write('#endif //', self.featureExtraProtect, file=self.outFile)
    # Finish processing in superclass
    OutputGenerator.endFeature(self)
#
# Append a definition to the specified section
def appendSection(self, section, text):
    """Accumulate `text` under `section`; flushed later by endFeature()."""
    self.sections[section] += [text]
#
# Type generation
def genType(self, typeinfo, name, alias):
    """No-op: the chassis generator does not emit type declarations."""
    pass
#
# Struct (e.g. C "struct" type) generation. This is a special case of the <type> tag where
# the contents are interpreted as a set of <member> tags (declared like <param> tags).
# Only simple member declarations are supported (no nested structs etc.)
def genStruct(self, typeinfo, typeName):
    """Emit a C typedef for a struct/union <type> into the 'struct' section."""
    OutputGenerator.genStruct(self, typeinfo, typeName)
    member_decls = [self.makeCParamDecl(member, self.genOpts.alignFuncParam) + ';\n'
                    for member in typeinfo.elem.findall('.//member')]
    decl = 'typedef %s %s {\n' % (typeinfo.elem.get('category'), typeName)
    decl += ''.join(member_decls)
    decl += '} %s;\n' % typeName
    self.appendSection('struct', decl)
#
# Group (e.g. C "enum" type) generation. These are concatenated together with other types.
def genGroup(self, groupinfo, groupName, alias):
    """No-op: enum groups are not emitted by the chassis generator."""
    pass
# Enumerant generation
# <enum> tags may specify their values in several ways, but are usually just integers.
def genEnum(self, enuminfo, name, alias):
    """No-op: individual enumerants are not emitted by the chassis generator."""
    pass
#
# Customize Cdecl for layer factory base class
def BaseClassCdecl(self, elem, name):
    """Build the trio of virtual Pre/Post hook declarations for one command.

    Derives an undecorated prototype from the PFN typedef string produced by
    makeCDecls(), then clones it into PreCallValidate / PreCallRecord /
    PostCallRecord declarations; commands returning VkResult/VkDeviceAddress
    get a trailing `result` parameter on the PostCallRecord hook.
    """
    raw = self.makeCDecls(elem)[1]
    # Toss everything before the undecorated name
    prototype = raw.split("VKAPI_PTR *PFN_vk")[1]
    # Drop the ')' that closed the PFN cast, and give the decl an empty body.
    prototype = prototype.replace(")", "", 1)
    prototype = prototype.replace(";", " {};")
    # Build up pre/post call virtual function declarations
    pre_call_validate = 'virtual bool PreCallValidate' + prototype
    # Validation hooks are const and default to "no error found".
    pre_call_validate = pre_call_validate.replace("{}", "const { return false; }")
    pre_call_record = 'virtual void PreCallRecord' + prototype
    post_call_record = 'virtual void PostCallRecord' + prototype
    resulttype = elem.find('proto/type')
    if resulttype.text == 'VkResult':
        post_call_record = post_call_record.replace(')', ', VkResult result)')
    elif resulttype.text == 'VkDeviceAddress':
        post_call_record = post_call_record.replace(')', ', VkDeviceAddress result)')
    return '    %s\n    %s\n    %s\n' % (pre_call_validate, pre_call_record, post_call_record)
#
# Command generation
def genCmd(self, cmdinfo, name, alias):
    """Generate the layer-chassis intercept for one Vulkan command.

    Header pass: declare the intercept prototype and append virtual hook
    declarations to the layer factory base class.  Source pass: record the
    command in the intercept map and emit the wrapper body
    (PreCallValidate -> PreCallRecord -> dispatch -> PostCallRecord).
    """
    ignore_functions = [
        'vkEnumerateInstanceVersion',
    ]
    if name in ignore_functions:
        return
    if self.header: # In the header declare all intercepts
        self.appendSection('command', '')
        self.appendSection('command', self.makeCDecls(cmdinfo.elem)[0])
        if (self.featureExtraProtect != None):
            self.layer_factory += '#ifdef %s\n' % self.featureExtraProtect
        # Update base class with virtual function declarations
        if 'ValidationCache' not in name:
            self.layer_factory += self.BaseClassCdecl(cmdinfo.elem, name)
        if (self.featureExtraProtect != None):
            self.layer_factory += '#endif\n'
        return
    # Instance-level commands dispatch on VkInstance/VkPhysicalDevice.
    is_instance = 'false'
    dispatchable_type = cmdinfo.elem.find('param/type').text
    if dispatchable_type in ["VkPhysicalDevice", "VkInstance"] or name == 'vkCreateInstance':
        is_instance = 'true'
    if name in self.manual_functions:
        # Hand-written implementation exists; only register it in the map.
        self.intercepts += [ ' {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
        return
    # Record that the function will be intercepted
    if (self.featureExtraProtect != None):
        self.intercepts += [ '#ifdef %s' % self.featureExtraProtect ]
    self.intercepts += [ ' {"%s", {%s, (void*)%s}},' % (name, is_instance, name[2:]) ]
    if (self.featureExtraProtect != None):
        self.intercepts += [ '#endif' ]
    OutputGenerator.genCmd(self, cmdinfo, name, alias)
    #
    decls = self.makeCDecls(cmdinfo.elem)
    self.appendSection('command', '')
    self.appendSection('command', '%s {' % decls[0][:-1])
    # Setup common to call wrappers. First parameter is always dispatchable
    dispatchable_name = cmdinfo.elem.find('param/name').text
    self.appendSection('command', ' auto layer_data = GetLayerDataPtr(get_dispatch_key(%s), layer_data_map);' % (dispatchable_name))
    api_function_name = cmdinfo.elem.attrib.get('name')
    params = cmdinfo.elem.findall('param/name')
    paramstext = ', '.join([str(param.text) for param in params])
    API = api_function_name.replace('vk','Dispatch') + '('
    # Declare result variable, if any.
    # Maps C return type -> statement used to bail out when validation fails.
    return_map = {
        'PFN_vkVoidFunction': 'return nullptr;',
        'VkBool32': 'return VK_FALSE;',
        'VkDeviceAddress': 'return 0;',
        'VkResult': 'return VK_ERROR_VALIDATION_FAILED_EXT;',
        'void': 'return;',
        'uint32_t': 'return 0;',
        'uint64_t': 'return 0;'
    }
    resulttype = cmdinfo.elem.find('proto/type')
    assignresult = ''
    if (resulttype.text != 'void'):
        assignresult = resulttype.text + ' result = '
    # Set up skip and locking
    self.appendSection('command', ' bool skip = false;')
    # Generate pre-call validation source code
    self.appendSection('command', ' %s' % self.precallvalidate_loop)
    self.appendSection('command', ' auto lock = intercept->read_lock();')
    self.appendSection('command', ' skip |= (const_cast<const ValidationObject*>(intercept))->PreCallValidate%s(%s);' % (api_function_name[2:], paramstext))
    self.appendSection('command', ' if (skip) %s' % return_map[resulttype.text])
    self.appendSection('command', ' }')
    # Generate pre-call state recording source code
    self.appendSection('command', ' %s' % self.precallrecord_loop)
    self.appendSection('command', ' auto lock = intercept->write_lock();')
    self.appendSection('command', ' intercept->PreCallRecord%s(%s);' % (api_function_name[2:], paramstext))
    self.appendSection('command', ' }')
    # Insert pre-dispatch debug utils function call
    if name in self.pre_dispatch_debug_utils_functions:
        self.appendSection('command', ' %s' % self.pre_dispatch_debug_utils_functions[name])
    # Output dispatch (down-chain) function call
    self.appendSection('command', ' ' + assignresult + API + paramstext + ');')
    # Insert post-dispatch debug utils function call
    if name in self.post_dispatch_debug_utils_functions:
        self.appendSection('command', ' %s' % self.post_dispatch_debug_utils_functions[name])
    # Generate post-call object processing source code
    self.appendSection('command', ' %s' % self.postcallrecord_loop)
    returnparam = ''
    if (resulttype.text == 'VkResult' or resulttype.text == 'VkDeviceAddress'):
        returnparam = ', result'
    self.appendSection('command', ' auto lock = intercept->write_lock();')
    self.appendSection('command', ' intercept->PostCallRecord%s(%s%s);' % (api_function_name[2:], paramstext, returnparam))
    self.appendSection('command', ' }')
    # Return result variable, if any.
    if (resulttype.text != 'void'):
        self.appendSection('command', ' return result;')
    self.appendSection('command', '}')
#
# Override makeProtoName to drop the "vk" prefix
def makeProtoName(self, name, tail):
    """Build a prototype name: apientry + command name without 'vk' + tail."""
    return '%s%s%s' % (self.genOpts.apientry, name[2:], tail)
| 50.344881 | 292 | 0.707385 |
0328723c51029c8c0b88585bdf6581734bf35922 | 149 | py | Python | Aulas de Python Mundo 3/Curso Python - #17 - Tuplas - Part 2.py | ErikDMCosta/CEV-Praticas_Python-Mundo_3 | 9669fe1647ec5effd0b1c4a576aa670107657a77 | [
"MIT"
] | null | null | null | Aulas de Python Mundo 3/Curso Python - #17 - Tuplas - Part 2.py | ErikDMCosta/CEV-Praticas_Python-Mundo_3 | 9669fe1647ec5effd0b1c4a576aa670107657a77 | [
"MIT"
] | null | null | null | Aulas de Python Mundo 3/Curso Python - #17 - Tuplas - Part 2.py | ErikDMCosta/CEV-Praticas_Python-Mundo_3 | 9669fe1647ec5effd0b1c4a576aa670107657a77 | [
"MIT"
] | null | null | null | # num = (2, 5, 9, 7)
num = [2, 5, 9, 7]
# num(2) = 3 # NÃO PODE POIS TUPLAS SÃO IMUTAVEIS
num[2] = 3 # TROCA ELEMENTO DO INDICE DOIS
print(num)
| 24.833333 | 54 | 0.577181 |
0e39c65b0d28b206b19077e46a2f4457920dac41 | 44,825 | py | Python | mesonbuild/backend/xcodebackend.py | jonaslb/meson | 8133a7b9a4b8f0686fbc479aa2d64e41c85a979b | [
"Apache-2.0"
] | null | null | null | mesonbuild/backend/xcodebackend.py | jonaslb/meson | 8133a7b9a4b8f0686fbc479aa2d64e41c85a979b | [
"Apache-2.0"
] | null | null | null | mesonbuild/backend/xcodebackend.py | jonaslb/meson | 8133a7b9a4b8f0686fbc479aa2d64e41c85a979b | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import build
from .. import dependencies
from .. import mesonlib
from .. import mlog
import uuid, os, operator
import typing as T
from ..mesonlib import MesonException
from ..interpreter import Interpreter
class XCodeBackend(backends.Backend):
def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional[Interpreter]):
    """Set up per-project state: the stable project UID, the extension ->
    Xcode file-type map, and pre-generated ids for top-level objects."""
    super().__init__(build, interpreter)
    self.name = 'xcode'
    # Xcode object ids are 24 uppercase hex chars; reuse the default lang guid.
    self.project_uid = self.environment.coredata.lang_guids['default'].replace('-', '')[:24]
    self.project_conflist = self.gen_id()
    self.indent = '\t' # Recent versions of Xcode uses tabs
    self.indent_level = 0
    # Maps a lowercase file extension to Xcode's explicitFileType string.
    self.xcodetypemap = {'c': 'sourcecode.c.c',
                         'a': 'archive.ar',
                         'cc': 'sourcecode.cpp.cpp',
                         'cxx': 'sourcecode.cpp.cpp',
                         'cpp': 'sourcecode.cpp.cpp',
                         'c++': 'sourcecode.cpp.cpp',
                         'm': 'sourcecode.c.objc',
                         'mm': 'sourcecode.cpp.objcpp',
                         'h': 'sourcecode.c.h',
                         'hpp': 'sourcecode.cpp.h',
                         'hxx': 'sourcecode.cpp.h',
                         'hh': 'sourcecode.cpp.hh',
                         'inc': 'sourcecode.c.h',
                         'dylib': 'compiled.mach-o.dylib',
                         'o': 'compiled.mach-o.objfile',
                         's': 'sourcecode.asm',
                         'asm': 'sourcecode.asm',
                         }
    # Ids for the synthetic ALL_BUILD and RUN_TESTS aggregate targets.
    self.maingroup_id = self.gen_id()
    self.all_id = self.gen_id()
    self.all_buildconf_id = self.gen_id()
    self.buildtypes = ['debug']
    self.test_id = self.gen_id()
    self.test_command_id = self.gen_id()
    self.test_buildconf_id = self.gen_id()
def gen_id(self):
    """Return a fresh Xcode-style object id: 24 uppercase hex characters."""
    # uuid4().hex is the dash-free lowercase hex form of str(uuid4()).
    return uuid.uuid4().hex.upper()[:24]
def get_target_dir(self, target):
    """Return the target's output dir (subdir/buildtype), relative to the
    build root; also creates the directory on disk as a side effect."""
    dirname = os.path.join(target.get_subdir(), self.environment.coredata.get_option(mesonlib.OptionKey('buildtype')))
    os.makedirs(os.path.join(self.environment.get_build_dir(), dirname), exist_ok=True)
    return dirname
def target_to_build_root(self, target):
    """Return the relative path ('..' per directory level) from the target's
    output directory back up to the build root; '' for root-level targets."""
    # Hoisted: the original called get_target_dir() twice, re-running its
    # os.makedirs() side effect for no benefit.
    target_dir = self.get_target_dir(target)
    if target_dir == '':
        return ''
    depth = len(os.path.normpath(target_dir).split(os.sep))
    return os.sep.join(['..'] * depth)
def write_line(self, text):
    """Write `text` at the current indent level, guaranteeing exactly one
    trailing newline reaches the project file."""
    if not text.endswith('\n'):
        text += '\n'
    self.ofile.write(self.indent * self.indent_level + text)
def generate(self):
    """Backend entry point: precompute every id map, then write
    <project>.xcodeproj/project.pbxproj section by section.

    The id maps must all exist before any pbxproj section is written,
    because sections cross-reference each other's ids.
    """
    test_data = self.serialize_tests()[0]
    self.generate_filemap()
    self.generate_buildmap()
    self.generate_buildstylemap()
    self.generate_build_phase_map()
    self.generate_build_configuration_map()
    self.generate_build_configurationlist_map()
    self.generate_project_configurations_map()
    self.generate_buildall_configurations_map()
    self.generate_test_configurations_map()
    self.generate_native_target_map()
    self.generate_native_frameworks_map()
    self.generate_source_phase_map()
    self.generate_target_dependency_map()
    self.generate_pbxdep_map()
    self.generate_containerproxy_map()
    self.proj_dir = os.path.join(self.environment.get_build_dir(), self.build.project_name + '.xcodeproj')
    os.makedirs(self.proj_dir, exist_ok=True)
    self.proj_file = os.path.join(self.proj_dir, 'project.pbxproj')
    with open(self.proj_file, 'w') as self.ofile:
        # Section order mirrors the layout Xcode itself writes.
        self.generate_prefix()
        self.generate_pbx_aggregate_target()
        self.generate_pbx_build_file()
        self.generate_pbx_build_style()
        self.generate_pbx_container_item_proxy()
        self.generate_pbx_file_reference()
        self.generate_pbx_frameworks_buildphase()
        self.generate_pbx_group()
        self.generate_pbx_native_target()
        self.generate_pbx_project()
        self.generate_pbx_shell_build_phase(test_data)
        self.generate_pbx_sources_build_phase()
        self.generate_pbx_target_dependency()
        self.generate_xc_build_configuration()
        self.generate_xc_configurationList()
        self.generate_suffix()
def get_xcodetype(self, fname):
    """Map a filename's extension to Xcode's file-type string, falling back
    to 'sourcecode.unknown' (with a warning) for unrecognized extensions."""
    extension = fname.split('.')[-1].lower()
    filetype = self.xcodetypemap.get(extension)
    if filetype:
        return filetype
    filetype = 'sourcecode.unknown'
    mlog.warning('Unknown file type "%s" fallbacking to "%s". Xcode project might be malformed.' % (fname, filetype))
    return filetype
def generate_filemap(self):
    """Assign PBXFileReference ids: one per source/object path of every
    target, plus one per target for its product file reference."""
    self.filemap = {} # Key is source file relative to src root.
    self.target_filemap = {}
    for name, t in self.build.targets.items():
        for s in t.sources:
            if isinstance(s, mesonlib.File):
                s = os.path.join(s.subdir, s.fname)
            self.filemap[s] = self.gen_id()
        for o in t.objects:
            if isinstance(o, str):
                o = os.path.join(t.subdir, o)
            # NOTE(review): non-str objects (e.g. extracted objects) end up
            # keyed by the object itself here -- confirm that is intended.
            self.filemap[o] = self.gen_id()
        self.target_filemap[name] = self.gen_id()
def generate_buildmap(self):
    """Assign a PBXBuildFile id to every source and every string object of
    every target; keys are paths matching generate_filemap()."""
    self.buildmap = {}
    for t in self.build.targets.values():
        for s in t.sources:
            # Sources are assumed File-like (have .subdir/.fname), matching
            # how generate_filemap() keys File sources.
            s = os.path.join(s.subdir, s.fname)
            self.buildmap[s] = self.gen_id()
        for o in t.objects:
            # Bug fix: test for str BEFORE joining.  The original joined
            # first, so a non-str object (ExtractedObjects) raised TypeError
            # instead of being skipped as the isinstance guard intended.
            if isinstance(o, str):
                o = os.path.join(t.subdir, o)
                self.buildmap[o] = self.gen_id()
def generate_buildstylemap(self):
    """One PBXBuildStyle id per build type (only 'debug' is generated)."""
    self.buildstylemap = dict(debug=self.gen_id())
def generate_build_phase_map(self):
    """Give each target ids for its own named phase plus the standard
    Frameworks/Resources/Sources build phases."""
    for tname, t in self.build.targets.items():
        # One id keyed by the target's own name, then one per standard phase.
        t.buildphasemap = {phase: self.gen_id()
                           for phase in (tname, 'Frameworks', 'Resources', 'Sources')}
def generate_build_configuration_map(self):
    """Map each target name to its per-buildtype XCBuildConfiguration ids."""
    self.buildconfmap = {tname: {'debug': self.gen_id()}
                         for tname in self.build.targets}
def generate_project_configurations_map(self):
    """XCBuildConfiguration ids for the project itself (only 'debug')."""
    self.project_configurations = dict(debug=self.gen_id())

def generate_buildall_configurations_map(self):
    """XCBuildConfiguration ids for the ALL_BUILD target (only 'debug')."""
    self.buildall_configurations = dict(debug=self.gen_id())

def generate_test_configurations_map(self):
    """XCBuildConfiguration ids for the RUN_TESTS target (only 'debug')."""
    self.test_configurations = dict(debug=self.gen_id())
def generate_build_configurationlist_map(self):
    """One XCConfigurationList id per target."""
    self.buildconflistmap = {tname: self.gen_id() for tname in self.build.targets}

def generate_native_target_map(self):
    """One PBXNativeTarget id per target."""
    self.native_targets = {tname: self.gen_id() for tname in self.build.targets}
def generate_native_frameworks_map(self):
    """For every Apple framework dependency, reserve two ids: one for its
    PBXBuildFile entry and one for its PBXFileReference entry."""
    self.native_frameworks = {}
    self.native_frameworks_fileref = {}
    for t in self.build.targets.values():
        for dep in t.get_external_deps():
            if not isinstance(dep, dependencies.AppleFrameworks):
                continue
            for f in dep.frameworks:
                self.native_frameworks[f] = self.gen_id()
                self.native_frameworks_fileref[f] = self.gen_id()
def generate_target_dependency_map(self):
    """An id per (target name, linked target basename) edge, used by the
    PBXTargetDependency section."""
    self.target_dependency_map = {
        (tname, linked.get_basename()): self.gen_id()
        for tname, t in self.build.targets.items()
        for linked in t.link_targets
    }
def generate_pbxdep_map(self):
    """One PBXTargetDependency id per target."""
    self.pbx_dep_map = {tname: self.gen_id() for tname in self.build.targets}

def generate_containerproxy_map(self):
    """One PBXContainerItemProxy id per target."""
    self.containerproxy_map = {tname: self.gen_id() for tname in self.build.targets}

def generate_source_phase_map(self):
    """One PBXSourcesBuildPhase id per target."""
    self.source_phase = {tname: self.gen_id() for tname in self.build.targets}
def generate_pbx_aggregate_target(self):
    """Write the PBXAggregateTarget section: ALL_BUILD (which depends on
    every generated target) and RUN_TESTS (which carries the test script
    build phase)."""
    all_target_deps = [self.pbx_dep_map[t] for t in self.build.targets]
    aggregated_targets = [
        (self.all_id, 'ALL_BUILD', self.all_buildconf_id, [], all_target_deps),
        (self.test_id, 'RUN_TESTS', self.test_buildconf_id, [self.test_command_id], []),
    ]
    # Sort objects by ID before writing, matching Xcode's own ordering.
    sorted_aggregated_targets = sorted(aggregated_targets, key=operator.itemgetter(0))
    self.ofile.write('\n/* Begin PBXAggregateTarget section */\n')
    # Fix: the original bound the per-target dependency list to a local named
    # 'dependencies', shadowing the imported 'dependencies' module.
    for tid, name, buildconf_id, build_phases, target_deps in sorted_aggregated_targets:
        self.write_line('%s /* %s */ = {' % (tid, name))
        self.indent_level += 1
        self.write_line('isa = PBXAggregateTarget;')
        self.write_line('buildConfigurationList = %s /* Build configuration list for PBXAggregateTarget "%s" */;' % (buildconf_id, name))
        self.write_line('buildPhases = (')
        self.indent_level += 1
        for bp in build_phases:
            self.write_line('%s /* ShellScript */,' % bp)
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('dependencies = (')
        self.indent_level += 1
        for td in target_deps:
            self.write_line('%s /* PBXTargetDependency */,' % td)
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('name = %s;' % name)
        self.write_line('productName = %s;' % name)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXAggregateTarget section */\n')
def generate_pbx_build_file(self):
    """Write the PBXBuildFile section: one entry per framework, source, and
    object, each pointing at the matching PBXFileReference id.

    NOTE(review): source indentation was lost; the nesting of the str branch
    below is reconstructed -- confirm against the original backend.
    """
    self.ofile.write('\n/* Begin PBXBuildFile section */\n')
    templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
    otempl = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n'
    for t in self.build.targets.values():
        for dep in t.get_external_deps():
            if isinstance(dep, dependencies.AppleFrameworks):
                for f in dep.frameworks:
                    self.write_line('%s /* %s.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = %s /* %s.framework */; };\n' % (self.native_frameworks[f], f, self.native_frameworks_fileref[f], f))
        for s in t.sources:
            if isinstance(s, mesonlib.File):
                s = os.path.join(s.subdir, s.fname)
            if isinstance(s, str):
                # NOTE(review): File sources were already joined with their
                # own subdir above, so this second join with t.subdir looks
                # like a potential double-prefix -- verify key consistency
                # with generate_buildmap().
                s = os.path.join(t.subdir, s)
                idval = self.buildmap[s]
                fullpath = os.path.join(self.environment.get_source_dir(), s)
                fileref = self.filemap[s]
                fullpath2 = fullpath
                compiler_args = ''
                self.write_line(templ % (idval, fullpath, fileref, fullpath2, compiler_args))
        for o in t.objects:
            o = os.path.join(t.subdir, o)
            idval = self.buildmap[o]
            fileref = self.filemap[o]
            fullpath = os.path.join(self.environment.get_source_dir(), o)
            fullpath2 = fullpath
            self.write_line(otempl % (idval, fullpath, fileref, fullpath2))
    self.ofile.write('/* End PBXBuildFile section */\n')
def generate_pbx_build_style(self):
    """Write the (legacy) PBXBuildStyle section, one entry per build type."""
    # FIXME: Xcode 9 and later does not uses PBXBuildStyle and it gets removed. Maybe we can remove this part.
    self.ofile.write('\n/* Begin PBXBuildStyle section */\n')
    for name, idval in self.buildstylemap.items():
        # NOTE: lines here carry explicit '\n'; write_line() won't add another.
        self.write_line('%s /* %s */ = {\n' % (idval, name))
        self.indent_level += 1
        self.write_line('isa = PBXBuildStyle;\n')
        self.write_line('buildSettings = {\n')
        self.indent_level += 1
        self.write_line('COPY_PHASE_STRIP = NO;\n')
        self.indent_level -= 1
        self.write_line('};\n')
        self.write_line('name = "%s";\n' % name)
        self.indent_level -= 1
        self.write_line('};\n')
    self.ofile.write('/* End PBXBuildStyle section */\n')
def generate_pbx_container_item_proxy(self):
    """Write the PBXContainerItemProxy section: one proxy per target, linking
    the project object to the target's native-target id."""
    self.ofile.write('\n/* Begin PBXContainerItemProxy section */\n')
    for t in self.build.targets:
        proxy_id = self.containerproxy_map[t]
        self.write_line('%s /* PBXContainerItemProxy */ = {' % proxy_id)
        self.indent_level += 1
        for entry in ('isa = PBXContainerItemProxy;',
                      'containerPortal = %s /* Project object */;' % self.project_uid,
                      'proxyType = 1;',
                      'remoteGlobalIDString = %s;' % self.native_targets[t],
                      'remoteInfo = "%s";' % t):
            self.write_line(entry)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXContainerItemProxy section */\n')
def generate_pbx_file_reference(self):
    """Emit the PBXFileReference section.

    Writes one file-reference entry for each Apple framework linked by any
    target, each file in ``self.filemap``, and each target's output file in
    ``self.target_filemap``.
    """
    self.ofile.write('\n/* Begin PBXFileReference section */\n')
    # Framework references point into the system SDK (sourceTree = SDKROOT).
    for t in self.build.targets.values():
        for dep in t.get_external_deps():
            if isinstance(dep, dependencies.AppleFrameworks):
                for f in dep.frameworks:
                    self.write_line('%s /* %s.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = %s.framework; path = System/Library/Frameworks/%s.framework; sourceTree = SDKROOT; };\n' % (self.native_frameworks_fileref[f], f, f, f))
    # Source files are referenced relative to the project source root.
    src_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; fileEncoding = 4; name = "%s"; path = "%s"; sourceTree = SOURCE_ROOT; };\n'
    for fname, idval in self.filemap.items():
        fullpath = os.path.join(self.environment.get_source_dir(), fname)
        xcodetype = self.get_xcodetype(fname)
        name = os.path.basename(fname)
        path = fname
        self.write_line(src_templ % (idval, fullpath, xcodetype, name, path))
    # Target output files live in Xcode's BUILT_PRODUCTS_DIR.
    target_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; path = %s; refType = %d; sourceTree = BUILT_PRODUCTS_DIR; };\n'
    for tname, idval in self.target_filemap.items():
        t = self.build.targets[tname]
        fname = t.get_filename()
        reftype = 0
        if isinstance(t, build.Executable):
            typestr = 'compiled.mach-o.executable'
            path = fname
        elif isinstance(t, build.SharedLibrary):
            # Use the generic dylib Xcode type rather than one derived
            # from the real file name.
            typestr = self.get_xcodetype('dummy.dylib')
            path = fname
        else:
            typestr = self.get_xcodetype(fname)
            # Only the fallback branch quotes the path.
            path = '"%s"' % t.get_filename()
        self.write_line(target_templ % (idval, tname, typestr, path, reftype))
    self.ofile.write('/* End PBXFileReference section */\n')
def generate_pbx_frameworks_buildphase(self):
    """Emit the PBXFrameworksBuildPhase section.

    Writes one frameworks build phase per target, listing the Apple
    frameworks the target links against.
    """
    # Fix: the "Begin ..." banner used to be written inside the target loop
    # (while "End ..." was written once after it), so projects with more
    # than one target emitted a duplicated section header. The pbxproj
    # format expects a single Begin/End pair wrapping all phases.
    self.ofile.write('\n/* Begin PBXFrameworksBuildPhase section */\n')
    for t in self.build.targets.values():
        self.write_line('%s /* %s */ = {\n' % (t.buildphasemap['Frameworks'], 'Frameworks'))
        self.indent_level += 1
        self.write_line('isa = PBXFrameworksBuildPhase;\n')
        # 2147483647 (0x7FFFFFFF) is Xcode's "always run" action mask.
        self.write_line('buildActionMask = %s;\n' % (2147483647))
        self.write_line('files = (\n')
        self.indent_level += 1
        for dep in t.get_external_deps():
            if isinstance(dep, dependencies.AppleFrameworks):
                for f in dep.frameworks:
                    self.write_line('%s /* %s.framework in Frameworks */,\n' % (self.native_frameworks[f], f))
        self.indent_level -= 1
        self.write_line(');\n')
        self.write_line('runOnlyForDeploymentPostprocessing = 0;\n')
        self.indent_level -= 1
        self.write_line('};\n')
    self.ofile.write('/* End PBXFrameworksBuildPhase section */\n')
def generate_pbx_group(self):
    """Emit the PBXGroup section: the project's group (folder) tree.

    Structure produced: a main group containing Sources / Resources /
    Products / Frameworks groups; one group per target under Sources,
    each with a "Source files" child group listing its files.
    """
    # Fresh ids for the per-target groups and their "Source files" children.
    groupmap = {}
    target_src_map = {}
    for t in self.build.targets:
        groupmap[t] = self.gen_id()
        target_src_map[t] = self.gen_id()
    self.ofile.write('\n/* Begin PBXGroup section */\n')
    sources_id = self.gen_id()
    resources_id = self.gen_id()
    products_id = self.gen_id()
    frameworks_id = self.gen_id()
    # Main (root) group referencing the four top-level groups.
    self.write_line('%s = {' % self.maingroup_id)
    self.indent_level += 1
    self.write_line('isa = PBXGroup;')
    self.write_line('children = (')
    self.indent_level += 1
    self.write_line('%s /* Sources */,' % sources_id)
    self.write_line('%s /* Resources */,' % resources_id)
    self.write_line('%s /* Products */,' % products_id)
    self.write_line('%s /* Frameworks */,' % frameworks_id)
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('sourceTree = "<group>";')
    self.indent_level -= 1
    self.write_line('};')
    # Sources
    self.write_line('%s /* Sources */ = {' % sources_id)
    self.indent_level += 1
    self.write_line('isa = PBXGroup;')
    self.write_line('children = (')
    self.indent_level += 1
    for t in self.build.targets:
        self.write_line('%s /* %s */,' % (groupmap[t], t))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('name = Sources;')
    self.write_line('sourceTree = "<group>";')
    self.indent_level -= 1
    self.write_line('};')
    # Resources group is always empty.
    self.write_line('%s /* Resources */ = {' % resources_id)
    self.indent_level += 1
    self.write_line('isa = PBXGroup;')
    self.write_line('children = (')
    self.write_line(');')
    self.write_line('name = Resources;')
    self.write_line('sourceTree = "<group>";')
    self.indent_level -= 1
    self.write_line('};')
    self.write_line('%s /* Frameworks */ = {' % frameworks_id)
    self.indent_level += 1
    self.write_line('isa = PBXGroup;')
    self.write_line('children = (')
    # write frameworks
    self.indent_level += 1
    for t in self.build.targets.values():
        for dep in t.get_external_deps():
            if isinstance(dep, dependencies.AppleFrameworks):
                for f in dep.frameworks:
                    self.write_line('%s /* %s.framework */,\n' % (self.native_frameworks_fileref[f], f))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('name = Frameworks;')
    self.write_line('sourceTree = "<group>";')
    self.indent_level -= 1
    self.write_line('};')
    # Targets
    for t in self.build.targets:
        self.write_line('%s /* %s */ = {' % (groupmap[t], t))
        self.indent_level += 1
        self.write_line('isa = PBXGroup;')
        self.write_line('children = (')
        self.indent_level += 1
        self.write_line('%s /* Source files */,' % target_src_map[t])
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('name = "%s";' % t)
        self.write_line('sourceTree = "<group>";')
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('%s /* Source files */ = {' % target_src_map[t])
        self.indent_level += 1
        self.write_line('isa = PBXGroup;')
        self.write_line('children = (')
        self.indent_level += 1
        for s in self.build.targets[t].sources:
            # NOTE(review): unlike generate_pbx_build_file, this joins
            # s.subdir/s.fname unconditionally, so it assumes every source
            # is a File object; a plain-str source would raise
            # AttributeError before the isinstance check below — confirm.
            s = os.path.join(s.subdir, s.fname)
            if isinstance(s, str):
                self.write_line('%s /* %s */,' % (self.filemap[s], s))
        for o in self.build.targets[t].objects:
            o = os.path.join(self.build.targets[t].subdir, o)
            self.write_line('%s /* %s */,' % (self.filemap[o], o))
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('name = "Source files";')
        self.write_line('sourceTree = "<group>";')
        self.indent_level -= 1
        self.write_line('};')
    # And finally products
    self.write_line('%s /* Products */ = {' % products_id)
    self.indent_level += 1
    self.write_line('isa = PBXGroup;')
    self.write_line('children = (')
    self.indent_level += 1
    for t in self.build.targets:
        self.write_line('%s /* %s */,' % (self.target_filemap[t], t))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('name = Products;')
    self.write_line('sourceTree = "<group>";')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXGroup section */\n')
def generate_pbx_native_target(self):
    """Emit the PBXNativeTarget section: one native target per build target,
    wiring up its configuration list, build phases, dependencies and
    product reference."""
    self.ofile.write('\n/* Begin PBXNativeTarget section */\n')
    for tname, idval in self.native_targets.items():
        t = self.build.targets[tname]
        self.write_line('%s /* %s */ = {' % (idval, tname))
        self.indent_level += 1
        self.write_line('isa = PBXNativeTarget;')
        self.write_line('buildConfigurationList = %s /* Build configuration list for PBXNativeTarget "%s" */;'
                        % (self.buildconflistmap[tname], tname))
        self.write_line('buildPhases = (')
        self.indent_level += 1
        for bpname, bpval in t.buildphasemap.items():
            # NOTE(review): the literal "yyy" below ends up in the generated
            # pbxproj comment; it looks like leftover debug text — confirm
            # before removing, since it only affects a comment in the output.
            self.write_line('%s /* %s yyy */,' % (bpval, bpname))
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('buildRules = (')
        self.write_line(');')
        self.write_line('dependencies = (')
        self.indent_level += 1
        for lt in self.build.targets[tname].link_targets:
            # NOT DOCUMENTED, may need to make different links
            # to same target have different targetdependency item.
            # (This rebinds the outer ``idval``; it is not read again
            # afterwards, so the shadowing is harmless.)
            idval = self.pbx_dep_map[lt.get_id()]
            self.write_line('%s /* PBXTargetDependency */,' % idval)
        self.indent_level -= 1
        self.write_line(");")
        self.write_line('name = "%s";' % tname)
        self.write_line('productName = "%s";' % tname)
        self.write_line('productReference = %s /* %s */;' % (self.target_filemap[tname], tname))
        # Map the Meson target class to Xcode's product type identifier.
        if isinstance(t, build.Executable):
            typestr = 'com.apple.product-type.tool'
        elif isinstance(t, build.StaticLibrary):
            typestr = 'com.apple.product-type.library.static'
        elif isinstance(t, build.SharedLibrary):
            typestr = 'com.apple.product-type.library.dynamic'
        else:
            raise MesonException('Unknown target type for %s' % tname)
        self.write_line('productType = "%s";' % typestr)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXNativeTarget section */\n')
def generate_pbx_project(self):
    """Emit the PBXProject section: the single root project object that
    references the build-configuration list, build styles, main group and
    the full target list (including the synthetic ALL_BUILD and RUN_TESTS
    targets)."""
    self.ofile.write('\n/* Begin PBXProject section */\n')
    self.write_line('%s /* Project object */ = {' % self.project_uid)
    self.indent_level += 1
    self.write_line('isa = PBXProject;')
    self.write_line('attributes = {')
    self.indent_level += 1
    self.write_line('BuildIndependentTargetsInParallel = YES;')
    self.indent_level -= 1
    self.write_line('};')
    conftempl = 'buildConfigurationList = %s /* Build configuration list for PBXProject "%s" */;'
    self.write_line(conftempl % (self.project_conflist, self.build.project_name))
    self.write_line('buildSettings = {')
    self.write_line('};')
    self.write_line('buildStyles = (')
    self.indent_level += 1
    for name, idval in self.buildstylemap.items():
        self.write_line('%s /* %s */,' % (idval, name))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('compatibilityVersion = "Xcode 3.2";')
    self.write_line('hasScannedForEncodings = 0;')
    self.write_line('mainGroup = %s;' % self.maingroup_id)
    self.write_line('projectDirPath = "%s";' % self.build_to_src)
    self.write_line('projectRoot = "";')
    self.write_line('targets = (')
    self.indent_level += 1
    # Synthetic aggregate targets come first, then the real build targets.
    self.write_line('%s /* ALL_BUILD */,' % self.all_id)
    self.write_line('%s /* RUN_TESTS */,' % self.test_id)
    for t in self.build.targets:
        self.write_line('%s /* %s */,' % (self.native_targets[t], t))
    self.indent_level -= 1
    self.write_line(');')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXProject section */\n')
def generate_pbx_shell_build_phase(self, test_data):
    """Emit the PBXShellScriptBuildPhase section.

    Writes the single shell-script phase (used by RUN_TESTS) that invokes
    ``meson test`` on the build directory with the given test data file.
    """
    self.ofile.write('\n/* Begin PBXShellScriptBuildPhase section */\n')
    self.write_line('{} /* ShellScript */ = {{'.format(self.test_command_id))
    self.indent_level += 1
    self.write_line('isa = PBXShellScriptBuildPhase;')
    self.write_line('buildActionMask = 2147483647;')
    # The script phase has no file inputs or outputs; the three list
    # attributes are mandatory but stay empty.
    for list_attr in ('files', 'inputPaths', 'outputPaths'):
        self.write_line(list_attr + ' = (')
        self.write_line(');')
    self.write_line('runOnlyForDeploymentPostprocessing = 0;')
    self.write_line('shellPath = /bin/sh;')
    command = mesonlib.meson_command + ['test', test_data, '-C', self.environment.get_build_dir()]
    # Single-quote every element so paths with spaces survive the shell.
    quoted = ' '.join("'{}'".format(part) for part in command)
    self.write_line('shellScript = "{}";'.format(quoted))
    self.write_line('showEnvVarsInLog = 0;')
    self.indent_level -= 1
    self.write_line('};')
    self.ofile.write('/* End PBXShellScriptBuildPhase section */\n')
def generate_pbx_sources_build_phase(self):
    """Emit the PBXSourcesBuildPhase section: one sources phase per entry
    in ``self.source_phase``, listing the target's non-header files."""
    self.ofile.write('\n/* Begin PBXSourcesBuildPhase section */\n')
    for name in self.source_phase.keys():
        t = self.build.targets[name]
        # NOTE(review): this indexes buildphasemap with the *target* name,
        # while generate_pbx_frameworks_buildphase uses the phase name
        # ('Frameworks'). Presumably buildphasemap also carries a
        # target-name key for the sources phase — verify where
        # buildphasemap is populated.
        self.write_line('%s /* Sources */ = {' % (t.buildphasemap[name]))
        self.indent_level += 1
        self.write_line('isa = PBXSourcesBuildPhase;')
        self.write_line('buildActionMask = 2147483647;')
        self.write_line('files = (')
        self.indent_level += 1
        for s in self.build.targets[name].sources:
            # NOTE(review): assumes every source is a File object
            # (s.subdir/s.fname); a plain-str source would raise
            # AttributeError here — confirm.
            s = os.path.join(s.subdir, s.fname)
            if not self.environment.is_header(s):
                self.write_line('%s /* %s */,' % (self.buildmap[s], os.path.join(self.environment.get_source_dir(), s)))
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('runOnlyForDeploymentPostprocessing = 0;')
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXSourcesBuildPhase section */\n')
def generate_pbx_target_dependency(self):
    """Emit the PBXTargetDependency section, with entries sorted by id."""
    entries = [
        (self.pbx_dep_map[target_name],  # VERIFY: is this correct?
         self.native_targets[target_name],
         target_name,
         self.containerproxy_map[target_name])
        for target_name in self.build.targets
    ]
    # Sort on the dependency id to keep the output deterministic.
    entries.sort(key=lambda entry: entry[0])
    self.ofile.write('\n/* Begin PBXTargetDependency section */\n')
    for dep_id, native_id, target_name, proxy_id in entries:
        self.write_line('{} /* PBXTargetDependency */ = {{'.format(dep_id))
        self.indent_level += 1
        self.write_line('isa = PBXTargetDependency;')
        self.write_line('target = {} /* {} */;'.format(native_id, target_name))
        self.write_line('targetProxy = {} /* PBXContainerItemProxy */;'.format(proxy_id))
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End PBXTargetDependency section */\n')
def generate_xc_build_configuration(self):
    """Emit the XCBuildConfiguration section.

    Four groups of configurations are written, one entry per build type
    for each: the toplevel project, the ALL_BUILD aggregate target, the
    RUN_TESTS aggregate target, and finally each real build target (with
    per-target link flags, header search paths, PCH and language flags).
    """
    self.ofile.write('\n/* Begin XCBuildConfiguration section */\n')
    # First the setup for the toplevel project.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.project_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('ARCHS = "$(ARCHS_STANDARD_64_BIT)";')
        self.write_line('ONLY_ACTIVE_ARCH = YES;')
        self.write_line('SDKROOT = "macosx";')
        self.write_line('SYMROOT = "%s/build";' % self.environment.get_build_dir())
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Then the all target.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.buildall_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('COMBINE_HIDPI_IMAGES = YES;')
        self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
        self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
        self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
        self.write_line('GCC_PREPROCESSOR_DEFINITIONS = "";')
        self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
        self.write_line('INSTALL_PATH = "";')
        self.write_line('OTHER_CFLAGS = " ";')
        self.write_line('OTHER_LDFLAGS = " ";')
        self.write_line('OTHER_REZFLAGS = "";')
        self.write_line('PRODUCT_NAME = ALL_BUILD;')
        self.write_line('SECTORDER_FLAGS = "";')
        self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
        self.write_line('USE_HEADERMAP = NO;')
        self.write_build_setting_line('WARNING_CFLAGS', ['-Wmost', '-Wno-four-char-constants', '-Wno-unknown-pragmas'])
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Then the test target.
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */ = {' % (self.test_configurations[buildtype], buildtype))
        self.indent_level += 1
        self.write_line('isa = XCBuildConfiguration;')
        self.write_line('buildSettings = {')
        self.indent_level += 1
        self.write_line('COMBINE_HIDPI_IMAGES = YES;')
        self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
        self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
        self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
        self.write_line('GCC_PREPROCESSOR_DEFINITIONS = "";')
        self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
        self.write_line('INSTALL_PATH = "";')
        self.write_line('OTHER_CFLAGS = " ";')
        self.write_line('OTHER_LDFLAGS = " ";')
        self.write_line('OTHER_REZFLAGS = "";')
        self.write_line('PRODUCT_NAME = RUN_TESTS;')
        self.write_line('SECTORDER_FLAGS = "";')
        self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
        self.write_line('USE_HEADERMAP = NO;')
        self.write_build_setting_line('WARNING_CFLAGS', ['-Wmost', '-Wno-four-char-constants', '-Wno-unknown-pragmas'])
        self.indent_level -= 1
        self.write_line('};')
        self.write_line('name = "%s";' % buildtype)
        self.indent_level -= 1
        self.write_line('};')
    # Now finally targets.
    # Maps Meson language ids to the middle part of Xcode's
    # OTHER_<LANG>FLAGS setting names.
    langnamemap = {'c': 'C', 'cpp': 'CPLUSPLUS', 'objc': 'OBJC', 'objcpp': 'OBJCPLUSPLUS'}
    for target_name, target in self.build.targets.items():
        for buildtype in self.buildtypes:
            dep_libs = []
            links_dylib = False
            headerdirs = []
            # Collect include dirs, each resolved against both the source
            # and the build directory.
            for d in target.include_dirs:
                for sd in d.incdirs:
                    cd = os.path.join(d.curdir, sd)
                    headerdirs.append(os.path.join(self.environment.get_source_dir(), cd))
                    headerdirs.append(os.path.join(self.environment.get_build_dir(), cd))
            for l in target.link_targets:
                abs_path = os.path.join(self.environment.get_build_dir(),
                                        l.subdir, buildtype, l.get_filename())
                dep_libs.append("'%s'" % abs_path)
                if isinstance(l, build.SharedLibrary):
                    links_dylib = True
            if links_dylib:
                dep_libs = ['-Wl,-search_paths_first', '-Wl,-headerpad_max_install_names'] + dep_libs
            dylib_version = None
            if isinstance(target, build.SharedLibrary):
                ldargs = ['-dynamiclib', '-Wl,-headerpad_max_install_names'] + dep_libs
                install_path = os.path.join(self.environment.get_build_dir(), target.subdir, buildtype)
                dylib_version = target.soversion
            else:
                ldargs = dep_libs
                install_path = ''
            # Shared libraries with a soversion embed it in the product name.
            if dylib_version is not None:
                product_name = target.get_basename() + '.' + dylib_version
            else:
                product_name = target.get_basename()
            ldargs += target.link_args
            for dep in target.get_external_deps():
                ldargs += dep.get_link_args()
            ldstr = ' '.join(ldargs)
            valid = self.buildconfmap[target_name][buildtype]
            langargs = {}
            for lang in self.environment.coredata.compilers[target.for_machine]:
                if lang not in langnamemap:
                    continue
                # Add compile args added using add_project_arguments()
                pargs = self.build.projects_args[target.for_machine].get(target.subproject, {}).get(lang, [])
                # Add compile args added using add_global_arguments()
                # These override per-project arguments
                gargs = self.build.global_args[target.for_machine].get(lang, [])
                targs = target.get_extra_args(lang)
                args = pargs + gargs + targs
                if args:
                    langargs[langnamemap[lang]] = args
            symroot = os.path.join(self.environment.get_build_dir(), target.subdir)
            self.write_line('%s /* %s */ = {' % (valid, buildtype))
            self.indent_level += 1
            self.write_line('isa = XCBuildConfiguration;')
            self.write_line('buildSettings = {')
            self.indent_level += 1
            self.write_line('COMBINE_HIDPI_IMAGES = YES;')
            if dylib_version is not None:
                self.write_line('DYLIB_CURRENT_VERSION = "%s";' % dylib_version)
            self.write_line('EXECUTABLE_PREFIX = "%s";' % target.prefix)
            if target.suffix == '':
                suffix = ''
            else:
                suffix = '.' + target.suffix
            self.write_line('EXECUTABLE_SUFFIX = "%s";' % suffix)
            self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = YES;')
            self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
            self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
            if target.has_pch:
                # Xcode uses GCC_PREFIX_HEADER which only allows one file per target/executable. Precompiling various header files and
                # applying a particular pch to each source file will require custom scripts (as a build phase) and build flags per each
                # file. Since Xcode itself already discourages precompiled headers in favor of modules we don't try much harder here.
                pchs = target.get_pch('c') + target.get_pch('cpp') + target.get_pch('objc') + target.get_pch('objcpp')
                # Make sure to use headers (other backends require implementation files like *.c *.cpp, etc; these should not be used here)
                pchs = [pch for pch in pchs if pch.endswith('.h') or pch.endswith('.hh') or pch.endswith('hpp')]
                if pchs:
                    if len(pchs) > 1:
                        mlog.warning('Unsupported Xcode configuration: More than 1 precompiled header found "%s". Target "%s" might not compile correctly.' % (str(pchs), target.name))
                    relative_pch_path = os.path.join(target.get_subdir(), pchs[0])  # Path relative to target so it can be used with "$(PROJECT_DIR)"
                    self.write_line('GCC_PRECOMPILE_PREFIX_HEADER = YES;')
                    self.write_line('GCC_PREFIX_HEADER = "$(PROJECT_DIR)/%s";' % relative_pch_path)
            self.write_line('GCC_PREPROCESSOR_DEFINITIONS = "";')
            self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
            if headerdirs:
                # Each dir is wrapped in escaped quotes so paths with
                # spaces survive.
                quotedh = ','.join(['"\\"%s\\""' % i for i in headerdirs])
                self.write_line('HEADER_SEARCH_PATHS=(%s);' % quotedh)
            self.write_line('INSTALL_PATH = "%s";' % install_path)
            self.write_line('LIBRARY_SEARCH_PATHS = "";')
            if isinstance(target, build.SharedLibrary):
                self.write_line('LIBRARY_STYLE = DYNAMIC;')
            for langname, args in langargs.items():
                self.write_build_setting_line('OTHER_%sFLAGS' % langname, args)
            self.write_line('OTHER_LDFLAGS = "%s";' % ldstr)
            self.write_line('OTHER_REZFLAGS = "";')
            self.write_line('PRODUCT_NAME = %s;' % product_name)
            self.write_line('SECTORDER_FLAGS = "";')
            self.write_line('SYMROOT = "%s";' % symroot)
            self.write_build_setting_line('SYSTEM_HEADER_SEARCH_PATHS', [self.environment.get_build_dir()])
            self.write_line('USE_HEADERMAP = NO;')
            self.write_build_setting_line('WARNING_CFLAGS', ['-Wmost', '-Wno-four-char-constants', '-Wno-unknown-pragmas'])
            self.indent_level -= 1
            self.write_line('};')
            self.write_line('name = %s;' % buildtype)
            self.indent_level -= 1
            self.write_line('};')
    self.ofile.write('/* End XCBuildConfiguration section */\n')
def generate_xc_configurationList(self):
    """Emit the XCConfigurationList section.

    Writes one configuration list each for the project, ALL_BUILD,
    RUN_TESTS, and every native target.
    """
    # FIXME: sort items
    self.ofile.write('\n/* Begin XCConfigurationList section */\n')
    self.write_line('%s /* Build configuration list for PBXProject "%s" */ = {' % (self.project_conflist, self.build.project_name))
    self.indent_level += 1
    self.write_line('isa = XCConfigurationList;')
    self.write_line('buildConfigurations = (')
    self.indent_level += 1
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */,' % (self.project_configurations[buildtype], buildtype))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('defaultConfigurationIsVisible = 0;')
    self.write_line('defaultConfigurationName = debug;')
    self.indent_level -= 1
    self.write_line('};')
    # Now the all target
    self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.all_buildconf_id)
    self.indent_level += 1
    self.write_line('isa = XCConfigurationList;')
    self.write_line('buildConfigurations = (')
    self.indent_level += 1
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */,' % (self.buildall_configurations[buildtype], buildtype))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('defaultConfigurationIsVisible = 0;')
    self.write_line('defaultConfigurationName = debug;')
    self.indent_level -= 1
    self.write_line('};')
    # Test target
    # NOTE(review): this list belongs to test_buildconf_id but the emitted
    # pbxproj comment still says "ALL_BUILD" — looks like a copy/paste
    # label (comment-only in the output); confirm before changing.
    self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.test_buildconf_id)
    self.indent_level += 1
    self.write_line('isa = XCConfigurationList;')
    self.write_line('buildConfigurations = (')
    self.indent_level += 1
    for buildtype in self.buildtypes:
        self.write_line('%s /* %s */,' % (self.test_configurations[buildtype], buildtype))
    self.indent_level -= 1
    self.write_line(');')
    self.write_line('defaultConfigurationIsVisible = 0;')
    self.write_line('defaultConfigurationName = debug;')
    self.indent_level -= 1
    self.write_line('};')
    for target_name in self.build.targets:
        listid = self.buildconflistmap[target_name]
        self.write_line('%s /* Build configuration list for PBXNativeTarget "%s" */ = {' % (listid, target_name))
        self.indent_level += 1
        self.write_line('isa = XCConfigurationList;')
        self.write_line('buildConfigurations = (')
        self.indent_level += 1
        # Per-target lists only reference the debug configuration.
        typestr = 'debug'
        idval = self.buildconfmap[target_name][typestr]
        self.write_line('%s /* %s */,' % (idval, typestr))
        self.indent_level -= 1
        self.write_line(');')
        self.write_line('defaultConfigurationIsVisible = 0;')
        self.write_line('defaultConfigurationName = %s;' % typestr)
        self.indent_level -= 1
        self.write_line('};')
    self.ofile.write('/* End XCConfigurationList section */\n')
def write_build_setting_line(self, flag_name, flag_values, explicit=False):
    """Write one build setting.

    A single value is emitted as ``NAME = "value";`` and multiple values
    as a parenthesised list. Values containing spaces are wrapped in
    escaped double quotes so Xcode keeps them as a single token. With an
    empty *flag_values*, nothing is written unless *explicit* is True, in
    which case an empty-string setting is emitted.
    """
    if not flag_values:
        if explicit:
            self.write_line('{} = "";'.format(flag_name))
        return
    if len(flag_values) == 1:
        single = flag_values[0]
        if ' ' in single:
            # Escaped double quotes keep a space-containing value intact.
            self.write_line('{} = "\\"{}\\"";'.format(flag_name, single))
        else:
            self.write_line('{} = "{}";'.format(flag_name, single))
        return
    self.write_line('{} = ('.format(flag_name))
    self.indent_level += 1
    for item in flag_values:
        if ' ' in item:
            # Same escaping rule for each element of the list form.
            self.write_line('"\\"{}\\"",'.format(item))
        else:
            self.write_line('"{}",'.format(item))
    self.indent_level -= 1
    self.write_line(');')
def generate_prefix(self):
    """Write the pbxproj file header and open the top-level objects dict."""
    self.ofile.write('// !$*UTF8*$!\n{\n')
    self.indent_level += 1
    for header_line in ('archiveVersion = 1;\n',
                        'classes = {\n',
                        '};\n',
                        'objectVersion = 46;\n',
                        'objects = {\n'):
        self.write_line(header_line)
    self.indent_level += 1
def generate_suffix(self):
    """Close the objects dict, reference the root object and end the file."""
    self.indent_level -= 1
    self.write_line('};\n')
    self.write_line('rootObject = {} /* Project object */;'.format(self.project_uid))
    self.indent_level -= 1
    self.write_line('}\n')
| 48.93559 | 272 | 0.569972 |
611a191d50cdd22d9fe1e7e3cb1730ca9447941d | 2,559 | py | Python | test/unit/data/test_supplementary_data.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 358 | 2020-06-11T09:34:53.000Z | 2022-03-31T12:56:22.000Z | test/unit/data/test_supplementary_data.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 467 | 2020-06-11T13:49:45.000Z | 2022-03-31T14:19:48.000Z | test/unit/data/test_supplementary_data.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 48 | 2020-07-13T14:50:45.000Z | 2022-03-26T09:37:13.000Z | from fedot.core.data.data import OutputData
from fedot.core.data.supplementary_data import SupplementaryData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from test.unit.data.test_data_merge import generate_outputs
from test.unit.tasks.test_regression import get_synthetic_regression_data
def generate_straight_pipeline():
    """Build a simple linear pipeline: scaling -> ridge -> linear."""
    first = PrimaryNode('scaling')
    second = SecondaryNode('ridge', nodes_from=[first])
    final = SecondaryNode('linear', nodes_from=[second])
    return Pipeline(final)
def test_parent_mask_correct():
    """Check that the features mask built from outputs matches expectations."""
    expected_mask = {'input_ids': [0, 1], 'flow_lens': [1, 0]}
    # Outputs carry a single column in the prediction.
    outputs, _, _ = generate_outputs()
    # Derive the parent mask from the outputs.
    supplementary = SupplementaryData()
    supplementary.prepare_parent_mask(outputs)
    actual_mask = supplementary.features_mask
    for field in ('input_ids', 'flow_lens'):
        assert tuple(actual_mask[field]) == tuple(expected_mask[field])
def test_calculate_data_flow_len_correct():
    """Data sent through a three-node linear pipeline must report having
    passed two preceding nodes."""
    pipeline = generate_straight_pipeline()  # three nodes in a row
    regression_data = get_synthetic_regression_data(n_samples=100, n_features=2)
    pipeline.fit(regression_data)
    prediction = pipeline.predict(regression_data)
    assert prediction.supplementary_data.data_flow_length == 2
def test_get_compound_mask_correct():
    """Combining the id and flow-length lists must yield string keys."""
    features_mask = {'input_ids': [0, 0, 1, 1],
                     'flow_lens': [1, 1, 0, 0]}
    sample_output = OutputData(idx=[0, 0], features=[0, 0], predict=[0, 0],
                               task=Task(TaskTypesEnum.regression),
                               target=[0, 0], data_type=DataTypesEnum.table,
                               supplementary_data=SupplementaryData(features_mask=features_mask))
    compound = sample_output.supplementary_data.get_compound_mask()
    assert tuple(compound) == ('01', '01', '10', '10')
| 40.619048 | 99 | 0.724893 |
ffcd10a406c5b40eb5476b2bbaae8f6185d1c22d | 6,975 | py | Python | python/build.py | strongoier/test_actions | 0373e78f139b66c69861fce1cf1efa7e60ddeb00 | [
"MIT"
] | 1 | 2022-01-29T11:59:50.000Z | 2022-01-29T11:59:50.000Z | python/build.py | strongoier/test_actions | 0373e78f139b66c69861fce1cf1efa7e60ddeb00 | [
"MIT"
] | null | null | null | python/build.py | strongoier/test_actions | 0373e78f139b66c69861fce1cf1efa7e60ddeb00 | [
"MIT"
] | null | null | null | import argparse
import os
import platform
import re
import shutil
import sys
import taichi as ti
def get_os_name():
    """Return a short OS tag: 'osx', 'win' or 'linux'.

    Raises:
        RuntimeError: if the platform string is not recognized.
    """
    name = platform.platform()
    # in python 3.8, platform.platform() uses mac_ver() on macOS
    # it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
    lowered = name.lower()
    if lowered.startswith(('darwin', 'macos')):
        return 'osx'
    if lowered.startswith('windows'):
        return 'win'
    if lowered.startswith('linux'):
        return 'linux'
    # Raise instead of `assert False`: asserts are stripped under `python -O`,
    # which would silently make this function return None.
    raise RuntimeError("Unknown platform name %s" % name)
def get_python_executable():
    """Return the current interpreter path, slash-normalized and quoted
    for safe interpolation into shell command lines."""
    normalized = sys.executable.replace('\\', '/')
    return '"{}"'.format(normalized)
def build(project_name):
    """Build and package the wheel file in `python/dist`"""
    # PyPI wheels must be built with clang on Linux; enforce via $CXX.
    if platform.system() == 'Linux':
        # NOTE(review): non-raw string with regex escapes (\+, \d) — works,
        # but a raw string r"^clang\+\+-*\d*" would avoid escape warnings.
        if re.search("^clang\+\+-*\d*", str(os.environ.get('CXX'))) is None:
            raise RuntimeError(
                'Only the wheel with clang will be released to PyPI')
    version = ti.core.get_version_string()
    # Generate a local setup.py with the project name and version prepended
    # to the repository's ../setup.py contents.
    with open('../setup.py') as fin:
        with open('setup.py', 'w') as fout:
            print("project_name = '{}'".format(project_name), file=fout)
            print("version = '{}'".format(version), file=fout)
            for l in fin:
                print(l, file=fout, end='')
    print("*** project_name = '{}'".format(project_name))
    # Clean up any leftovers from a previous packaging run.
    try:
        os.remove('taichi/CHANGELOG.md')
    except FileNotFoundError:
        pass
    shutil.rmtree('taichi/lib', ignore_errors=True)
    shutil.rmtree('taichi/tests', ignore_errors=True)
    shutil.rmtree('taichi/examples', ignore_errors=True)
    shutil.rmtree('taichi/assets', ignore_errors=True)
    os.makedirs('taichi/lib', exist_ok=True)
    shutil.rmtree('build', ignore_errors=True)
    shutil.rmtree('dist', ignore_errors=True)
    shutil.rmtree('taichi/include', ignore_errors=True)
    # shutil.copytree('../include/', 'taichi/include')
    build_dir = '../build'  # NOTE(review): assigned but never used below.
    # Copy the compiled core library, renaming it per platform convention.
    if get_os_name() == 'linux':
        shutil.copy('../build/libtaichi_core.so', 'taichi/lib/taichi_core.so')
    elif get_os_name() == 'osx':
        shutil.copy('../build/libtaichi_core.dylib',
                    'taichi/lib/taichi_core.so')
    else:
        shutil.copy('../runtimes/RelWithDebInfo/taichi_core.dll',
                    'taichi/lib/taichi_core.pyd')
    # Regenerate the changelog, print it, and bundle it if present.
    os.system(f'cd .. && {get_python_executable()} -m taichi changelog --save')
    try:
        with open('../CHANGELOG.md') as f:
            print(f.read())
    except FileNotFoundError:
        print('CHANGELOG.md not found')
        pass
    try:
        shutil.copy('../CHANGELOG.md', './taichi/CHANGELOG.md')
    except FileNotFoundError:
        pass
    # Bundle tests, examples and assets into the package tree.
    shutil.copytree('../tests/python', './taichi/tests')
    shutil.copytree('../examples', './taichi/examples')
    shutil.copytree('../external/assets', './taichi/assets')
    # CUDA libdevice is bundled on every platform except macOS.
    if get_os_name() != 'osx':
        libdevice_path = ti.core.libdevice_path()
        print("copying libdevice:", libdevice_path)
        assert os.path.exists(libdevice_path)
        shutil.copy(libdevice_path, 'taichi/lib/slim_libdevice.10.bc')
    # Compile and collect the LLVM runtime bitcode files.
    ti.core.compile_runtimes()
    runtime_dir = ti.core.get_runtime_dir()
    for f in os.listdir(runtime_dir):
        if f.startswith('runtime_') and f.endswith('.bc'):
            print(f"Fetching runtime file {f}")
            shutil.copy(os.path.join(runtime_dir, f), 'taichi/lib')
    print("Using python executable", get_python_executable())
    os.system(
        '{} -m pip install --user --upgrade twine setuptools wheel'.format(
            get_python_executable()))
    # Build the wheel; Linux wheels are tagged manylinux1 for PyPI.
    if get_os_name() == 'linux':
        os.system('{} setup.py bdist_wheel -p manylinux1_x86_64'.format(
            get_python_executable()))
    else:
        os.system('{} setup.py bdist_wheel'.format(get_python_executable()))
    # Remove everything that was copied in just for packaging.
    shutil.rmtree('taichi/lib')
    shutil.rmtree('taichi/tests')
    shutil.rmtree('taichi/examples')
    shutil.rmtree('taichi/assets')
    try:
        os.remove('taichi/CHANGELOG.md')
    except FileNotFoundError:
        pass
    shutil.rmtree('./build')
def parse_args(args=None):
    """Parse command-line options for the build/upload script.

    Args:
        args: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (standard argparse behavior); passing an explicit list makes the
            function usable from tests. Backward compatible: existing callers
            invoke ``parse_args()`` with no arguments.

    Returns:
        argparse.Namespace with ``mode``, ``skip_build``, ``testpypi`` and
        ``project_name`` attributes.
    """
    parser = argparse.ArgumentParser(description=(
        'Build and uploads wheels to PyPI. Make sure to run this script '
        'inside `python/`'))
    parser.add_argument('mode',
                        type=str,
                        default='',
                        help=('Choose one of the modes: '
                              '[build, test, try_upload, upload]'))
    parser.add_argument('--skip_build',
                        action='store_true',
                        help=('Skip the build process if this is enabled'))
    parser.add_argument('--testpypi',
                        action='store_true',
                        help='Upload to test server if this is enabled')
    parser.add_argument('--project_name',
                        action='store',
                        dest='project_name',
                        default='taichi',
                        help='Set the project name')
    return parser.parse_args(args)
def main():
    """Drive the release flow: build the wheel, then test or upload it.

    Modes:
      build      - just build (default artifacts land in dist/)
      test       - install the freshly built wheel into ./test_env and
                   drop into a shell there
      upload     - push dist/* to PyPI via twine (needs PYPI_PWD)
      try_upload - like upload, but exits 0 quietly when PYPI_PWD is unset
    """
    args = parse_args()
    mode = args.mode
    pypi_user = '__token__'
    pypi_repo = ''
    project_name = args.project_name
    pypi_password = os.environ.get('PYPI_PWD', '')
    if mode == 'try_upload':
        # Best-effort mode for CI: a missing credential is not an error.
        if pypi_password == '':
            print("Missing environment variable PYPI_PWD")
            print("Giving up and exiting 0 [try_upload mode]")
            exit(0)
        mode = 'upload'
    if mode == 'upload' and pypi_password == '':
        raise RuntimeError("Missing environment variable PYPI_PWD")
    # twine reads the credential from the environment.
    os.environ['TWINE_PASSWORD'] = pypi_password
    if mode == 'upload' and args.testpypi:
        pypi_repo = '--repository testpypi'
    if not args.skip_build:
        build(project_name)
    if mode == 'build':
        return
    if mode == 'upload':
        os.system(
            f'{get_python_executable()} -m twine upload {pypi_repo} dist/* --verbose -u {pypi_user}'
        )
    elif mode == 'test':
        print('Uninstalling old taichi packages...')
        os.system(f'{get_python_executable()} -m pip uninstall taichi-nightly')
        os.system(f'{get_python_executable()} -m pip uninstall taichi')
        wheels = os.listdir('dist')
        assert len(wheels) == 1
        wheel = wheels[0]
        print('Installing ', wheel)
        # Neutralize the dev environment so we test the installed wheel only.
        os.environ['PYTHONPATH'] = ''
        os.makedirs('test_env', exist_ok=True)
        os.system(
            f'cd test_env && {get_python_executable()} -m pip install ../dist/{wheel} --user'
        )
        print('Entering test environment...')
        if get_os_name() == 'win':
            os.system(
                'cmd /V /C "set PYTHONPATH=&& set TAICHI_REPO_DIR=&& cd test_env && cmd"'
            )
        else:
            os.system(
                'cd test_env && PYTHONPATH= TAICHI_REPO_DIR= bash --noprofile --norc '
            )
    else:
        raise ValueError("Unknown mode: %s" % mode)
# Allow importing this module without side effects; run only as a script.
if __name__ == '__main__':
    main()
| 34.529703 | 89 | 0.590108 |
06c2b20a1de74f33dbdffe6b8104f9c080bdf429 | 4,184 | py | Python | backend/functions/byu-covid-scraping/lambda_function.py | eabrouwer3/ebrouwer.dev | a7c51de7afaac6a2e5e550abb7d0435bc392cb4f | [
"MIT"
] | null | null | null | backend/functions/byu-covid-scraping/lambda_function.py | eabrouwer3/ebrouwer.dev | a7c51de7afaac6a2e5e550abb7d0435bc392cb4f | [
"MIT"
] | null | null | null | backend/functions/byu-covid-scraping/lambda_function.py | eabrouwer3/ebrouwer.dev | a7c51de7afaac6a2e5e550abb7d0435bc392cb4f | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import re
import datetime
from itertools import accumulate
from toolz.curried import assoc
import os
from decimal import Decimal
import boto3
from dateutil import parser
# Module-level DynamoDB client so it is reused across Lambda invocations.
DDB = boto3.client('dynamodb')
# Destination table names, injected through the Lambda environment.
# NOTE(review): os.environ.get returns None when a variable is unset —
# presumably the deployment always sets all three; verify against the stack.
TOTAL_TABLE = os.environ.get('TOTAL_TABLE')
AVERAGE_TABLE = os.environ.get('AVERAGE_TABLE')
SIR_TABLE = os.environ.get('SIR_TABLE')
def get_soup():
    """Download the BYU COVID screening page and return it as parsed soup."""
    page = requests.get('https://www.byu.edu/coronavirus/covid-19-screening')
    soup = BeautifulSoup(page.text, 'html.parser')
    return soup
def get_total_pop(soup):
    """Extract the total campus population from the page text.

    Finds the sentence "There are N people in the campus community this
    fall semester." and returns N as an int (thousands commas removed).
    """
    pattern = re.compile(
        r'There are ([0-9,]+) people in the campus community this fall semester\.')
    sentence = str(soup.find(string=pattern))
    digits = pattern.search(sentence).group(1).replace(',', '')
    return int(digits)
def get_IR(soup):
    """Return (infected, recovered) dicts parsed from the page's first table.

    Rows 1 and 2 (after the header) hold the Infected and Recovered
    figures. Each result dict carries:
      'count'   - int case count (column 1)
      'percent' - decimal string such as '0.05' for "5%", or None when
                  the percentage cell is blank (column 2)
    """
    first_table = soup.find_all(name='table')[0]
    parsed = []
    for row in first_table.find_all(name='tr')[1:3]:
        cells = row.find_all('td')
        # Blank cells come through as None; normalize to None after
        # stripping the '%' sign.
        raw_percent = str(cells[2].string or '').replace('%', '') or None
        parsed.append({
            'count': int(cells[1].string),
            'percent': raw_percent and str(Decimal(raw_percent) / 100)
        })
    return tuple(parsed)
def get_IR_date(soup):
    """Return the "as of" date of the infected/recovered table, ISO formatted.

    Locates text like "(... as of September 28, 2020)" on the page and
    returns the contained date as 'YYYY-MM-DD'.
    """
    as_of_re = re.compile(r'as of (.*)\)')
    snippet = str(soup.find(string=as_of_re))
    raw_date = as_of_re.search(snippet).group(1)
    return parser.parse(raw_date).date().isoformat()
def parse_weekly_table(soup):
    """Parse the third table (weekly stats) into a list of row dicts.

    A row such as "9/28–10/4 | 1.5 | 42" becomes
      {'date': 'YYYY-MM-DD', 'average': '0.015', 'total': 42}
    where the date is the *end* of the week range and the average is the
    percentage column divided by 100 (decimal string).
    NOTE(review): the year is taken from "now", so a table spanning a
    year boundary would be mislabeled — confirm acceptable.
    """
    week_re = re.compile(r'[0-9]+/[0-9]+–([0-9]+)/([0-9]+)')
    weekly_rows = soup.find_all(name='table')[2].find_all(name='tr')[1:]
    results = []
    for row in weekly_rows:
        cells = row.find_all('td')
        end_date = week_re.search(str(cells[0].string))
        year = datetime.datetime.now().year
        month = int(end_date.group(1))
        day = int(end_date.group(2))
        results.append({
            'date': f'{year}-{month:02}-{day:02}',
            'average': str(Decimal(cells[1].string) / 100),
            'total': int(cells[2].string)
        })
    return results
def _save_totals(accumulated, I, R, IR_date):
    """Write one cumulative case-count item per week, plus the live headline
    total (infected + recovered) under its own "as of" date."""
    for row in accumulated:
        DDB.put_item(
            TableName=TOTAL_TABLE,
            Item={
                "caseCount": {
                    "N": str(row['total'])
                },
                "date": {
                    "S": row['date']
                }
            }
        )
    DDB.put_item(
        TableName=TOTAL_TABLE,
        Item={
            "caseCount": {
                "N": str(I['count'] + R['count'])
            },
            "date": {
                "S": IR_date
            }
        }
    )


def _save_averages(accumulated):
    """Write each week's average positive percentage (decimal string)."""
    for row in accumulated:
        DDB.put_item(
            TableName=AVERAGE_TABLE,
            Item={
                "average": {
                    "N": row['average']
                },
                "date": {
                    "S": row['date']
                }
            }
        )


def _save_sir(S, I, R, IR_date, total_pop):
    """Write the susceptible/infected/recovered snapshot for IR_date.

    When the scraped table lacked an explicit percentage, fall back to
    count / total_pop.
    """
    DDB.put_item(
        TableName=SIR_TABLE,
        Item={
            "date": {
                "S": IR_date
            },
            "susceptibleCount": {
                "N": str(S['count'])
            },
            "susceptiblePercent": {
                "N": str(S['percent'])
            },
            "infectedCount": {
                "N": str(I['count'])
            },
            "infectedPercent": {
                "N": str(I['percent'] or I['count'] / total_pop)
            },
            "recoveredCount": {
                "N": str(R['count'])
            },
            "recoveredPercent": {
                "N": str(R['percent'] or R['count'] / total_pop)
            }
        }
    )


def handler(event=None, context=None, callback=None):
    """AWS Lambda entry point: scrape BYU's COVID page and persist stats.

    Scrapes the screening page once, derives the S/I/R compartments and
    the cumulative weekly totals, then writes them to the three DynamoDB
    tables. ``event``/``context``/``callback`` follow the Lambda calling
    convention and are unused.
    """
    soup = get_soup()
    total_pop = get_total_pop(soup)
    print(total_pop)
    I, R = get_IR(soup)
    IR_date = get_IR_date(soup)
    # Susceptible = everyone not currently infected or recovered.
    S_count = total_pop - I['count'] - R['count']
    S = {
        'count': S_count,
        'percent': S_count / total_pop
    }
    print(I, R)
    weekly = parse_weekly_table(soup)
    print(weekly)
    # Turn per-week totals into a running (cumulative) total per week.
    accumulated = list(accumulate(weekly, lambda a, b: assoc(b, 'total', b['total'] + a['total'])))
    print(accumulated)
    _save_totals(accumulated, I, R, IR_date)
    _save_averages(accumulated)
    _save_sir(S, I, R, IR_date, total_pop)
| 26.820513 | 99 | 0.503107 |
95d4fa6cbf876ddc4e0ad77dbaa0fdfa28f1bd43 | 2,083 | py | Python | spark_scripts/rabbitmq_to_kafka.py | LSS-USP/interscity-traffic-anomaly-experiment | 8f218ab7b9ff0e065323162d1580e00543e9ebb2 | [
"Apache-2.0"
] | null | null | null | spark_scripts/rabbitmq_to_kafka.py | LSS-USP/interscity-traffic-anomaly-experiment | 8f218ab7b9ff0e065323162d1580e00543e9ebb2 | [
"Apache-2.0"
] | 1 | 2018-06-05T12:58:12.000Z | 2018-06-05T12:58:12.000Z | spark_scripts/rabbitmq_to_kafka.py | LSS-USP/interscity-traffic-anomaly-experiment | 8f218ab7b9ff0e065323162d1580e00543e9ebb2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from kafka import KafkaProducer
import pika
from xml.dom import minidom
import json
# Kafka producer used to publish per-edge average speeds downstream.
producer = KafkaProducer(bootstrap_servers='localhost:9092')
# RabbitMQ connection/channel: location updates arrive on the
# 'data_stream' topic exchange.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='data_stream',
                         exchange_type='topic')
# Exclusive, server-named queue: removed automatically when this
# consumer disconnects.
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
# Subscribe to simulated current-location updates from any resource.
channel.queue_bind(exchange='data_stream',
                   routing_key = '*.current_location.simulated',
                   queue=queue_name)
print(' [*] Waiting for logs. To exit press CTRL+C')
def load_edges(path="map.xml"):
    """Load the road network and return a mapping of link id -> link length.

    Parameters
    ----------
    path : str
        XML file containing ``<link id="..." length="...">`` elements.
        Defaults to "map.xml" so existing callers are unchanged.

    Returns
    -------
    dict[int, float]
        Edge id to edge length.
    """
    links = minidom.parse(path).getElementsByTagName('link')
    return {int(link.getAttribute('id')): float(link.getAttribute('length'))
            for link in links}
# Last seen (tick, edge id) per vehicle uuid; used to derive travel time.
db = {}
# edge id -> edge length, loaded once at startup from map.xml.
edges = load_edges()
# print("edges => ", edges)
print("Edges loading completed...")
def callback(ch, method, properties, body):
    """Handle one location update: publish the avg speed over the last edge.

    Compares the incoming (tick, edge) against the vehicle's previously
    recorded pair. When the tick advanced and the edge length is known,
    publishes {"edge_id", "avg_speed"} to Kafka. The newest (tick, edge)
    pair is always recorded — except when the edge id is unknown, in
    which case the update is dropped and the old record kept.
    """
    payload = json.loads(body)
    vehicle = payload["uuid"]
    tick = payload["tick"]
    edge = payload["nodeID"]
    last_seen = db.get(vehicle)
    if last_seen is None:
        # First sighting of this vehicle: nothing to compare against yet.
        print("X")
    else:
        last_tick, _last_edge = last_seen
        if tick <= last_tick:
            # Out-of-order message.
            print("Wrong tick arrived! WARNING")
        else:
            print("edge_id => ", edge)
            length = edges.get(int(edge), None)
            if length is None:
                # Unknown edge: skip and keep the previous record intact.
                print("%")
                return
            velocity_data = {
                "edge_id": edge,
                "avg_speed": length / (tick - last_tick)
            }
            print("Posting this data to Kafka: ", velocity_data)
            producer.send('data_stream', json.dumps(velocity_data).encode())
    db[vehicle] = (tick, edge)
# Register the handler; no_ack=True means messages are not redelivered
# if processing fails.
channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
print("Queue consuming starting...")
# Blocks forever, dispatching each message to callback().
channel.start_consuming()
| 28.148649 | 81 | 0.610658 |
260fe596a39f28c22f0328d1dea9752e20ae124f | 4,403 | py | Python | lldb/test/API/functionalities/thread/exit_during_expression/TestExitDuringExpression.py | elizabethandrews/llvm | 308498236c1c4778fdcba0bfbb556adf8aa333ea | [
"Apache-2.0"
] | 305 | 2019-09-14T17:16:05.000Z | 2022-03-31T15:05:20.000Z | lldb/test/API/functionalities/thread/exit_during_expression/TestExitDuringExpression.py | elizabethandrews/llvm | 308498236c1c4778fdcba0bfbb556adf8aa333ea | [
"Apache-2.0"
] | 11 | 2019-10-17T21:11:52.000Z | 2022-02-17T20:10:00.000Z | lldb/test/API/functionalities/thread/exit_during_expression/TestExitDuringExpression.py | elizabethandrews/llvm | 308498236c1c4778fdcba0bfbb556adf8aa333ea | [
"Apache-2.0"
] | 24 | 2019-10-03T11:22:11.000Z | 2022-01-25T09:59:30.000Z | """
Make sure that we handle an expression on a thread, if
the thread exits while the expression is running.
"""
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class TestExitDuringExpression(TestBase):
    """Check that evaluating an expression on a thread that exits mid-call
    fails cleanly and leaves the process in a continuable state.

    Covers the four combinations of (thread exits before/after the
    one-thread timeout) x (unwind_on_error on/off).
    """
    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True
    @skipIfWindows
    def test_exit_before_one_thread_unwind(self):
        """Test the case where we exit within the one thread timeout"""
        self.exiting_expression_test(True, True)
    @skipIfWindows
    def test_exit_before_one_thread_no_unwind(self):
        """Test the case where we exit within the one thread timeout"""
        self.exiting_expression_test(True, False)
    @skipIfWindows
    def test_exit_after_one_thread_unwind(self):
        """Test the case where we exit after the one thread timeout"""
        self.exiting_expression_test(False, True)
    @skipIfWindows
    def test_exit_after_one_thread_no_unwind(self):
        """Test the case where we exit after the one thread timeout"""
        self.exiting_expression_test(False, False)
    def setUp(self):
        # Build the inferior and remember its main source for breakpoints.
        TestBase.setUp(self)
        self.main_source_file = lldb.SBFileSpec("main.c")
        self.build()
    @skipIfReproducer  # Timeouts are not currently modeled.
    def exiting_expression_test(self, before_one_thread_timeout , unwind):
        """function_to_call sleeps for g_timeout microseconds, then calls
        pthread_exit.

        This test calls function_to_call with an overall timeout of four
        times g_timeout, and a one-thread timeout either above or below
        g_timeout depending on *before_one_thread_timeout*.  It also sets
        unwind-on-error for the call to the *unwind* passed in.  This lets
        the thread exit either before or after the one-thread timeout
        expires.
        """
        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
                                   "Break here and cause the thread to exit", self.main_source_file)
        # We'll continue to this breakpoint after running our expression:
        return_bkpt = target.BreakpointCreateBySourceRegex("Break here to make sure the thread exited", self.main_source_file)
        frame = thread.frames[0]
        tid = thread.GetThreadID()
        # Read the inferior's g_timeout (static) so the test scales with it:
        var_options = lldb.SBVariablesOptions()
        var_options.SetIncludeArguments(False)
        var_options.SetIncludeLocals(False)
        var_options.SetIncludeStatics(True)
        value_list = frame.GetVariables(var_options)
        g_timeout = value_list.GetFirstValueByName("g_timeout")
        self.assertTrue(g_timeout.IsValid(), "Found g_timeout")
        error = lldb.SBError()
        timeout_value = g_timeout.GetValueAsUnsigned(error)
        self.assertTrue(error.Success(), "Couldn't get timeout value: %s"%(error.GetCString()))
        # "before": one-thread timeout longer than the thread's sleep, so
        # the thread exits first; "after": shorter, so the timeout fires
        # before the thread exits.
        one_thread_timeout = 0
        if (before_one_thread_timeout):
            one_thread_timeout = timeout_value * 2
        else:
            one_thread_timeout = int(timeout_value / 2)
        options = lldb.SBExpressionOptions()
        options.SetUnwindOnError(unwind)
        options.SetOneThreadTimeoutInMicroSeconds(one_thread_timeout)
        options.SetTimeoutInMicroSeconds(4 * timeout_value)
        result = frame.EvaluateExpression("function_to_call()", options)
        # Make sure the thread actually exited:
        thread = process.GetThreadByID(tid)
        self.assertFalse(thread.IsValid(), "The thread exited")
        # Make sure the expression failed:
        self.assertFalse(result.GetError().Success(), "Expression failed.")
        # Make sure we can keep going:
        threads = lldbutil.continue_to_breakpoint(process, return_bkpt)
        if not threads:
            self.fail("didn't get any threads back after continuing")
        self.assertEqual(len(threads), 1, "One thread hit our breakpoint")
        thread = threads[0]
        frame = thread.frames[0]
        # Now get the return value, if we successfully caused the thread to exit
        # it should be 10, not 20.
        ret_val = frame.FindVariable("ret_val")
        self.assertTrue(ret_val.GetError().Success(), "Found ret_val")
        ret_val_value = ret_val.GetValueAsSigned(error)
        self.assertTrue(error.Success(), "Got ret_val's value")
        self.assertEqual(ret_val_value, 10, "We put the right value in ret_val")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.