Column schema (observed value ranges; ⌀ marks columns in which null values occur):

| column | dtype | observed values | nulls |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 4 to 1.02M | |
| ext | string | 8 distinct values | |
| lang | string | 1 distinct value | |
| max_stars_repo_path | string | length 4 to 209 | |
| max_stars_repo_name | string | length 5 to 121 | |
| max_stars_repo_head_hexsha | string | length 40 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 4 to 209 | |
| max_issues_repo_name | string | length 5 to 121 | |
| max_issues_repo_head_hexsha | string | length 40 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 4 to 209 | |
| max_forks_repo_name | string | length 5 to 121 | |
| max_forks_repo_head_hexsha | string | length 40 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 4 to 1.02M | |
| avg_line_length | float64 | 1.07 to 66.1k | |
| max_line_length | int64 | 4 to 266k | |
| alphanum_fraction | float64 | 0.01 to 1 | |

In the example rows below, the *_repo_path, *_repo_name, *_repo_head_hexsha, and *_repo_licenses values are identical across the max_stars_*, max_issues_*, and max_forks_* column groups, so each row reports them once as path, repo, head, and licenses. Each row lists its metadata, then the file content, then the derived per-file statistics.
Row 1
hexsha: 9f1ff8268d2c598520fc1239b5d51ce064eb37aa | size: 65 | ext: py | lang: Python
path: without-attrs/conanfile.py | repo: grisumbras/conan-promote | head: f1d746bbd134f64af0d9f6b4cf6d579c99980a1f | licenses: ["BSL-1.0"]
stars: 1 (2020-02-02T16:39:43.000Z to 2020-02-02T16:39:43.000Z) | issues: null | forks: null
content:
from conans import ConanFile
class MyConan(ConanFile):
pass
avg_line_length: 13 | max_line_length: 28 | alphanum_fraction: 0.769231
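The three trailing statistics of each row are derived from `content`. The dataset's exact preprocessing is not documented here, so the definitions below are an assumption, but this minimal sketch reproduces the Row 1 values above when the file is reconstructed with its PEP 8 blank lines and 4-space indent (whitespace the flattened view does not show):

```python
def file_stats(content: str):
    """Per-file statistics as they appear in this dataset (assumed definitions)."""
    lines = content.split("\n")                 # assumption: plain '\n' split, no trailing newline
    avg_line_length = len(content) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction

# Row 1 content, reconstructed as 65 bytes with two blank lines and a 4-space indent:
row1 = "from conans import ConanFile\n\n\nclass MyConan(ConanFile):\n    pass"
print(file_stats(row1))   # (13.0, 28, 0.7692307692307693)
```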
Row 2
hexsha: 9eba8787d82043df9e5bd74786b24722dc5210d4 | size: 13,367 | ext: py | lang: Python
path: mutatery/__init__.py | repo: serin-delaunay/mutatery-nngm-2019 | head: b2786c81726192cb9455fd31153e9021f202d262 | licenses: ["Apache-2.0"]
stars: 1 (2020-10-12T21:09:41.000Z to 2020-10-12T21:09:41.000Z) | issues: null | forks: null
content:
import re
import random
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
basestring = basestring
class Node(object):
def __init__(self, parent, child_index, settings):
self.errors = []
if settings.get('raw', None) is None:
self.errors.append("Empty input for node")
settings['raw'] = ""
if isinstance(parent, Grammar):
self.grammar = parent
self.parent = None
self.depth = 0
self.child_index = 0
else:
self.grammar = parent.grammar
self.parent = parent
self.depth = parent.depth + 1
self.child_index = child_index
self.raw = settings['raw']
self.type = settings.get('type', None)
self.is_expanded = False
def expand_children(self, child_rule, path, prevent_recursion=False):
self.children = []
self.finished_text = ""
self.child_rule = child_rule
if self.child_rule is not None:
sections, errors = parse(child_rule)
self.errors.extend(errors)
for i, section in enumerate(sections):
node = Node(self, i, section)
self.children.append(node)
if not prevent_recursion:
node.expand(path + (i,), prevent_recursion)
self.finished_text += node.finished_text
else:
self.errors.append("No child rule provided, can't expand children")
def expand(self, path, prevent_recursion=False):
if not self.is_expanded:
self.is_expanded = True
self.expansion_errors = []
# Types of nodes
# -1: raw, needs parsing
# 0: Plaintext
# 1: Tag ("#symbol.mod.mod2.mod3#" or
# "#[pushTarget:pushRule]symbol.mod")
# 2: Action ("[pushTarget:pushRule], [pushTarget:POP]",
# more in the future)
if self.type == -1:
self.expand_children(self.raw, path, prevent_recursion)
elif self.type == 0:
self.finished_text = self.raw
elif self.type == 1:
self.preactions = []
self.postactions = []
parsed = parse_tag(self.raw)
self.symbol = parsed['symbol']
self.modifiers = parsed['modifiers']
for preaction in parsed['preactions']:
self.preactions.append(NodeAction(self, preaction['raw']))
for preaction in self.preactions:
if preaction.type == 0:
self.postactions.append(preaction.create_undo())
for i, preaction in enumerate(self.preactions, 1):
preaction.activate(path + (-i,))
self.finished_text = self.raw
selected_rule = self.grammar.select_rule(self.symbol, self,
self.errors, path)
self.expand_children(selected_rule, path, prevent_recursion)
# apply modifiers
for mod_name in self.modifiers:
mod_params = []
if mod_name.find('(') > 0:
regexp = re.compile(r'\(([^)]+)\)')
matches = regexp.findall(mod_name)
if len(matches) > 0:
mod_params = matches[0].split(",")
mod_name = mod_name[:mod_name.find('(')]
mod = self.grammar.modifiers.get(mod_name, None)
if mod is None:
self.errors.append("Missing modifier " + mod_name)
self.finished_text += "((." + mod_name + "))"
else:
self.finished_text = mod(self.finished_text,
*mod_params)
elif self.type == 2:
self.action = NodeAction(self, self.raw)
self.action.activate(path + (-1,))
self.finished_text = ""
def clear_escape_chars(self):
self.finished_text = self.finished_text.replace(
"\\\\", "DOUBLEBACKSLASH").replace(
"\\", "").replace(
"DOUBLEBACKSLASH", "\\")
class NodeAction(object): # has a 'raw' attribute
def __init__(self, node, raw):
self.node = node
sections = raw.split(":")
self.target = sections[0]
if len(sections) == 1:
self.type = 2
else:
self.rule = sections[1]
if self.rule == "POP":
self.type = 1
else:
self.type = 0
def create_undo(self):
if self.type == 0:
return NodeAction(self.node, self.target + ":POP")
return None
def activate(self, path):
grammar = self.node.grammar
if self.type == 0:
self.rule_sections = self.rule.split(",")
self.finished_rules = []
self.rule_nodes = []
for rule_section in self.rule_sections:
n = Node(grammar, 0, {'type': -1, 'raw': rule_section})
n.expand(path)
self.finished_rules.append(n.finished_text)
grammar.push_rules(self.target, self.finished_rules, self)
elif self.type == 1:
grammar.pop_rules(self.target)
elif self.type == 2:
grammar.flatten(self.target, path, True)
def to_text(self): pass # FIXME
class RuleSet(object):
def __init__(self, grammar, raw):
self.raw = raw
self.grammar = grammar
self.default_uses = []
if isinstance(raw, list):
self.default_rules = raw
elif isinstance(raw, basestring):
self.default_rules = [raw]
else:
self.default_rules = []
def select_rule(self, path):
# in kate's code there's a bunch of stuff for different methods of
# selecting a rule, none of which seem to be implemented yet! so for
# now I'm just going to ...
if len(set(self.default_rules)) == 1:
return self.default_rules[0]
if path in self.grammar.choices:
return self.grammar.choices[path]
else:
if path == self.grammar.mutated_path:
remaining_rules = [r for r in self.default_rules if r != self.grammar.mutated_choice]
else:
remaining_rules = self.default_rules
result = random.choice(remaining_rules)
self.grammar.choices[path] = result
return result
def clear_state(self):
self.default_uses = []
class Symbol(object):
def __init__(self, grammar, key, raw_rules):
self.grammar = grammar
self.key = key
self.raw_rules = raw_rules
self.base_rules = RuleSet(grammar, raw_rules)
self.clear_state()
def clear_state(self):
self.stack = [self.base_rules]
self.uses = []
self.base_rules.clear_state()
def push_rules(self, raw_rules):
rules = RuleSet(self.grammar, raw_rules)
self.stack.append(rules)
def pop_rules(self):
self.stack.pop()
def select_rule(self, node, errors, path):
self.uses.append({'node': node})
if len(self.stack) == 0:
errors.append("The rule stack for '" + self.key +
"' is empty, too many pops?")
return self.stack[-1].select_rule(path)
# def get_active_rules(self, path):
# if len(self.stack) == 0:
# return None
# return self.stack[-1].select_rule(path)
class Grammar(object):
def __init__(self, raw, settings=None):
self.modifiers = {}
self.load_from_raw_obj(raw)
self.errors = []
self.choices = {}
self.mutated_path = None
self.mutated_choice = None
if settings is None:
self.settings = {}
def clear_state(self):
for val in self.symbols.values():
val.clear_state()
def add_modifiers(self, mods):
# not sure what this is for yet
for key in mods:
self.modifiers[key] = mods[key]
def load_from_raw_obj(self, raw):
self.raw = raw
self.symbols = dict()
self.subgrammars = list()
if raw:
self.symbols = dict(
(k, Symbol(self, k, v)) for k, v in raw.items())
def create_root(self, rule):
return Node(self, 0, {'type': -1, 'raw': rule})
def expand(self, rule, path, allow_escape_chars=False):
root = self.create_root(rule)
root.expand(path)
if not allow_escape_chars:
root.clear_escape_chars()
self.errors.extend(root.errors)
return root
def flatten(self, rule, path=(), allow_escape_chars=False):
root = self.expand(rule, path, allow_escape_chars)
return root.finished_text
def push_rules(self, key, raw_rules, source_action=None):
if key not in self.symbols:
self.symbols[key] = Symbol(self, key, raw_rules)
else:
self.symbols[key].push_rules(raw_rules)
def pop_rules(self, key):
if key not in self.symbols:
self.errors.append("Can't pop: no symbol for key " + key)
else:
self.symbols[key].pop_rules()
def select_rule(self, key, node, errors, path):
if key in self.symbols:
return self.symbols[key].select_rule(node, errors, path)
else:
if key is None:
key = str(None)
self.errors.append("No symbol for " + key)
return "((" + key + "))"
def parse_tag(tag_contents):
"""
returns a dictionary with 'symbol', 'modifiers', 'preactions',
'postactions'
"""
parsed = dict(
symbol=None,
preactions=[],
postactions=[],
modifiers=[])
sections, errors = parse(tag_contents)
symbol_section = None
for section in sections:
if section['type'] == 0:
if symbol_section is None:
symbol_section = section['raw']
else:
raise Exception("multiple main sections in " + tag_contents)
else:
parsed['preactions'].append(section)
if symbol_section is not None:
components = symbol_section.split(".")
parsed['symbol'] = components[0]
parsed['modifiers'] = components[1:]
return parsed
def parse(rule):
depth = 0
in_tag = False
sections = list()
escaped = False
errors = []
start = 0
escaped_substring = ""
last_escaped_char = None
if rule is None:
return sections, errors  # match the (sections, errors) tuple returned below and unpacked by callers
def create_section(start, end, type_):
if end - start < 1:
if type_ == 1:
errors.append(str(start) + ": empty tag")
elif type_ == 2:
errors.append(str(start) + ": empty action")
raw_substring = None
if last_escaped_char is not None:
raw_substring = escaped_substring + "\\" + \
rule[last_escaped_char+1:end]
else:
raw_substring = rule[start:end]
sections.append({'type': type_, 'raw': raw_substring})
for i, c in enumerate(rule):
if not escaped:
if c == '[':
if depth == 0 and not in_tag:
if start < i:
create_section(start, i, 0)
last_escaped_char = None
escaped_substring = ""
start = i + 1
depth += 1
elif c == ']':
depth -= 1
if depth == 0 and not in_tag:
create_section(start, i, 2)
last_escaped_char = None
escaped_substring = ""
start = i + 1
elif c == '#':
if depth == 0:
if in_tag:
create_section(start, i, 1)
last_escaped_char = None
escaped_substring = ""
start = i + 1
else:
if start < i:
create_section(start, i, 0)
last_escaped_char = None
escaped_substring = ""
start = i + 1
in_tag = not in_tag
elif c == '\\':
escaped = True
escaped_substring = escaped_substring + rule[start:i]
start = i + 1
last_escaped_char = i
else:
escaped = False
if start < len(rule):
create_section(start, len(rule), 0)
last_escaped_char = None
escaped_substring = ""
if in_tag:
errors.append("unclosed tag")
if depth > 0:
errors.append("too many [")
if depth < 0:
errors.append("too many ]")
sections = [s for s in sections
if not(s['type'] == 0 and len(s['raw']) == 0)]
return sections, errors
avg_line_length: 34.09949 | max_line_length: 101 | alphanum_fraction: 0.519638
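The comments in the file above describe a tracery-style grammar DSL: `#symbol.modifier#` tags expand symbols and `[target:rule]` actions push rules. A rough usage sketch, assuming the `__init__.py` above is importable as the `mutatery` package; the symbol names are made up for illustration:

```python
import mutatery  # assumption: the file above is installed/importable as 'mutatery'

# '#name#' is a tag that expands to one of the rules registered for 'name'.
grammar = mutatery.Grammar({
    'origin': ['hello #name#'],
    'name': ['world', 'tracery'],
})
print(grammar.flatten('#origin#'))  # e.g. "hello world"
```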
Row 3
hexsha: d4503729db5fccc927b8b0d22bf5cdd92e29502f | size: 7,603 | ext: py | lang: Python
path: tests/api/apitestbase.py | repo: MJochim/seahub | head: 66fcc6772511d43346a2980613576c5fdb4c4945 | licenses: ["Apache-2.0"]
stars: 420 (2015-01-03T11:34:46.000Z to 2022-03-10T07:15:41.000Z) | issues: 735 (2015-01-04T21:22:51.000Z to 2022-03-31T09:26:07.000Z) | forks: 379 (2015-01-05T17:08:03.000Z to 2022-03-06T00:11:50.000Z)
content:
#coding: UTF-8
import requests
import unittest
from contextlib import contextmanager
from nose.tools import assert_equal, assert_in # pylint: disable=E0611
from urllib.parse import quote
from tests.common.common import USERNAME, PASSWORD, \
ADMIN_USERNAME, ADMIN_PASSWORD
from tests.common.utils import apiurl, urljoin, randstring
from tests.api.urls import TOKEN_URL, GROUPS_URL, ACCOUNTS_URL, REPOS_URL
class ApiTestBase(unittest.TestCase):
_token = None
_admin_token = None
username = USERNAME
password = PASSWORD
admin_username = ADMIN_USERNAME
admin_password = ADMIN_PASSWORD
@classmethod
def get(cls, *args, **kwargs):
return cls._req('GET', *args, **kwargs)
@classmethod
def post(cls, *args, **kwargs):
return cls._req('POST', *args, **kwargs)
@classmethod
def put(cls, *args, **kwargs):
return cls._req('PUT', *args, **kwargs)
@classmethod
def delete(cls, *args, **kwargs):
return cls._req('DELETE', *args, **kwargs)
@classmethod
def admin_get(cls, *args, **kwargs):
kwargs['admin'] = True
return cls.get(*args, **kwargs)
@classmethod
def admin_post(cls, *args, **kwargs):
kwargs['admin'] = True
return cls.post(*args, **kwargs)
@classmethod
def admin_put(cls, *args, **kwargs):
kwargs['admin'] = True
return cls.put(*args, **kwargs)
@classmethod
def admin_delete(cls, *args, **kwargs):
kwargs['admin'] = True
return cls.delete(*args, **kwargs)
@classmethod
def _req(cls, method, *args, **kwargs):
use_token = kwargs.pop('use_token', True)
token = kwargs.pop('token', None)
if use_token and token is None:
admin = kwargs.pop('admin', False)
if admin:
if cls._admin_token is None:
cls._admin_token = get_auth_token(ADMIN_USERNAME,
ADMIN_PASSWORD)
token = cls._admin_token
else:
if cls._token is None:
cls._token = get_auth_token(USERNAME, PASSWORD)
token = cls._token
if use_token:
headers = kwargs.get('headers', {})
headers.setdefault('Authorization', 'Token ' + token)
kwargs['headers'] = headers
expected = kwargs.pop('expected', 200)
resp = requests.request(method, *args, **kwargs)
if expected is not None:
if hasattr(expected, '__iter__'):
assert_in(resp.status_code, expected,
"Expected http status in %s, received %s" % (expected,
resp.status_code))
else:
assert_equal(resp.status_code, expected,
"Expected http status %s, received %s" % (expected,
resp.status_code))
return resp
def assertHasLen(self, lst, length):
"""
Assert a list/tuple/string has exact `length`
"""
msg = 'Expected to have length %s, but length is %s' \
% (length, len(lst))
self.assertEqual(len(lst), length, msg)
def assertNotEmpty(self, lst):
"""
Assert a list/tuple/string is not empty
"""
msg = 'Expected not empty, but it is'
self.assertGreater(len(lst), 0, msg)
@contextmanager
def get_tmp_repo(self):
"""
Context manager to create a tmp repo, and automatically delete it after use
with self.tmp_repo() as repo:
self.get(repo.file_url + '?p=/')
"""
repo = self.create_repo()
try:
yield repo
finally:
self.remove_repo(repo.repo_id)
@contextmanager
def get_tmp_group(self):
"""
Context manager to create a tmp group, and automatically delete it after use
with self.tmp_repo() as repo:
self.get(repo.file_url + '?p=/')
"""
group = self.create_group()
try:
yield group
finally:
self.remove_group(group.group_id)
@contextmanager
def get_tmp_user(self):
"""
Context manager to create a tmp user, and automatically delete it after use
with self.get_tmp_user() as user:
...
"""
user = self.create_user()
try:
yield user
finally:
self.remove_user(user.user_name)
def create_repo(self):
repo_name = '测试-test-repo-%s' % randstring(6)
data = {
'name': repo_name,
'desc': 'just for test - 测试用资料库',
}
repo = self.post(REPOS_URL, data=data).json()
repo_id = repo['repo_id']
return _Repo(repo_id)
def remove_repo(self, repo_id):
repo_url = urljoin(REPOS_URL, repo_id)
self.delete(repo_url)
def create_group(self):
group_name = '测试群组-%s' % randstring(16)
data = {'group_name': group_name}
group_id = self.put(GROUPS_URL, data=data).json()['group_id']
return _Group(group_name, group_id)
def remove_group(self, group_id):
group_url = urljoin(GROUPS_URL, str(group_id))
self.delete(group_url)
def create_user(self):
username = '%s@test.com' % randstring(20)
password = randstring(20)
data = {'password': password}
self.admin_put(urljoin(ACCOUNTS_URL, username), data=data, expected=201)
return _User(username, password)
def remove_user(self, username):
user_url = urljoin(ACCOUNTS_URL, username)
self.admin_delete(user_url)
def create_file(self, repo, fname=None):
if isinstance(repo, str):
repo = _Repo(repo)
fname = fname or ('文件 %s.txt' % randstring())
furl = repo.get_filepath_url('/' + fname)
data = {'operation': 'create'}
res = self.post(furl, data=data, expected=201)
self.assertEqual(res.text, '"success"')
return fname, furl
def create_dir(self, repo):
data = {'operation': 'mkdir'}
dpath = '/目录 %s' % randstring()
durl = repo.get_dirpath_url(dpath)
res = self.post(durl, data=data, expected=201)
self.assertEqual(res.text, '"success"')
return dpath, durl
def get_auth_token(username, password):
data = {
'username': username,
'password': password,
'platform': 'linux',
'device_id': '701143c1238e6736b61c20e73de82fc95989c413',
'device_name': 'test',
}
res = requests.post(TOKEN_URL, data=data)
assert_equal(res.status_code, 200)
token = res.json()['token']
assert_equal(len(token), 40)
return token
class _Repo(object):
def __init__(self, repo_id):
self.repo_id = repo_id
self.repo_url = urljoin(REPOS_URL, self.repo_id)
self.file_url = urljoin(self.repo_url, 'file')
self.dir_url = urljoin(self.repo_url, 'dir')
def get_filepath_url(self, path):
query = '?p=%s' % quote(path)
return self.file_url + query
def get_dirpath_url(self, path):
query = '?p=%s' % quote(path)
return self.dir_url + query
class _Group(object):
def __init__(self, group_name, group_id):
self.group_name = group_name
self.group_id = group_id
self.group_url = urljoin(GROUPS_URL, str(self.group_id))
class _User(object):
def __init__(self, username, password):
self.user_name = username
self.password = password
self.user_url = urljoin(ACCOUNTS_URL, username)
avg_line_length: 31.032653 | max_line_length: 84 | alphanum_fraction: 0.589504
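ApiTestBase above wires token handling and status-code assertions into `requests` and exposes context managers for temporary repos, groups, and users. A hedged sketch of how a concrete test class appears intended to use it (the class and test names are invented for illustration, following the docstring example in the file):

```python
# Sketch only: exercises the helpers defined in apitestbase.py above.
class RepoSmokeTest(ApiTestBase):
    def test_list_root_of_new_repo(self):
        with self.get_tmp_repo() as repo:       # creates a repo, deletes it afterwards
            self.get(repo.file_url + '?p=/')    # _req() asserts HTTP 200 by default
```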
Row 4
hexsha: df426a335732ebe029328ffce41cd068fc432063 | size: 4,369 | ext: py | lang: Python
path: model_pruning/python/pruning_interface_test.py | repo: TokyoYoshida/google-research | head: 102b6a5cdb85acd490e23831b643fc679cbb8db5 | licenses: ["Apache-2.0"]
stars: 1 (2021-02-22T23:19:11.000Z to 2021-02-22T23:19:11.000Z) | issues: null | forks: null
content:
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for pruning_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from model_pruning.python import pruning
from model_pruning.python import pruning_interface
class MockWeightInit(object):
"""Mock class providing weight initialization config params."""
@staticmethod
def Constant(scale):
"""Constant initialization."""
return {"scale": scale}
class MockLSTMVars(object):
"""Mock LSTM vars."""
def __init__(self):
self.wm = None
self.mask = None
self.threshold = None
class MockLSTMCell(object):
"""Mock LSTM cell."""
def __init__(self):
self._private_vars = {}
self._private_theta = {}
self.vars = MockLSTMVars()
def CreateVariable(self, name, var_params, theta_fn=None, trainable=False):
dtype = var_params["dtype"]
shape = var_params["shape"]
scale = var_params["init"]["scale"]
v_init = tf.constant_initializer(value=scale, dtype=dtype)
with tf.variable_scope("MockLSTMCell"):
var = tf.get_variable(name, shape, dtype, v_init, trainable=trainable)
value = var
if theta_fn is not None:
value = theta_fn(value)
self._private_vars[name] = var
self._private_theta[name] = value
if name == "wm":
self.vars.wm = var
elif name == "mask":
self.vars.mask = var
elif name == "threshold":
self.vars.threshold = var
else:
raise ValueError("name {} is not supported".format(name))
class PruningSpeechUtilsTest(tf.test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]",
"block_dims_map=[dense1:4x4,dense2:1x4]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningSpeechUtilsTest, self).setUp()
# Add global step variable to the graph
self.global_step = tf.train.get_or_create_global_step()
# Add sparsity
self.sparsity = tf.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
self.pruning_obj = pruning.Pruning(
self.pruning_hparams, global_step=self.global_step)
self.compression_obj = pruning_interface.get_matrix_compression_object(
self.pruning_hparams, global_step=self.global_step)
def MockWeightParamsFn(shape, init=None, dtype=None):
if init is None:
init = MockWeightInit.Constant(0.0)
if dtype is None:
dtype = tf.float32
return {"dtype": dtype, "shape": shape, "init": init}
self.mock_weight_params_fn = MockWeightParamsFn
self.mock_lstmobj = MockLSTMCell()
self.wm_pc = np.zeros((2, 2))
def testApplyPruningLSTM(self):
pruning_obj = pruning_interface.apply_pruning(
self.pruning_obj, self.pruning_hparams,
self.mock_weight_params_fn,
MockWeightInit,
self.mock_lstmobj,
self.wm_pc, tf.float32)
self.assertEqual(pruning_obj, self.pruning_obj)
def testGetPruningUpdate(self):
mask_update_op = pruning_interface.get_matrix_compression_update_op(
self.pruning_obj)
self.assertNotEqual(mask_update_op, tf.no_op())
def testApplyCustomizedLSTMMatrixCompression(self):
pruning_interface.apply_customized_lstm_matrix_compression(
self.compression_obj,
self.mock_weight_params_fn,
MockWeightInit,
self.mock_lstmobj,
self.wm_pc.shape, tf.float32)
self.assertGreater(len(tf.get_collection_ref(pruning.MASK_COLLECTION)), 0)
if __name__ == "__main__":
tf.test.main()
avg_line_length: 30.340278 | max_line_length: 78 | alphanum_fraction: 0.710002
Row 5
hexsha: d4dce68b478d0f6d9015303931c6462ce579df7c | size: 715 | ext: py | lang: Python
path: iceworm/trees/tests/test_types.py | repo: wrmsr0/iceworm | head: 09431bb3cdc4f6796aafca41e37d42ebe0ddfeef | licenses: ["BSD-3-Clause"]
stars: null | issues: 1 (2021-01-19T14:29:19.000Z to 2021-01-19T14:34:27.000Z) | forks: 1 (2020-12-31T22:29:52.000Z to 2020-12-31T22:29:52.000Z)
content:
from omnibus.serde import mapping as sm
from .. import nodes as no
from ..types import AstQuery
from ..types import Query
from ..types import StrQuery
def test_query():
s = 'abc'
d = sm.deserialize(s, Query)
assert d == StrQuery('abc')
s = sm.serialize(d, Query)
assert s == {'str_query': {'src': 'abc'}}
d = sm.deserialize(s, Query)
assert d == StrQuery('abc')
r = no.Select([
no.ExprSelectItem(
no.Integer(1))])
d = AstQuery(r)
s = sm.serialize(d, Query)
assert s == {'ast_query': {'root': {'select': {'items': [{'expr_select_item': {'value': {'integer': {'value': 1}}}}]}}}} # noqa
d = sm.deserialize(s, Query)
assert d == AstQuery(r)
avg_line_length: 27.5 | max_line_length: 132 | alphanum_fraction: 0.586014
Row 6
hexsha: a9ce326d3de307f5765cd92b8c6ec2fef069979c | size: 27 | ext: py | lang: Python
path: lru_cache/__init__.py | repo: lceames/lru-cache | head: 9812edf5372d49f3719368d9f9962e27e03f5953 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from .cache import LRUCache
avg_line_length: 27 | max_line_length: 27 | alphanum_fraction: 0.851852
Row 7
hexsha: f26132d9e5bc5e1a65585f0a18811268242fdb3b | size: 17,827 | ext: py | lang: Python
path: cwltool/schemas/v1.0/salad/schema_salad/schema.py | repo: jeremiahsavage/cwltool | head: fe1c06f16f866fa2d1a4655f236b9a52b9564fac | licenses: ["Apache-2.0"]
stars: 2 (2017-07-06T13:25:23.000Z to 2017-07-06T13:26:15.000Z) | issues: 1 (2018-05-10T06:45:21.000Z to 2018-05-10T06:45:21.000Z) | forks: 3 (2018-04-05T17:14:59.000Z to 2021-07-12T00:54:13.000Z)
content:
import avro
import copy
from .add_dictlist import add_dictlist
import sys
import pprint
from pkg_resources import resource_stream
import ruamel.yaml as yaml
try:
from ruamel.yaml import CSafeLoader as SafeLoader
except ImportError:
from ruamel.yaml import SafeLoader # type: ignore
import avro.schema
from . import validate
import json
import urlparse
AvroSchemaFromJSONData = avro.schema.make_avsc_object
# AvroSchemaFromJSONData=avro.schema.SchemaFromJSONData
from . import ref_resolver
from .flatten import flatten
import logging
from .aslist import aslist
from . import jsonld_context
from typing import Any, AnyStr, cast, Dict, List, Tuple, TypeVar, Union
_logger = logging.getLogger("salad")
salad_files = ('metaschema.yml',
'metaschema_base.yml',
'salad.md',
'field_name.yml',
'import_include.md',
'link_res.yml',
'ident_res.yml',
'vocab_res.yml',
'vocab_res.yml',
'field_name_schema.yml',
'field_name_src.yml',
'field_name_proc.yml',
'ident_res_schema.yml',
'ident_res_src.yml',
'ident_res_proc.yml',
'link_res_schema.yml',
'link_res_src.yml',
'link_res_proc.yml',
'vocab_res_schema.yml',
'vocab_res_src.yml',
'vocab_res_proc.yml')
def get_metaschema():
# type: () -> Tuple[avro.schema.Names, List[Dict[unicode, Any]], ref_resolver.Loader]
loader = ref_resolver.Loader({
"Any": "https://w3id.org/cwl/salad#Any",
"ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
"DocType": "https://w3id.org/cwl/salad#DocType",
"Documentation": "https://w3id.org/cwl/salad#Documentation",
"EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
"JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
"NamedType": "https://w3id.org/cwl/salad#NamedType",
"RecordField": "https://w3id.org/cwl/salad#RecordField",
"RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
"SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
"SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
"SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
"SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
"SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
"_container": "https://w3id.org/cwl/salad#JsonldPredicate/_container",
"_id": {
"@id": "https://w3id.org/cwl/salad#_id",
"@type": "@id",
"identity": True
},
"_type": "https://w3id.org/cwl/salad#JsonldPredicate/_type",
"abstract": "https://w3id.org/cwl/salad#SaladRecordSchema/abstract",
"array": "https://w3id.org/cwl/salad#array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"dct": "http://purl.org/dc/terms/",
"doc": "sld:doc",
"docAfter": {
"@id": "https://w3id.org/cwl/salad#docAfter",
"@type": "@id"
},
"docChild": {
"@id": "https://w3id.org/cwl/salad#docChild",
"@type": "@id"
},
"docParent": {
"@id": "https://w3id.org/cwl/salad#docParent",
"@type": "@id"
},
"documentRoot": "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot",
"documentation": "https://w3id.org/cwl/salad#documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": "https://w3id.org/cwl/salad#enum",
"extends": {
"@id": "https://w3id.org/cwl/salad#extends",
"@type": "@id",
"refScope": 1
},
"fields": {
"@id": "https://w3id.org/cwl/salad#fields",
"mapPredicate": "type",
"mapSubject": "name"
},
"float": "http://www.w3.org/2001/XMLSchema#float",
"identity": "https://w3id.org/cwl/salad#JsonldPredicate/identity",
"int": "http://www.w3.org/2001/XMLSchema#int",
"items": {
"@id": "https://w3id.org/cwl/salad#items",
"@type": "@vocab",
"refScope": 2
},
"jsonldPredicate": "sld:jsonldPredicate",
"long": "http://www.w3.org/2001/XMLSchema#long",
"mapPredicate": "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate",
"mapSubject": "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject",
"name": "@id",
"noLinkCheck": "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck",
"null": "https://w3id.org/cwl/salad#null",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"record": "https://w3id.org/cwl/salad#record",
"refScope": "https://w3id.org/cwl/salad#JsonldPredicate/refScope",
"sld": "https://w3id.org/cwl/salad#",
"specialize": {
"@id": "https://w3id.org/cwl/salad#specialize",
"mapPredicate": "specializeTo",
"mapSubject": "specializeFrom"
},
"specializeFrom": {
"@id": "https://w3id.org/cwl/salad#specializeFrom",
"@type": "@id",
"refScope": 1
},
"specializeTo": {
"@id": "https://w3id.org/cwl/salad#specializeTo",
"@type": "@id",
"refScope": 1
},
"string": "http://www.w3.org/2001/XMLSchema#string",
"symbols": {
"@id": "https://w3id.org/cwl/salad#symbols",
"@type": "@id",
"identity": True
},
"type": {
"@id": "https://w3id.org/cwl/salad#type",
"@type": "@vocab",
"refScope": 2,
"typeDSL": True
},
"typeDSL": "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL",
"xsd": "http://www.w3.org/2001/XMLSchema#"
})
for f in salad_files:
rs = resource_stream(__name__, 'metaschema/' + f)
loader.cache["https://w3id.org/cwl/" + f] = rs.read()
rs.close()
rs = resource_stream(__name__, 'metaschema/metaschema.yml')
loader.cache["https://w3id.org/cwl/salad"] = rs.read()
rs.close()
j = yaml.load(loader.cache["https://w3id.org/cwl/salad"],
Loader=SafeLoader)
j, _ = loader.resolve_all(j, "https://w3id.org/cwl/salad#")
# pprint.pprint(j)
(sch_names, sch_obj) = make_avro_schema(j, loader)
if isinstance(sch_names, Exception):
_logger.error("Metaschema error, avro was:\n%s",
json.dumps(sch_obj, indent=4))
raise sch_names
validate_doc(sch_names, j, loader, strict=True)
return (sch_names, j, loader)
def load_schema(schema_ref, cache=None):
# type: (Union[unicode, Dict[unicode, Any]], Dict) -> Tuple[ref_resolver.Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[unicode, Any], ref_resolver.Loader]
metaschema_names, metaschema_doc, metaschema_loader = get_metaschema()
if cache is not None:
metaschema_loader.cache = cache
schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
if not isinstance(schema_doc, list):
raise ValueError("Schema reference must resolve to a list.")
validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
metactx = schema_metadata.get("@context", {})
metactx.update(schema_metadata.get("$namespaces", {}))
(schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(
schema_doc, metactx)
# Create the loader that will be used to load the target document.
document_loader = ref_resolver.Loader(schema_ctx, cache=cache)
# Make the Avro validation that will be used to validate the target
# document
(avsc_names, avsc_obj) = make_avro_schema(schema_doc, document_loader)
return document_loader, avsc_names, schema_metadata, metaschema_loader
def load_and_validate(document_loader, avsc_names, document, strict):
# type: (ref_resolver.Loader, avro.schema.Names, Union[Dict[unicode, Any], unicode], bool) -> Tuple[Any, Dict[unicode, Any]]
if isinstance(document, dict):
data, metadata = document_loader.resolve_all(document, document["id"])
else:
data, metadata = document_loader.resolve_ref(document)
validate_doc(avsc_names, data, document_loader, strict)
return data, metadata
def validate_doc(schema_names, doc, loader, strict):
# type: (avro.schema.Names, Union[Dict[unicode, Any], List[Dict[unicode, Any]], unicode], ref_resolver.Loader, bool) -> None
has_root = False
for r in schema_names.names.values():
if ((hasattr(r, 'get_prop') and r.get_prop(u"documentRoot")) or (
u"documentRoot" in r.props)):
has_root = True
break
if not has_root:
raise validate.ValidationException(
"No document roots defined in the schema")
if isinstance(doc, list):
validate_doc = doc
elif isinstance(doc, dict):
validate_doc = [doc]
else:
raise validate.ValidationException("Document must be dict or list")
anyerrors = []
for pos, item in enumerate(validate_doc):
errors = []
success = False
for r in schema_names.names.values():
if ((hasattr(r, "get_prop") and r.get_prop(u"documentRoot")) or (
u"documentRoot" in r.props)):
try:
validate.validate_ex(
r, item, loader.identifiers, strict, foreign_properties=loader.foreign_properties)
success = True
break
except validate.ValidationException as e:
if hasattr(r, "get_prop"):
name = r.get_prop(u"name")
elif hasattr(r, "name"):
name = r.name
errors.append("Could not validate as `%s` because\n%s" % (
name, validate.indent(str(e), nolead=False)))
if not success:
objerr = "Validation error at position %i" % pos
for ident in loader.identifiers:
if ident in item:
objerr = "Validation error in object %s" % (item[ident])
break
anyerrors.append("%s\n%s" %
(objerr, validate.indent("\n".join(errors))))
if anyerrors:
raise validate.ValidationException("\n".join(anyerrors))
def replace_type(items, spec, loader, found):
# type: (Any, Dict[unicode, Any], ref_resolver.Loader, Set[unicode]) -> Any
""" Go through and replace types in the 'spec' mapping"""
items = copy.deepcopy(items)
if isinstance(items, dict):
# recursively check these fields for types to replace
if "type" in items and items["type"] in ("record", "enum"):
if items.get("name"):
if items["name"] in found:
return items["name"]
else:
found.add(items["name"])
for n in ("type", "items", "fields"):
if n in items:
items[n] = replace_type(items[n], spec, loader, found)
if isinstance(items[n], list):
items[n] = flatten(items[n])
return items
elif isinstance(items, list):
# recursively transform list
return [replace_type(i, spec, loader, found) for i in items]
elif isinstance(items, (str, unicode)):
# found a string which is a symbol corresponding to a type.
replace_with = None
if items in loader.vocab:
# If it's a vocabulary term, first expand it to its fully qualified
# URI
items = loader.vocab[items]
if items in spec:
# Look up in specialization map
replace_with = spec[items]
if replace_with:
return replace_type(replace_with, spec, loader, found)
return items
def avro_name(url): # type: (AnyStr) -> AnyStr
doc_url, frg = urlparse.urldefrag(url)
if frg:
if '/' in frg:
return frg[frg.rindex('/') + 1:]
else:
return frg
return url
Avro = TypeVar('Avro', Dict[unicode, Any], List[Any], unicode)
def make_valid_avro(items, alltypes, found, union=False):
# type: (Avro, Dict[unicode, Dict[unicode, Any]], Set[unicode], bool) -> Union[Avro, Dict]
items = copy.deepcopy(items)
if isinstance(items, dict):
if items.get("name"):
items["name"] = avro_name(items["name"])
if "type" in items and items["type"] in ("https://w3id.org/cwl/salad#record", "https://w3id.org/cwl/salad#enum", "record", "enum"):
if (hasattr(items, "get") and items.get("abstract")) or ("abstract"
in items):
return items
if not items.get("name"):
raise Exception(
"Named schemas must have a non-empty name: %s" % items)
if items["name"] in found:
return items["name"]
else:
found.add(items["name"])
for n in ("type", "items", "values", "fields"):
if n in items:
items[n] = make_valid_avro(
items[n], alltypes, found, union=True)
if "symbols" in items:
items["symbols"] = [avro_name(sym) for sym in items["symbols"]]
return items
if isinstance(items, list):
ret = []
for i in items:
ret.append(make_valid_avro(i, alltypes, found, union=union))
return ret
if union and isinstance(items, (str, unicode)):
if items in alltypes and avro_name(items) not in found:
return cast(Dict, make_valid_avro(alltypes[items], alltypes, found,
union=union))
items = avro_name(items) # type: ignore
# bug in mypy 0.3.1, fixed in 0.4-dev
return items
def extend_and_specialize(items, loader):
# type: (List[Dict[unicode, Any]], ref_resolver.Loader) -> List[Dict[unicode, Any]]
"""Apply 'extend' and 'specialize' to fully materialize derived record
types."""
types = {} # type: Dict[unicode, Any]
for t in items:
types[t["name"]] = t
n = []
for t in items:
t = copy.deepcopy(t)
if "extends" in t:
spec = {} # type: Dict[unicode, unicode]
if "specialize" in t:
for sp in aslist(t["specialize"]):
spec[sp["specializeFrom"]] = sp["specializeTo"]
exfields = [] # type: List[unicode]
exsym = [] # type: List[unicode]
for ex in aslist(t["extends"]):
if ex not in types:
raise Exception("Extends %s in %s refers to invalid base type" % (
t["extends"], t["name"]))
basetype = copy.deepcopy(types[ex])
if t["type"] == "record":
if spec:
basetype["fields"] = replace_type(
basetype.get("fields", []), spec, loader, set())
for f in basetype.get("fields", []):
if "inherited_from" not in f:
f["inherited_from"] = ex
exfields.extend(basetype.get("fields", []))
elif t["type"] == "enum":
exsym.extend(basetype.get("symbols", []))
if t["type"] == "record":
exfields.extend(t.get("fields", []))
t["fields"] = exfields
fieldnames = set() # type: Set[unicode]
for field in t["fields"]:
if field["name"] in fieldnames:
raise validate.ValidationException(
"Field name %s appears twice in %s" % (field["name"], t["name"]))
else:
fieldnames.add(field["name"])
elif t["type"] == "enum":
exsym.extend(t.get("symbols", []))
t["symbol"] = exsym
types[t["name"]] = t
n.append(t)
ex_types = {}
for t in n:
ex_types[t["name"]] = t
extended_by = {} # type: Dict[unicode, unicode]
for t in n:
if "extends" in t:
for ex in aslist(t["extends"]):
if ex_types[ex].get("abstract"):
add_dictlist(extended_by, ex, ex_types[t["name"]])
add_dictlist(extended_by, avro_name(ex), ex_types[ex])
for t in n:
if t.get("abstract") and t["name"] not in extended_by:
raise validate.ValidationException("%s is abstract but missing a concrete subtype" % t["name"])
for t in n:
if "fields" in t:
t["fields"] = replace_type(t["fields"], extended_by, loader, set())
return n
def make_avro_schema(i, loader):
# type: (List[Dict[unicode, Any]], ref_resolver.Loader) -> Tuple[Union[avro.schema.Names,avro.schema.SchemaParseException], List[Dict[unicode, Any]]]
names = avro.schema.Names()
j = extend_and_specialize(i, loader)
name_dict = {} # type: Dict[unicode, Dict[unicode, Any]]
for t in j:
name_dict[t["name"]] = t
j2 = make_valid_avro(j, name_dict, set())
j3 = [t for t in j2 if isinstance(t, dict) and not t.get(
"abstract") and t.get("type") != "documentation"]
try:
AvroSchemaFromJSONData(j3, names)
except avro.schema.SchemaParseException as e:
return (e, j3)
return (names, j3)
avg_line_length: 38.670282 | max_line_length: 185 | alphanum_fraction: 0.563247
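The `avro_name` helper in the file above strips a URI down to the last path component of its fragment so it can serve as an Avro name. The module is Python 2 code (it imports `urlparse` and uses `unicode`); the following is a Python 3 restatement of the same idea, for illustration only, checked against vocabulary URIs that appear in the loader context above:

```python
from urllib.parse import urldefrag

def avro_name_py3(url: str) -> str:
    """Python 3 re-statement of avro_name() above (illustration, not the project's code)."""
    _, frag = urldefrag(url)
    if frag:
        return frag[frag.rindex('/') + 1:] if '/' in frag else frag
    return url

print(avro_name_py3("https://w3id.org/cwl/salad#SaladRecordSchema"))             # SaladRecordSchema
print(avro_name_py3("https://w3id.org/cwl/salad#JsonldPredicate/_container"))    # _container
```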
Row 8
hexsha: d7c0cb38a3d3a8f0ec485843fa6260e100dcfcc9 | size: 4,955 | ext: py | lang: Python
path: test/test_time.py | repo: ToyotaResearchInstitute/task_behavior_ros | head: 0bc58f88556c9029c9a579511e1fdab9bd58248b | licenses: ["Apache-2.0"]
stars: 2 (2017-02-16T00:47:39.000Z to 2018-05-12T13:34:17.000Z) | issues: 4 (2017-02-17T19:05:22.000Z to 2017-05-04T17:41:26.000Z) | forks: 2 (2019-03-08T06:45:25.000Z to 2022-03-08T10:08:00.000Z)
content:
#!/usr/bin/env python
# Copyright 2016 Toyota Research Institute
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nose
from nose.tools import assert_equal
import argparse
import rospy
import sys
from task_behavior_engine import node
from task_behavior_engine.tree import NodeStatus
from task_behavior_ros import time
class TestTimeout(object):
def test_success(self):
success_node = node.Success(name="success")
timeout_success = time.Timeout(name="timed_success",
timeout=0.5,
child=success_node)
result = timeout_success.tick()
assert_equal(result, NodeStatus.SUCCESS)
def test_fail(self):
fail_node = node.Fail(name="fail")
timeout_fail = time.Timeout(name="timed_fail",
timeout=0.5,
child=fail_node)
result = timeout_fail.tick()
assert_equal(result, NodeStatus.FAIL)
def test_timedout(self):
active_node = node.Continue(name="continue")
timeout_active = time.Timeout(name="timed_continue",
timeout=0.5,
child=active_node)
result = timeout_active.tick()
assert_equal(result, NodeStatus.ACTIVE)
rospy.sleep(0.5)
result = timeout_active.tick()
assert_equal(result, NodeStatus.FAIL)
assert_equal(active_node.get_status(), NodeStatus.CANCEL)
def test_force(self):
active_node = node.Continue(name="continue")
timeout_active = time.Timeout(name="timed_continue",
timeout=0.5,
child=active_node)
result = timeout_active.tick()
assert_equal(result, NodeStatus.ACTIVE)
timeout_active.force(NodeStatus.SUCCESS)
rospy.sleep(0.5)
result = timeout_active.tick()
assert_equal(result, NodeStatus.SUCCESS)
assert_equal(active_node.get_status(), NodeStatus.CANCEL)
def test_cancel(self):
active_node = node.Continue(name="continue")
timeout_active = time.Timeout(name="timed_continue",
timeout=0.5,
child=active_node)
result = timeout_active.tick()
assert_equal(result, NodeStatus.ACTIVE)
timeout_active.cancel()
end = rospy.Time.now() + rospy.Duration(0.5)
while(rospy.Time.now() < end):
rospy.sleep(.1)
result = timeout_active.tick()
assert_equal(result, NodeStatus.CANCEL)
assert_equal(active_node.get_status(), NodeStatus.CANCEL)
class TestTimedWait(object):
def test_time(self):
timed_node = time.TimedWait(name="timer",
timeout=0.5)
result = timed_node.tick()
assert_equal(result, NodeStatus.ACTIVE)
rospy.sleep(0.5)
result = timed_node.tick()
assert_equal(result, NodeStatus.SUCCESS)
def test_force(self):
timed_node = time.TimedWait(name="timer",
timeout=0.5)
result = timed_node.tick()
assert_equal(result, NodeStatus.ACTIVE)
timed_node.force(NodeStatus.FAIL)
result = timed_node.tick()
assert_equal(result, NodeStatus.FAIL)
result = timed_node.tick()
assert_equal(result, NodeStatus.ACTIVE)
timed_node.force(NodeStatus.FAIL)
rospy.sleep(0.5)
result = timed_node.tick()
assert_equal(result, NodeStatus.FAIL)
def test_cancel(self):
timed_node = time.TimedWait(name="timer",
timeout=0.5)
result = timed_node.tick()
assert_equal(result, NodeStatus.ACTIVE)
timed_node.cancel()
result = timed_node.tick()
assert_equal(result, NodeStatus.CANCEL)
if __name__ == '__main__':
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
parser = argparse.ArgumentParser(description='Perform unit test.')
parser.add_argument(
'--gtest_output', nargs='?', default='test.xml')
args, unknown = parser.parse_known_args()
noseargs = [sys.argv[0], module_name, '--with-xunit',
'--xunit-file='+str(args.gtest_output.lstrip('xml:'))]
nose.run(argv=noseargs)
avg_line_length: 32.385621 | max_line_length: 77 | alphanum_fraction: 0.615338
Row 9
hexsha: f8ded3b55a23633eb09cc6c8ff67f56c9f72f0d7 | size: 7,433 | ext: py | lang: Python
path: train.py | repo: awesomephant/char-rnn-tensorflow | head: 922835519f08e598260f35ae8680279aaea6fd87 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from __future__ import print_function
import tensorflow as tf
import tfdeploy as td
import argparse
import time
import os
from six.moves import cPickle
from utils import TextLoader
from model import Model
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir', type=str, default='data/tinyshakespeare',
help='data directory containing input.txt')
parser.add_argument('--save_dir', type=str, default='save',
help='directory to store checkpointed models')
parser.add_argument('--log_dir', type=str, default='logs',
help='directory to store tensorboard logs')
parser.add_argument('--rnn_size', type=int, default=128,
help='size of RNN hidden state')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--model', type=str, default='lstm',
help='rnn, gru, lstm, or nas')
parser.add_argument('--batch_size', type=int, default=50,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=50,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=1000,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=5.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=0.002,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.97,
help='decay rate for rmsprop')
parser.add_argument('--output_keep_prob', type=float, default=1.0,
help='probability of keeping weights in the hidden layer')
parser.add_argument('--input_keep_prob', type=float, default=1.0,
help='probability of keeping weights in the input layer')
parser.add_argument('--init_from', type=str, default=None,
help="""continue training from saved model at this path. Path must contain files saved by previous training process:
'config.pkl' : configuration;
'chars_vocab.pkl' : vocabulary definitions;
'checkpoint' : paths to model file(s) (created by tf).
Note: this file contains absolute paths, be careful when moving files around;
'model.ckpt-*' : file(s) with model definition (created by tf)
""")
args = parser.parse_args()
train(args)
def train(args):
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
# check compatibility if training is continued from previously saved model
if args.init_from is not None:
# check if all necessary files exist
assert os.path.isdir(args.init_from)," %s must be a a path" % args.init_from
assert os.path.isfile(os.path.join(args.init_from,"config.pkl")),"config.pkl file does not exist in path %s"%args.init_from
assert os.path.isfile(os.path.join(args.init_from,"chars_vocab.pkl")),"chars_vocab.pkl.pkl file does not exist in path %s" % args.init_from
ckpt = tf.train.get_checkpoint_state(args.init_from)
assert ckpt, "No checkpoint found"
assert ckpt.model_checkpoint_path, "No model path found in checkpoint"
# open old config and check if models are compatible
with open(os.path.join(args.init_from, 'config.pkl'), 'rb') as f:
saved_model_args = cPickle.load(f)
need_be_same = ["model", "rnn_size", "num_layers", "seq_length"]
for checkme in need_be_same:
assert vars(saved_model_args)[checkme]==vars(args)[checkme],"Command line argument and saved model disagree on '%s' "%checkme
# open saved vocab/dict and check if vocabs/dicts are compatible
with open(os.path.join(args.init_from, 'chars_vocab.pkl'), 'rb') as f:
saved_chars, saved_vocab = cPickle.load(f)
assert saved_chars==data_loader.chars, "Data and loaded model disagree on character set!"
assert saved_vocab==data_loader.vocab, "Data and loaded model disagree on dictionary mappings!"
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
cPickle.dump(args, f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
cPickle.dump((data_loader.chars, data_loader.vocab), f)
model = Model(args)
with tf.Session() as sess:
# instrument for tensorboard
summaries = tf.summary.merge_all()
writer = tf.summary.FileWriter(
os.path.join(args.log_dir, time.strftime("%Y-%m-%d-%H-%M-%S")))
writer.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
# restore model
if args.init_from is not None:
saver.restore(sess, ckpt.model_checkpoint_path)
for e in range(args.num_epochs):
sess.run(tf.assign(model.lr,
args.learning_rate * (args.decay_rate ** e)))
data_loader.reset_batch_pointer()
state = sess.run(model.initial_state)
for b in range(data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch()
feed = {model.input_data: x, model.targets: y}
for i, (c, h) in enumerate(model.initial_state):
feed[c] = state[i].c
feed[h] = state[i].h
# instrument for tensorboard
summ, train_loss, state, _ = sess.run([summaries, model.cost, model.final_state, model.train_op], feed)
writer.add_summary(summ, e * data_loader.num_batches + b)
end = time.time()
print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
.format(e * data_loader.num_batches + b,
args.num_epochs * data_loader.num_batches,
e, train_loss, end - start))
if (e * data_loader.num_batches + b) % args.save_every == 0\
or (e == args.num_epochs-1 and
b == data_loader.num_batches-1):
# save for the last result
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path,
global_step=e * data_loader.num_batches + b)
# Save model using tfdepoly
# model = td.Model()
# model.add(y, sess) # y and all its ops and related tensors are added recursively
# model.save("model.pkl")
print("model saved to {}".format(checkpoint_path))
if __name__ == '__main__':
main()
avg_line_length: 51.262069 | max_line_length: 147 | alphanum_fraction: 0.593031
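The argparse block in train.py documents every training flag and its default, so a typical invocation would look roughly like the sketch below (flag names and values are simply the defaults restated; `--data_dir` must point at a directory containing input.txt):

```
python train.py --data_dir data/tinyshakespeare --save_dir save --log_dir logs \
    --model lstm --rnn_size 128 --num_layers 2 --batch_size 50 --seq_length 50 \
    --num_epochs 50 --learning_rate 0.002
```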
Row 10
hexsha: 3e4e50e437b8266101f1a9edd97e38e6976343a3 | size: 65 | ext: py | lang: Python
path: example/client/main.py | repo: ATenderholt/lambda-router | head: 240b47dd0aa44e18099676271cf20069f5c1977d | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
import requests
def handle(event, _):
print("Event:", event)
avg_line_length: 16.25 | max_line_length: 26 | alphanum_fraction: 0.676923
Row 11
hexsha: ddb81a757a58aa29245eeb4a9da21dbb75bbf265 | size: 2,787 | ext: py | lang: Python
path: gcloud/bigtable/_generated/duration_pb2.py | repo: scrapinghub/gcloud-python | head: 1ec6d636ebf2c4d618aca6b2485fbbfa5f0fde29 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 2 (2017-07-30T16:18:23.000Z to 2020-10-14T11:24:18.000Z)
content:
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/duration.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/duration.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42)\n\x13\x63om.google.protobufB\rDurationProtoP\x01\xa0\x01\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DURATION = _descriptor.Descriptor(
name='Duration',
full_name='google.protobuf.Duration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='google.protobuf.Duration.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nanos', full_name='google.protobuf.Duration.nanos', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['Duration'] = _DURATION
Duration = _reflection.GeneratedProtocolMessageType('Duration', (_message.Message,), dict(
DESCRIPTOR = _DURATION,
__module__ = 'google.protobuf.duration_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Duration)
))
_sym_db.RegisterMessage(Duration)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\023com.google.protobufB\rDurationProtoP\001\240\001\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.early_adopter import implementations as early_adopter_implementations
from grpc.framework.alpha import utilities as alpha_utilities
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
avg_line_length: 33.578313 | max_line_length: 252 | alphanum_fraction: 0.775745
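The descriptor in the generated module above defines a single message, google.protobuf.Duration, with two fields: seconds (int64, field 1) and nanos (int32, field 2). A hedged sketch of how the generated class would be used, assuming a protobuf runtime compatible with this old generated code:

```python
# Illustration only: construct the generated message and read its two fields.
d = Duration(seconds=3, nanos=500000000)   # 3.5 seconds
assert d.seconds == 3
assert d.nanos == 500000000
print(d.SerializeToString())               # wire-format bytes for the message
```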
Row 12
hexsha: 055475d6dfb9a738d8fdf8283a71d7d07cd72345 | size: 8,242 | ext: py | lang: Python
path: port_range/tests/test_port_range.py | repo: jeking3/port-range | head: 7d071b4d6124d9e926922d5fca714519a28b0d5c | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2016 Scaleway and Contributors. All Rights Reserved.
# Kevin Deldycke <kdeldycke@scaleway.com>
#
# Licensed under the BSD 2-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at https://opensource.org/licenses/BSD-2-Clause
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import unittest
from port_range import PortRange
class TestPortRange(unittest.TestCase):
def test_cidr_parsing(self):
self.assertEqual(PortRange('1027/15').bounds, (1027, 1028))
self.assertEqual(PortRange(' 1027 / 15 ').bounds, (1027, 1028))
def test_range_parsing(self):
# Normal range.
self.assertEqual(PortRange('42-4242').bounds, (42, 4242))
self.assertEqual(PortRange([42, 4242]).bounds, (42, 4242))
self.assertEqual(PortRange(['42', '4242']).bounds, (42, 4242))
# Single port.
self.assertEqual(PortRange('42').bounds, (42, 42))
self.assertEqual(PortRange(42).bounds, (42, 42))
self.assertEqual(PortRange([42]).bounds, (42, 42))
self.assertEqual(PortRange(['42']).bounds, (42, 42))
# Reversed order.
self.assertEqual(PortRange('4242-42').bounds, (42, 4242))
self.assertEqual(PortRange([4242, 42]).bounds, (42, 4242))
self.assertEqual(PortRange((4242, 42)).bounds, (42, 4242))
self.assertEqual(PortRange(('4242', '42')).bounds, (42, 4242))
self.assertEqual(PortRange(set([4242, 42])).bounds, (42, 4242))
self.assertEqual(PortRange(set(['4242', '42'])).bounds, (42, 4242))
# Invalid types and length.
self.assertRaises(ValueError, PortRange, [None, 42])
self.assertRaises(ValueError, PortRange, [42, None])
self.assertRaises(ValueError, PortRange, [42, 32, 3])
self.assertRaises(ValueError, PortRange, [42, None, 32, 3, -4])
def test_strict_range_parsing(self):
# Normal range.
self.assertEqual(
PortRange('42-4242', strict=True).bounds, (42, 4242))
self.assertEqual(
PortRange([42, 4242], strict=True).bounds, (42, 4242))
self.assertEqual(
PortRange(['42', '4242'], strict=True).bounds, (42, 4242))
# Single port.
self.assertEqual(PortRange('42', strict=True).bounds, (42, 42))
self.assertEqual(PortRange(42, strict=True).bounds, (42, 42))
self.assertEqual(PortRange([42], strict=True).bounds, (42, 42))
self.assertEqual(PortRange(['42'], strict=True).bounds, (42, 42))
# Reversed order.
self.assertRaises(ValueError, PortRange, [4242, 42], True)
self.assertRaises(ValueError, PortRange, '4242-42', True)
self.assertRaises(ValueError, PortRange, (4242, 42), True)
self.assertRaises(ValueError, PortRange, ('4242', '42'), True)
        # Casting a set to a list might end up with a naturally sorted list.
# self.assertRaises(ValueError, PortRange, set([4242, 42]), True)
# self.assertRaises(ValueError, PortRange, set(['4242', '42']), True)
# Invalid types and length.
self.assertRaises(ValueError, PortRange, [None, 42], True)
self.assertRaises(ValueError, PortRange, [42, None], True)
self.assertRaises(ValueError, PortRange, [42, 32, 3], True)
self.assertRaises(ValueError, PortRange, [42, None, 32, 3, -4], True)
def test_cidr_properties(self):
port = PortRange('1027/15')
self.assertEqual(port.base, 1027)
self.assertEqual(port.prefix, 15)
self.assertEqual(port.cidr, (1027, 15))
self.assertEqual(port.mask, 1)
self.assertEqual(port.offset, 3)
self.assertEqual(port.port_from, 1027)
self.assertEqual(port.port_to, 1028)
self.assertEqual(port.bounds, (1027, 1028))
def test_range_properties(self):
port = PortRange([4242, 42])
self.assertEqual(str(port), '42-4242')
self.assertEqual(port.base, 42)
self.assertEqual(port.prefix, None)
self.assertEqual(port.cidr, (42, None))
self.assertEqual(port.mask, None)
self.assertEqual(port.offset, 10)
self.assertEqual(port.port_from, 42)
self.assertEqual(port.port_to, 4242)
self.assertEqual(port.bounds, (42, 4242))
def test_normalization(self):
port = PortRange(' 0001234 ')
self.assertEqual(str(port), '1234')
self.assertEqual(port.base, 1234)
self.assertEqual(port.prefix, 16)
self.assertEqual(port.cidr, (1234, 16))
self.assertEqual(port.mask, 0)
self.assertEqual(port.offset, 210)
self.assertEqual(port.port_from, 1234)
self.assertEqual(port.port_to, 1234)
self.assertEqual(port.bounds, (1234, 1234))
# Upper-bound cap.
self.assertEqual(PortRange('64666/3').bounds, (64666, 65535))
def test_output_string(self):
self.assertEqual(str(PortRange('1027/15')), '1027/15')
self.assertEqual(str(PortRange([42, 4242])), '42-4242')
self.assertEqual(str(PortRange(42)), '42')
self.assertEqual(str(PortRange([1027, 1028])), '1027/15')
self.assertEqual(
repr(PortRange([1027, 1028])),
"PortRange(port_from=1027, port_to=1028, base=1027, offset=3, "
"prefix=15, mask=1, is_single_port=False, is_cidr=True)")
def test_cidr_string_rendering(self):
self.assertEqual(PortRange([32768, 65535]).cidr_string, '32768/1')
self.assertEqual(PortRange([32767, 65534]).cidr_string, '32767/1')
with self.assertRaises(ValueError):
PortRange([32767, 65535]).cidr_string
def test_validation(self):
# Test empty params
self.assertRaises(ValueError, PortRange, None)
self.assertRaises(ValueError, PortRange, [None])
self.assertRaises(ValueError, PortRange, [None, None])
# Invalid int
self.assertRaises(ValueError, PortRange, ' A233 ')
self.assertRaises(ValueError, PortRange, '1 2 3 4')
self.assertRaises(ValueError, PortRange, 'abcd')
# Test negative values
self.assertRaises(ValueError, PortRange, '-24')
self.assertRaises(ValueError, PortRange, '-24/3')
self.assertRaises(ValueError, PortRange, '1024/-3')
self.assertRaises(ValueError, PortRange, '-1000/-3')
self.assertRaises(ValueError, PortRange, '-3/-1000')
self.assertRaises(ValueError, PortRange, ' - 24')
self.assertRaises(ValueError, PortRange, ' - 24 / 3')
self.assertRaises(ValueError, PortRange, ' 1024 / - 3')
self.assertRaises(ValueError, PortRange, ' - 1000 / - 3')
self.assertRaises(ValueError, PortRange, ' - 3/ - 1000 ')
# Test maximums and minimums
self.assertRaises(ValueError, PortRange, '1024/0')
self.assertRaises(ValueError, PortRange, '1024/17')
self.assertRaises(ValueError, PortRange, '10-66666', strict=True)
# Test capping.
self.assertEqual(PortRange('0').bounds, (1, 1))
self.assertRaises(ValueError, PortRange, '0', True)
self.assertEqual(PortRange('66666').bounds, (65535, 65535))
self.assertRaises(ValueError, PortRange, '66666', True)
# Test notation
self.assertRaises(ValueError, PortRange, '23/')
self.assertRaises(ValueError, PortRange, '/4')
self.assertRaises(ValueError, PortRange, '/4')
self.assertRaises(ValueError, PortRange, '400-')
self.assertRaises(ValueError, PortRange, '123 / 123/ 123')
self.assertRaises(ValueError, PortRange, ' 123 - 31123- 1323')
def test_strict_mode(self):
# Test power of two port base
PortRange('257', strict=True)
PortRange('257/16', strict=True)
self.assertRaises(ValueError, PortRange, '257/4', strict=True)
# Test overflowing upper bound
self.assertRaises(ValueError, PortRange, '65535/8', strict=True)
def test_computation(self):
self.assertEqual(PortRange('2/3').bounds, (2, 8193))
self.assertEqual(PortRange('7/3').bounds, (7, 8198))
| 42.704663
| 79
| 0.637952
|
68078a17905b3a3e63802618a91656b5851ee832
| 61
|
py
|
Python
|
src/lesson_runtime_features/sys_exit.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | 3
|
2018-08-14T09:33:52.000Z
|
2022-03-21T12:31:58.000Z
|
src/lesson_runtime_features/sys_exit.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
src/lesson_runtime_features/sys_exit.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
import sys
exit_code = int(sys.argv[1])
sys.exit(exit_code)
| 12.2
| 28
| 0.737705
|
c2c3a0baa15f76a63154e5cedc20810cf65df216
| 2,917
|
py
|
Python
|
src/deep_nlp/grad_cam/utils/token/reshape_token_list.py
|
ENSAE-CKW/nlp_understanding
|
8d184666a107679f972a65bbe84374b849242924
|
[
"MIT"
] | 3
|
2021-05-02T17:51:18.000Z
|
2022-01-17T10:49:23.000Z
|
src/deep_nlp/grad_cam/utils/token/reshape_token_list.py
|
ENSAE-CKW/nlp_understanding
|
8d184666a107679f972a65bbe84374b849242924
|
[
"MIT"
] | 1
|
2021-05-11T14:18:38.000Z
|
2021-05-11T14:18:38.000Z
|
src/deep_nlp/grad_cam/utils/token/reshape_token_list.py
|
ENSAE-CKW/nlp_understanding
|
8d184666a107679f972a65bbe84374b849242924
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..utils import implemente_multiple_time
def reshape_token_to_plot(text, heatmap, threshold= 10):
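    """Reshape a flat token list and its heatmap into 2-D arrays for plotting.

    Inferred behaviour from the code below: tokens are grouped into rows of at
    most `threshold` tokens, each heatmap value is repeated once per character
    of its token, and both outputs are padded so every row has the same length.
    Returns (token_matrix, heatmap_matrix) as numpy arrays.
    """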
num_token= len(text)
token_adjusted= None
heatmap_adjusted= None
if num_token < threshold:
diff_size= threshold - num_token
text= text + ["" for i in range(diff_size)]
heatmap= np.array(heatmap.tolist() + [0 for i in range(diff_size)])
    # easy case: num_token is an exact multiple of threshold (e.g. 80 // 40 == 2 and 2*40 == 80)
entire_part= num_token // threshold
if entire_part * threshold == num_token:
num_rows= entire_part
else:
num_rows= entire_part + 1
    # Now determine the number of "columns" needed (= the max character count over all rows)
stock_num_character_per_row= []
step_index= 0
for i in range(num_rows): # TODO: delete those awful double "for" bound
max_token= threshold*(i+1)
tokens_rows= text[step_index:max_token]
token_per_row_len= 0
        for token in tokens_rows:
            if not isinstance(token, (str, np.str_)):
                raise ValueError("Need a str object")
token_per_row_len += len(token)
# Get the num of character per row
stock_num_character_per_row.append(token_per_row_len)
# Update index of first token to take
step_index= max_token
# Get the num of col needed to create a matrix [num_rows x num_col]
num_col= np.max(stock_num_character_per_row)
# With num col and rows, adjust heatmap dimension and token
heatmap_adjusted= []
token_adjusted= []
step_index = 0
for i in range(num_rows):
max_token = threshold * (i + 1)
heatmap_rows= heatmap[step_index:max_token]
tokens_rows = text[step_index:max_token]
token_adjusted.append(tokens_rows)
new_heatmap_row= []
for j in range(len(heatmap_rows)):
new_heatmap_row= implemente_multiple_time(new_heatmap_row
, value= heatmap_rows[j]
, times= len(tokens_rows[j]))
        # If the adjusted heatmap (one value per character) is shorter than num_col, pad with zeros
diff_len= num_col - len(new_heatmap_row)
# heatmap_adjusted.append(np.pad(new_heatmap_row, (0, diff_len)))
heatmap_adjusted.append(implemente_multiple_time(new_heatmap_row
, value= 0
, times= diff_len))
# Update index of first heatmap value to take (associated to a token)
step_index = max_token
    # Make sure the last row of tokens has exactly threshold entries
diff_len= threshold - len(token_adjusted[-1])
token_adjusted[-1]= token_adjusted[-1] + [""]*diff_len
return np.array(token_adjusted), np.array(heatmap_adjusted)
| 36.012346
| 97
| 0.613987
|
dd34bd033bea695a318d0f0db0839a9d30f23d54
| 566
|
py
|
Python
|
Mac/Lib/test/icgluetest.py
|
deadsnakes/python2.3
|
0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849
|
[
"PSF-2.0"
] | null | null | null |
Mac/Lib/test/icgluetest.py
|
deadsnakes/python2.3
|
0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849
|
[
"PSF-2.0"
] | null | null | null |
Mac/Lib/test/icgluetest.py
|
deadsnakes/python2.3
|
0b4a6871ca57123c10aa48cc2a5d2b7c0ee3c849
|
[
"PSF-2.0"
] | null | null | null |
"""Test icglue module by printing all preferences. Note that the ic module,
not the icglue module, is what you should normally use."""
import icglue
from Carbon import Res
ici = icglue.ICStart('Pyth')
#ici.ICFindConfigFile()
h = Res.Resource("")
ici.ICBegin(1)
numprefs = ici.ICCountPref()
print "Number of preferences:", numprefs
for i in range(1, numprefs+1):
key = ici.ICGetIndPref(i)
print "Key: ", key
h.data = ""
attrs = ici.ICFindPrefHandle(key, h)
print "Attr: ", attrs
print "Data: ", `h.data[:64]`
ici.ICEnd()
del ici
import sys
sys.exit(1)
| 19.517241
| 75
| 0.69788
|
0af6e06fc3d2db66475f82307a04b3927cbdafab
| 221
|
py
|
Python
|
roocs_utils/parameter/__init__.py
|
aulemahal/roocs-utils
|
2894c6b137b47f7eac57e650d7e7ad03dddfd75a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-26T09:47:29.000Z
|
2021-03-26T09:47:29.000Z
|
roocs_utils/parameter/__init__.py
|
aulemahal/roocs-utils
|
2894c6b137b47f7eac57e650d7e7ad03dddfd75a
|
[
"BSD-3-Clause"
] | 53
|
2020-08-14T08:16:24.000Z
|
2022-01-07T13:43:55.000Z
|
roocs_utils/parameter/__init__.py
|
aulemahal/roocs-utils
|
2894c6b137b47f7eac57e650d7e7ad03dddfd75a
|
[
"BSD-3-Clause"
] | 2
|
2020-09-02T19:16:23.000Z
|
2020-11-12T21:28:42.000Z
|
from .area_parameter import AreaParameter
from .collection_parameter import CollectionParameter
from .level_parameter import LevelParameter
from .parameterise import parameterise
from .time_parameter import TimeParameter
| 36.833333
| 53
| 0.886878
|
6c480a1d4895e350256d3aaba67b279f9a3d35dc
| 1,485
|
py
|
Python
|
setup.py
|
mariocesar/django-ltree
|
154c7e31dc004a753c5f6387680464a23510a8ce
|
[
"MIT"
] | 42
|
2018-10-06T15:29:25.000Z
|
2021-12-25T22:18:34.000Z
|
setup.py
|
mariocesar/django-ltree
|
154c7e31dc004a753c5f6387680464a23510a8ce
|
[
"MIT"
] | 8
|
2019-05-03T13:28:29.000Z
|
2021-05-28T14:12:47.000Z
|
setup.py
|
mariocesar/django-ltree
|
154c7e31dc004a753c5f6387680464a23510a8ce
|
[
"MIT"
] | 16
|
2019-04-06T11:50:54.000Z
|
2022-03-16T16:27:28.000Z
|
# -*- coding: UTF-8 -*-
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="django_ltree",
version="0.5.3",
python_requires=">=2.7",
url="https://github.com/mariocesar/django-ltree",
author="Mario César Señoranis Ayala",
author_email="mariocesar@humanzilla.com",
description="Django app to support ltree postgres extension",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
packages=find_packages(exclude=("example",)),
extras_require={"develop": ["twine", "tox"]},
install_requires=["django>=1.11", "six"],
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.1",
"Framework :: Django :: 2.2",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
project_urls={
"Source": "https://github.com/mariocesar/django_ltree",
"Tracker": "https://github.com/mariocesar/django_ltree/issues",
},
)
| 33.75
| 71
| 0.613468
|
8f419042222adb5f1e4ccb45819be60f7207c0aa
| 1,090
|
py
|
Python
|
phoenix/templates/templating.py
|
ThoughtWorksInc/Phoenix
|
b1fa1e53699dd218953f8964f3ddaba645b5b2b5
|
[
"Apache-2.0"
] | 2
|
2015-01-13T22:13:39.000Z
|
2021-05-21T11:16:44.000Z
|
phoenix/templates/templating.py
|
ThoughtWorksInc/Phoenix
|
b1fa1e53699dd218953f8964f3ddaba645b5b2b5
|
[
"Apache-2.0"
] | null | null | null |
phoenix/templates/templating.py
|
ThoughtWorksInc/Phoenix
|
b1fa1e53699dd218953f8964f3ddaba645b5b2b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 ThoughtWorks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as path
import phoenix
def copy_template(dest_dir, template_name):
# This looks daft, but we want to allow for values to be templated via pystache or similar...
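    # Example (hypothetical names): copy_template('/tmp/envs', 'dev_template.yml')
    # reads phoenix/templates/dev_template.yml and writes /tmp/envs/dev.yml,
    # because '_template' is stripped from the destination filename.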
with(open('%s/%s' % (path.dirname(phoenix.templates.__file__), template_name), 'r')) as env_template:
filecontents = env_template.read()
dest_filename = template_name.replace('_template', '')
with(open(path.join(dest_dir, dest_filename), 'w')) as dest_file:
dest_file.write(filecontents)
| 40.37037
| 105
| 0.734862
|
4148be54ae676bd9e855574ead806d9dd3afe866
| 521
|
py
|
Python
|
py/encrypt.py
|
davemannn/RailfenceCipher
|
14170c605e47236af4cee3b8db703a58752b91ce
|
[
"MIT"
] | null | null | null |
py/encrypt.py
|
davemannn/RailfenceCipher
|
14170c605e47236af4cee3b8db703a58752b91ce
|
[
"MIT"
] | null | null | null |
py/encrypt.py
|
davemannn/RailfenceCipher
|
14170c605e47236af4cee3b8db703a58752b91ce
|
[
"MIT"
] | null | null | null |
# ==============================================================================
#
# Use:
#   encrypt("Hello World!", 4)
# => "HWe o!lordll"
#
# ==============================================================================
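# Minimal sketch of the zig-zag rail assignment for n=4 over "Hello World!":
#   rail index per character: 0 1 2 3 2 1 0 1 2 3 2 1
#   rails joined: "HW" + "e o!" + "lord" + "ll" -> "HWe o!lordll"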
def encrypt(s,n):
fence = [[] for i in range(n)]
rail = 0
var = 1
for char in s:
fence[rail].append(char)
rail += var
if rail == n-1 or rail == 0:
var = -var
res = ''
for i in fence:
for j in i:
res += j
return res
| 19.296296
| 81
| 0.307102
|
4fdeaad528302a31c7bef13173975e2a4d0e1572
| 2,237
|
py
|
Python
|
lib/coginvasion/hood/DGHood.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | 1
|
2020-03-12T16:44:10.000Z
|
2020-03-12T16:44:10.000Z
|
lib/coginvasion/hood/DGHood.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
lib/coginvasion/hood/DGHood.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.DGHood
from panda3d.core import TransparencyAttrib
from direct.directnotify.DirectNotifyGlobal import directNotify
from ToonHood import ToonHood
import SkyUtil
from DGSafeZoneLoader import DGSafeZoneLoader
import DGTownLoader
from lib.coginvasion.globals import CIGlobals
from lib.coginvasion.holiday.HolidayManager import HolidayType
class DGHood(ToonHood):
notify = directNotify.newCategory('DGHood')
def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
self.id = CIGlobals.DaisyGardens
self.safeZoneLoader = DGSafeZoneLoader
self.townLoader = DGTownLoader.DGTownLoader
self.skyUtil = SkyUtil.SkyUtil()
self.storageDNAFile = 'phase_8/dna/storage_DG.pdna'
self.holidayDNAFile = None
if base.cr.holidayManager.getHoliday() == HolidayType.CHRISTMAS:
self.holidayDNAFile = 'phase_8/dna/winter_storage_DG.pdna'
self.skyFilename = 'phase_3.5/models/props/TT_sky.bam'
self.spookySkyFile = 'phase_3.5/models/props/BR_sky.bam'
self.titleColor = (0.4, 0.67, 0.18, 1.0)
self.loaderDoneEvent = 'DGHood-loaderDone'
return
def load(self):
ToonHood.load(self)
self.parentFSM.getStateNamed('DGHood').addChild(self.fsm)
def unload(self):
self.parentFSM.getStateNamed('DGHood').removeChild(self.fsm)
ToonHood.unload(self)
def startSuitEffect(self):
ToonHood.startSuitEffect(self)
if base.cr.playGame.getPlace():
base.cr.playGame.getPlace().stopBirds()
def stopSuitEffect(self, newSky=1):
if base.cr.playGame.getPlace():
base.cr.playGame.getPlace().startBirds()
ToonHood.stopSuitEffect(self, newSky)
def startSky(self):
ToonHood.startSky(self)
self.sky.setTransparency(TransparencyAttrib.MDual, 1)
self.skyUtil.startSky(self.sky)
def stopSky(self):
ToonHood.stopSky(self)
self.skyUtil.stopSky()
| 38.568966
| 104
| 0.703174
|
6d0d9f2cf48023db77dbd16a05a151be3f330f1c
| 1,106
|
py
|
Python
|
jetsonnano_yolov5.py
|
datavocals/hand-gesture-recognition
|
3915073364b2ff78a6a840b93272e1051db7b9ca
|
[
"Apache-2.0"
] | null | null | null |
jetsonnano_yolov5.py
|
datavocals/hand-gesture-recognition
|
3915073364b2ff78a6a840b93272e1051db7b9ca
|
[
"Apache-2.0"
] | null | null | null |
jetsonnano_yolov5.py
|
datavocals/hand-gesture-recognition
|
3915073364b2ff78a6a840b93272e1051db7b9ca
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import os
import sys
import subprocess
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'model/yolov5'))
from yolov5_model import Yolov5Model
from capture_frame_in_csi_camera import gstreamer_pipeline
# set speed when communicate with arduino
subprocess.Popen(["stty", "9600", "-F", "/dev/ttyACM0", "raw", "-echo"]) # arduino device id on ubuntu: /dev/ttyACM0
dev = os.open("/dev/ttyACM0", os.O_RDWR)
model = Yolov5Model("model/weights/best.pt")
cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
while True:
if cap.isOpened():
retVal, img = cap.read()
if retVal:
img = cv2.resize(img, (640, 480))
pred = model.infer(img)
            if pred is not None:
os.write(dev, (pred + "\n").encode())
print("sending %s" % pred)
else:
continue
else:
cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
| 29.891892
| 117
| 0.661844
|
19232c03f052ccae771982317f6d6cea25fd224f
| 2,867
|
py
|
Python
|
materialsproject.py
|
jamwine/BioMed
|
20a53e4d33b5759cd68d256eaf55811602bfeefc
|
[
"MIT"
] | null | null | null |
materialsproject.py
|
jamwine/BioMed
|
20a53e4d33b5759cd68d256eaf55811602bfeefc
|
[
"MIT"
] | null | null | null |
materialsproject.py
|
jamwine/BioMed
|
20a53e4d33b5759cd68d256eaf55811602bfeefc
|
[
"MIT"
] | null | null | null |
import pandas as pd
import json
import requests
import time
import datetime
from pymatgen import MPRester
from pymatgen.electronic_structure.plotter import DosPlotter, BSPlotter
def to_dict(obj):
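    """Serialize obj to a JSON string via each object's __dict__ (returns a str, not a dict, despite the name)."""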
return json.dumps(obj, default=lambda o: o.__dict__)
API_KEY='XXXXXXXXXXXX'
USER_AGENT='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
df_2d=pd.read_csv('C:/Users/Subrat.Subedi/Downloads/2D.csv')
for index, row in df_2d.iterrows():
baseurl = 'https://www.materialsproject.org/rest/v2/materials/%s'%(row['mpid'])
output = {}
try:
response = requests.get(baseurl+'/doc',headers={'x-api-key':API_KEY,'User-Agent':USER_AGENT}).json()
mp_json = response['response']
except KeyError:
m = MPRester(API_KEY)
data = m.get_data(row['mpid'])
mp_json = to_dict(data)
try:
output['name']=mp_json["exp"]["tags"][0]
except:
try:
output['name']=mp_json["pretty_formula"]
except:
continue
with open('C:/Users/Subrat.Subedi/Downloads/Data Collections/rex_stanford_xls/'+row['mpid']+'.json','w',encoding='utf-8') as rex:
json.dump(mp_json,rex)
output['authors']=["Materials Project"]
output['description']=mp_json["pretty_formula"]
output['resource_type']="Materials"
output['source_repository']=['https://materialsproject.org']
try:
isodate = datetime.datetime.strptime(mp_json['created_at'],'%Y-%m-%d %H:%M:%S')
output['version']=isodate.isoformat()
except KeyError:
output['version']=datetime.datetime.now().isoformat()
output['link_to_source']=['https://materialsproject.org/materials/'+row['mpid']]
output['direct_download_link']=['https://materialsproject.org/materials/'+row['mpid']]
output['tags']=mp_json['exp']['tags']
output['tags'].append('Materials Project')
output['tags'].append('2D Materials')
output['tags'].append('Stanford Collection')
reduced_json = {key:values for key, values in mp_json.items() if key not in ['snl','snl_final']}
output['bulk']=reduced_json
output['bulk']['formats']={'bibtex':{}}
try:
bibtex_format=mp_json['doi_bibtex'].replace("\n", "")
except:
time.sleep(1)
        bibtex_format=requests.get('https://materialsproject.org/materials/%s/bibtex'%(row['mpid']),headers={'x-api-key':API_KEY,'User-Agent':USER_AGENT}).text  # .content is a property, not callable; .text returns the str body
output['bulk']['formats']['bibtex']=bibtex_format
output['bulk']['Band_Gap']=row['Band_Gap']
output['bulk']['Bulk_Point']=row['Bulk_Point']
# output['bulk']['Mono_Point']=row['Mono_Point']
with open('C:/Users/Subrat.Subedi/Downloads/Data Collections/remap_stanford_xls/'+row['mpid']+'.json','w',encoding='utf-8') as f:
json.dump(output, f, ensure_ascii=False, indent=4)
time.sleep(1)
| 45.507937
| 164
| 0.66655
|
9e68e5206e6f9256a46ad0829b679c929bebacaa
| 978
|
py
|
Python
|
p_030_039/problem33.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
p_030_039/problem33.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
p_030_039/problem33.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
from fractions import Fraction
from functools import reduce
def main():
"""
Entry point
"""
# We consider fractions with two digits in num and denom, less than one
curious = []
for numerator in range(10, 100):
for denominator in range(numerator + 1, 100):
nums = numerator // 10, numerator % 10
denoms = denominator // 10, denominator % 10
# Skip "trivial" fractions and those we know won't simplify
if nums[1] == 0 or denoms[1] == 0:
continue
# Is this a "curious" fraction?
frac = Fraction(numerator, denominator)
if ((nums[0] == denoms[1] and Fraction(nums[1], denoms[0]) == frac) or
(nums[1] == denoms[0] and Fraction(nums[0], denoms[1]) == frac)):
curious.append(frac)
product = reduce(lambda a, b: a * b, curious, 1)
print(f"Product: {product}")
return
if __name__ == "__main__":
main()
| 31.548387
| 82
| 0.564417
|
9c9591c1905c1e6a9bbd7ebc487bd9f656c200ee
| 631
|
py
|
Python
|
manage.py
|
c727657851/xzf_project
|
6e570dbf0bb7bdddeb6dcfacdea0a85c3f686f8e
|
[
"Apache-2.0"
] | 1
|
2020-07-23T09:19:03.000Z
|
2020-07-23T09:19:03.000Z
|
manage.py
|
c727657851/xzf_project
|
6e570dbf0bb7bdddeb6dcfacdea0a85c3f686f8e
|
[
"Apache-2.0"
] | 5
|
2021-03-30T14:10:19.000Z
|
2022-01-13T03:07:44.000Z
|
manage.py
|
c727657851/xzf_project
|
6e570dbf0bb7bdddeb6dcfacdea0a85c3f686f8e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xfz_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.681818
| 75
| 0.684628
|
ebb31515216ff199f74399f4ec14a216e7cbe58a
| 188
|
py
|
Python
|
company_hero_python_test/companies/urls.py
|
gabrielboian/company-hero-python-test
|
155e85dc727f53684bf6a3593c9d102175d33f3a
|
[
"MIT"
] | null | null | null |
company_hero_python_test/companies/urls.py
|
gabrielboian/company-hero-python-test
|
155e85dc727f53684bf6a3593c9d102175d33f3a
|
[
"MIT"
] | null | null | null |
company_hero_python_test/companies/urls.py
|
gabrielboian/company-hero-python-test
|
155e85dc727f53684bf6a3593c9d102175d33f3a
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import CompanyView, ListCompanyUsersView
urlpatterns = [
path('', CompanyView.as_view()),
path('users', ListCompanyUsersView.as_view())
]
| 23.5
| 52
| 0.739362
|
b2bc9f290cb7f9c70d6cfb3b3285054667ab2a1d
| 24,480
|
py
|
Python
|
plugin.video.catchuptvandmore/resources/lib/channels/fr/6play.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 2
|
2018-11-02T19:55:30.000Z
|
2020-08-14T02:22:20.000Z
|
plugin.video.catchuptvandmore/resources/lib/channels/fr/6play.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | null | null | null |
plugin.video.catchuptvandmore/resources/lib/channels/fr/6play.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 3
|
2019-12-17T20:47:00.000Z
|
2021-02-11T19:03:59.000Z
|
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Original work (C) JUL1EN094, SPM, SylvainCecchetto
Copyright (C) 2016 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals unicode, as they are in Python 3
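# e.g. under Python 2 with this import, type('abc') is unicode rather than str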
from __future__ import unicode_literals
from builtins import str
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib.labels import LABELS
from resources.lib import web_utils
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_kodi_version, get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
from kodi_six import xbmc
from kodi_six import xbmcgui
# TO DO
# Playlists (e.g. the "Les blagues de Toto" case)
# Some DRM (m3u8) old videos are not working (Kamelot)
# Thank you (https://github.com/peak3d/plugin.video.simple)
# Url to get channel's categories
# e.g. Info, Divertissement, Séries, ...
# We get an id per category
URL_ROOT = 'http://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/%s/folders?limit=999&offset=0'
# Url to get category's programs
# e.g. Le meilleur patissier, La france à un incroyable talent, ...
# We get an id per program
URL_CATEGORY = 'http://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/6play/folders/%s/programs' \
'?limit=999&offset=0&csa=6&with=parentcontext'
# Url to get program's subfolders
# e.g. Saison 5, Les meilleurs moments, les recettes pas à pas, ...
# We get an id per subfolder
URL_SUBCATEGORY = 'http://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/6play/programs/%s' \
'?with=links,subcats,rights'
# Url to get shows list
# e.g. Episode 1, Episode 2, ...
URL_VIDEOS = 'http://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/6play/programs/%s/videos?' \
'csa=6&with=clips,freemiumpacks&type=vi,vc,playlist&limit=999'\
'&offset=0&subcat=%s&sort=subcat'
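# Example with hypothetical ids: URL_VIDEOS % ('1234', '42') gives
# '.../services/6play/programs/1234/videos?...&subcat=42&sort=subcat'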
URL_VIDEOS2 = 'https://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/6play/programs/%s/videos?' \
'csa=6&with=clips,freemiumpacks&type=vi&limit=999&offset=0'
URL_JSON_VIDEO = 'https://pc.middleware.6play.fr/6play/v2/platforms/' \
'm6group_web/services/6play/videos/%s'\
'?csa=6&with=clips,freemiumpacks'
URL_IMG = 'https://images.6play.fr/v1/images/%s/raw'
URL_COMPTE_LOGIN = 'https://login.6play.fr/accounts.login'
# https://login.6play.fr/accounts.login?loginID=*****&password=*******&targetEnv=mobile&format=jsonp&apiKey=3_hH5KBv25qZTd_sURpixbQW6a4OsiIzIEF2Ei_2H7TXTGLJb_1Hr4THKZianCQhWK&callback=jsonp_3bbusffr388pem4
# TODO get value Callback
# callback: jsonp_3bbusffr388pem4
URL_GET_JS_ID_API_KEY = 'https://www.6play.fr/connexion'
URL_API_KEY = 'https://www.6play.fr/client-%s.bundle.js'
# Id
URL_TOKEN_DRM = 'https://6play-users.6play.fr/v2/platforms/m6group_web/services/6play/users/%s/videos/%s/upfront-token'
# URL_LICENCE_KEY = 'https://lic.drmtoday.com/license-proxy-widevine/cenc/|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&Origin=https://www.6play.fr&Referer=%s&x-dt-auth-token=%s|R{SSM}|JBlicense'
URL_LICENCE_KEY = 'https://lic.drmtoday.com/license-proxy-widevine/cenc/|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense'
# Referer, Token
URL_LIVE_JSON = 'https://pc.middleware.6play.fr/6play/v2/platforms/m6group_web/services/6play/live?channel=%s&with=service_display_images,nextdiffusion,extra_data'
# Channel
DESIRED_QUALITY = Script.setting['quality']
def replay_entry(plugin, item_id, **kwargs):
"""
First executed function after replay_bridge
"""
return list_categories(plugin, item_id)
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- Tous les programmes
- Séries
- Informations
- ...
"""
if item_id == 'rtl2' or \
item_id == 'fun_radio' or \
item_id == 'courses' or \
item_id == '100foot':
resp = urlquick.get(URL_ROOT % item_id)
else:
resp = urlquick.get(URL_ROOT % (item_id + 'replay'))
json_parser = json.loads(resp.text)
for array in json_parser:
category_id = str(array['id'])
category_name = array['name']
item = Listitem()
item.label = category_name
item.set_callback(list_programs,
item_id=item_id,
category_id=category_id)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, category_id, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(URL_CATEGORY % category_id)
json_parser = json.loads(resp.text)
for array in json_parser:
item = Listitem()
program_title = array['title']
program_id = str(array['id'])
program_desc = array['description']
program_imgs = array['images']
program_img = ''
program_fanart = ''
for img in program_imgs:
if img['role'] == 'vignette':
external_key = img['external_key']
program_img = URL_IMG % (external_key)
elif img['role'] == 'carousel':
external_key = img['external_key']
program_fanart = URL_IMG % (external_key)
item.label = program_title
item.art['thumb'] = item.art['landscape'] = program_img
item.art['fanart'] = program_fanart
item.info['plot'] = program_desc
item.set_callback(list_program_categories,
item_id=item_id,
program_id=program_id)
item_post_treatment(item)
yield item
@Route.register
def list_program_categories(plugin, item_id, program_id, **kwargs):
"""
Build program categories
- Toutes les vidéos
- Tous les replay
- Saison 1
- ...
"""
resp = urlquick.get(URL_SUBCATEGORY % program_id)
json_parser = json.loads(resp.text)
for sub_category in json_parser['program_subcats']:
item = Listitem()
sub_category_id = str(sub_category['id'])
sub_category_title = sub_category['title']
item.label = sub_category_title
item.set_callback(list_videos,
item_id=item_id,
program_id=program_id,
sub_category_id=sub_category_id)
item_post_treatment(item)
yield item
item = Listitem()
item.label = plugin.localize(30701)
item.set_callback(list_videos,
item_id=item_id,
program_id=program_id,
sub_category_id=None)
item_post_treatment(item)
yield item
def populate_item(item, clip_dict):
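    """Best-effort fill of a Listitem from a 6play clip dict: duration, plot, air date and the 'vignette' image as thumb/fanart."""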
item.info['duration'] = clip_dict.get('duration', None)
item.info['plot'] = clip_dict.get('description', None)
try:
        aired = clip_dict['product']['last_diffusion'][:10]
item.info.date(aired, '%Y-%m-%d')
except Exception:
pass
program_imgs = clip_dict['images']
for img in program_imgs:
if img['role'] == 'vignette':
external_key = img['external_key']
program_img = URL_IMG % (external_key)
item.art['thumb'] = item.art['landscape'] = program_img
item.art['fanart'] = program_img
break
@Route.register
def list_videos(plugin, item_id, program_id, sub_category_id, **kwargs):
url = ''
if sub_category_id is None:
url = URL_VIDEOS2 % program_id
else:
url = URL_VIDEOS % (program_id, sub_category_id)
resp = urlquick.get(url)
json_parser = json.loads(resp.text)
if not json_parser:
plugin.notify(plugin.localize(LABELS['No videos found']), '')
yield False
for video in json_parser:
video_id = str(video['id'])
item = Listitem()
item.label = video['title']
is_downloadable = False
if get_kodi_version() < 18:
is_downloadable = True
if 'type' in video and video['type'] == 'playlist':
populate_item(item, video)
item.set_callback(get_playlist_urls,
item_id=item_id,
video_id=video_id,
url=url)
else:
populate_item(item, video['clips'][0])
item.set_callback(get_video_url,
item_id=item_id,
video_id=video_id)
item_post_treatment(item,
is_playable=True,
is_downloadable=is_downloadable)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_id,
download_mode=False,
**kwargs):
if get_kodi_version() < 18:
video_json = urlquick.get(URL_JSON_VIDEO % video_id,
headers={
'User-Agent': web_utils.get_random_ua(),
'x-customer-name': 'm6web'
},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser['clips'][0]['assets']
if video_assets is None:
plugin.notify('ERROR', plugin.localize(30721))
return False
final_video_url = ''
all_datas_videos_quality = []
all_datas_videos_path = []
for asset in video_assets:
if 'http_h264' in asset["type"]:
all_datas_videos_quality.append(asset["video_quality"])
all_datas_videos_path.append(asset['full_physical_path'])
elif 'h264' in asset["type"]:
manifest = urlquick.get(
asset['full_physical_path'],
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
if 'drm' not in manifest.text:
all_datas_videos_quality.append(asset["video_quality"])
all_datas_videos_path.append(asset['full_physical_path'])
if len(all_datas_videos_quality) == 0:
xbmcgui.Dialog().ok('Info', plugin.localize(30602))
return False
elif len(all_datas_videos_quality) == 1:
final_video_url = all_datas_videos_path[0]
else:
if DESIRED_QUALITY == "DIALOG":
seleted_item = xbmcgui.Dialog().select(
plugin.localize(LABELS['choose_video_quality']),
all_datas_videos_quality)
if seleted_item == -1:
return False
return all_datas_videos_path[seleted_item]
elif DESIRED_QUALITY == "BEST":
url_best = ''
i = 0
for data_video in all_datas_videos_quality:
if 'lq' not in data_video:
url_best = all_datas_videos_path[i]
i = i + 1
final_video_url = url_best
else:
final_video_url = all_datas_videos_path[0]
if download_mode:
return download.download_video(final_video_url)
return final_video_url
else:
resp_js_id = urlquick.get(URL_GET_JS_ID_API_KEY)
js_id = re.compile(r'client\-(.*?)\.bundle\.js').findall(
resp_js_id.text)[0]
resp = urlquick.get(URL_API_KEY % js_id)
api_key = re.compile(r'\"eu1.gigya.com\"\,key\:\"(.*?)\"').findall(
resp.text)[0]
if plugin.setting.get_string('6play.login') == '' or\
plugin.setting.get_string('6play.password') == '':
xbmcgui.Dialog().ok(
'Info',
plugin.localize(30604) % ('6play', 'https://www.6play.fr'))
return False
# Build PAYLOAD
payload = {
"loginID": plugin.setting.get_string('6play.login'),
"password": plugin.setting.get_string('6play.password'),
"apiKey": api_key,
"format": "jsonp",
"callback": "jsonp_3bbusffr388pem4"
}
# LOGIN
resp2 = urlquick.post(URL_COMPTE_LOGIN,
data=payload,
headers={
'User-Agent': web_utils.get_random_ua(),
'referer': 'https://www.6play.fr/connexion'
})
json_parser = json.loads(
resp2.text.replace('jsonp_3bbusffr388pem4(', '').replace(');', ''))
if "UID" not in json_parser:
plugin.notify('ERROR', '6play : ' + plugin.localize(30711))
return False
account_id = json_parser["UID"]
account_timestamp = json_parser["signatureTimestamp"]
account_signature = json_parser["UIDSignature"]
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
# Build PAYLOAD headers
payload_headers = {
'x-auth-gigya-signature': account_signature,
'x-auth-gigya-signature-timestamp': account_timestamp,
'x-auth-gigya-uid': account_id,
'x-customer-name': 'm6web'
}
token_json = urlquick.get(URL_TOKEN_DRM % (account_id, video_id),
headers=payload_headers,
max_age=-1)
token_jsonparser = json.loads(token_json.text)
token = token_jsonparser["token"]
video_json = urlquick.get(URL_JSON_VIDEO % video_id,
headers={
'User-Agent': web_utils.get_random_ua(),
'x-customer-name': 'm6web'
},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser['clips'][0]['assets']
if video_assets is None:
plugin.notify('ERROR', plugin.localize(30721))
return False
subtitle_url = ''
if plugin.setting.get_boolean('active_subtitle'):
for asset in video_assets:
if 'subtitle_vtt' in asset["type"]:
subtitle_url = asset['full_physical_path']
for asset in video_assets:
if 'usp_dashcenc_h264' in asset["type"]:
item = Listitem()
item.path = asset['full_physical_path']
if 'http' in subtitle_url:
item.subtitles.append(subtitle_url)
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
item.property['inputstreamaddon'] = 'inputstream.adaptive'
item.property['inputstream.adaptive.manifest_type'] = 'mpd'
item.property[
'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
item.property[
'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token
return item
for asset in video_assets:
if 'http_h264' in asset["type"]:
if "hd" in asset["video_quality"]:
item = Listitem()
item.path = asset['full_physical_path']
if 'http' in subtitle_url:
item.subtitles.append(subtitle_url)
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
return False
@Resolver.register
def get_playlist_urls(plugin,
item_id,
video_id,
url,
**kwargs):
resp = urlquick.get(url)
json_parser = json.loads(resp.text)
for video in json_parser:
current_video_id = str(video['id'])
if current_video_id != video_id:
continue
playlist_videos = []
for clip in video['clips']:
clip_id = str(clip['video_id'])
item = Listitem()
item.label = clip['title']
populate_item(item, clip)
video = get_video_url(
plugin,
item_id=item_id,
video_id=clip_id)
playlist_videos.append(video)
return playlist_videos
def live_entry(plugin, item_id, **kwargs):
return get_live_url(plugin, item_id, item_id.upper())
@Resolver.register
def get_live_url(plugin, item_id, video_id, **kwargs):
if item_id == 'fun_radio' or \
item_id == 'rtl2' or \
item_id == 'mb':
if item_id == 'mb':
video_json = urlquick.get(
URL_LIVE_JSON % (item_id.upper()),
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser[item_id.upper()][0]['live']['assets']
else:
video_json = urlquick.get(
URL_LIVE_JSON % (item_id),
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser[item_id][0]['live']['assets']
if not video_assets:
plugin.notify('INFO', plugin.localize(30716))
return False
subtitle_url = ''
if plugin.setting.get_boolean('active_subtitle'):
for asset in video_assets:
if 'subtitle_vtt' in asset["type"]:
subtitle_url = asset['full_physical_path']
for asset in video_assets:
if 'delta_hls_h264' in asset["type"]:
item = Listitem()
item.path = asset['full_physical_path']
if 'http' in subtitle_url:
item.subtitles.append(subtitle_url)
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
return False
else:
if get_kodi_version() < 18:
xbmcgui.Dialog().ok('Info', plugin.localize(30602))
return False
resp_js_id = urlquick.get(URL_GET_JS_ID_API_KEY)
js_id = re.compile(r'client\-(.*?)\.bundle\.js').findall(
resp_js_id.text)[0]
resp = urlquick.get(URL_API_KEY % js_id)
api_key = re.compile(r'\"eu1.gigya.com\"\,key\:\"(.*?)\"').findall(
resp.text)[0]
if plugin.setting.get_string('6play.login') == '' or\
plugin.setting.get_string('6play.password') == '':
xbmcgui.Dialog().ok(
'Info',
plugin.localize(30604) % ('6play', 'https://www.6play.fr'))
return False
# Build PAYLOAD
payload = {
"loginID": plugin.setting.get_string('6play.login'),
"password": plugin.setting.get_string('6play.password'),
"apiKey": api_key,
"format": "jsonp",
"callback": "jsonp_3bbusffr388pem4"
}
# LOGIN
resp2 = urlquick.post(URL_COMPTE_LOGIN,
data=payload,
headers={
'User-Agent': web_utils.get_random_ua(),
'referer': 'https://www.6play.fr/connexion'
})
json_parser = json.loads(
resp2.text.replace('jsonp_3bbusffr388pem4(', '').replace(');', ''))
if "UID" not in json_parser:
plugin.notify('ERROR', '6play : ' + plugin.localize(30711))
return False
account_id = json_parser["UID"]
account_timestamp = json_parser["signatureTimestamp"]
account_signature = json_parser["UIDSignature"]
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
# Build PAYLOAD headers
payload_headers = {
'x-auth-gigya-signature': account_signature,
'x-auth-gigya-signature-timestamp': account_timestamp,
'x-auth-gigya-uid': account_id,
'x-customer-name': 'm6web'
}
if item_id == '6ter':
token_json = urlquick.get(URL_TOKEN_DRM %
(account_id, 'dashcenc_%s' % '6T'),
headers=payload_headers,
max_age=-1)
else:
token_json = urlquick.get(
URL_TOKEN_DRM % (account_id, 'dashcenc_%s' % item_id.upper()),
headers=payload_headers,
max_age=-1)
token_jsonparser = json.loads(token_json.text)
token = token_jsonparser["token"]
if item_id == '6ter':
video_json = urlquick.get(
URL_LIVE_JSON % '6T',
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser['6T'][0]['live']['assets']
else:
video_json = urlquick.get(
URL_LIVE_JSON % (item_id.upper()),
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(video_json.text)
video_assets = json_parser[item_id.upper()][0]['live']['assets']
if not video_assets:
plugin.notify('INFO', plugin.localize(30716))
return False
subtitle_url = ''
if plugin.setting.get_boolean('active_subtitle'):
for asset in video_assets:
if 'subtitle_vtt' in asset["type"]:
subtitle_url = asset['full_physical_path']
for asset in video_assets:
if 'delta_dashcenc_h264' in asset["type"]:
item = Listitem()
item.path = asset['full_physical_path']
if 'http' in subtitle_url:
item.subtitles.append(subtitle_url)
item.property['inputstreamaddon'] = 'inputstream.adaptive'
item.property['inputstream.adaptive.manifest_type'] = 'mpd'
item.property[
'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
item.property[
'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % token
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
return False
| 36.81203
| 306
| 0.573284
|
81f4b8e01461777aa9344d7ae2c06231fb1df9f2
| 19,812
|
py
|
Python
|
tests/runtests.py
|
RinkeHoekstra/pyld
|
aa62815c687c8b21342852e34b440a956ee6e0d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/runtests.py
|
RinkeHoekstra/pyld
|
aa62815c687c8b21342852e34b440a956ee6e0d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/runtests.py
|
RinkeHoekstra/pyld
|
aa62815c687c8b21342852e34b440a956ee6e0d7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Test runner for JSON-LD.
.. module:: runtests
:synopsis: Test harness for pyld
.. moduleauthor:: Dave Longley
.. moduleauthor:: Olaf Conradi <olaf@conradi.org>
"""
from __future__ import print_function
import datetime
import json
import os
import sys
import traceback
import unittest
import re
from optparse import OptionParser
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'lib'))
from pyld import jsonld
try:
from unittest import TextTestResult
except ImportError:
from unittest import _TextTestResult as TextTestResult
__copyright__ = 'Copyright (c) 2011-2013 Digital Bazaar, Inc.'
__license__ = 'New BSD license'
# support python 2
if sys.version_info[0] >= 3:
basestring = str
ROOT_MANIFEST_DIR = None
SKIP_TESTS = []
HTTP_BASE = 'http://json-ld.org/test-suite'
HTTPS_BASE = 'https://json-ld.org/test-suite'
class TestRunner(unittest.TextTestRunner):
"""
Loads test manifests and runs tests.
"""
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1):
unittest.TextTestRunner.__init__(
self, stream, descriptions, verbosity)
# command line options
self.options = {}
self.parser = OptionParser()
def _makeResult(self):
return EarlTestResult(self.stream, self.descriptions, self.verbosity)
def main(self):
print('PyLD Tests')
print('Use -h or --help to view options.\n')
# add program options
self.parser.add_option('-m', '--manifest', dest='manifest',
help='The single test manifest to run', metavar='FILE')
self.parser.add_option('-d', '--directory', dest='directory',
help='The directory with the root manifest to run', metavar='DIR')
self.parser.add_option('-e', '--earl', dest='earl',
help='The filename to write an EARL report to')
self.parser.add_option('-b', '--bail', dest='bail',
action='store_true', default=False,
help='Bail out as soon as any test fails')
self.parser.add_option('-l', '--loader', dest='loader',
default='requests',
help='The remote URL document loader: requests, aiohttp '
'[default: %default]')
self.parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='Print verbose test data')
# parse command line options
(self.options, args) = self.parser.parse_args()
# ensure a manifest or a directory was specified
if self.options.manifest is None and self.options.directory is None:
raise Exception('No test manifest or directory specified.')
# Set a default JSON-LD document loader
if self.options.loader == 'requests':
jsonld._default_document_loader = jsonld.requests_document_loader()
elif self.options.loader == 'aiohttp':
jsonld._default_document_loader = jsonld.aiohttp_document_loader()
# config runner
self.failfast = self.options.bail
# get root manifest filename
if self.options.manifest:
global HTTP_BASE
global HTTPS_BASE
HTTP_BASE = 'http://json-ld.org/test-suite/tests'
HTTPS_BASE = 'https://json-ld.org/test-suite/tests'
filename = os.path.abspath(self.options.manifest)
else:
filename = os.path.abspath(
os.path.join(self.options.directory, 'manifest.jsonld'))
# load root manifest
global ROOT_MANIFEST_DIR
ROOT_MANIFEST_DIR = os.path.dirname(filename)
root_manifest = read_json(filename)
suite = Manifest(root_manifest, filename).load()
# run tests
result = self.run(suite)
# output earl report if specified
if self.options.earl:
filename = os.path.abspath(self.options.earl)
print('Writing EARL report to: %s' % filename)
result.writeReport(filename)
if not result.wasSuccessful():
exit(1)
class Manifest:
def __init__(self, data, filename):
self.data = data
self.suite = unittest.TestSuite()
self.filename = filename
self.dirname = os.path.dirname(self.filename)
def load(self):
entries = []
# get entries and sequence (alias for entries)
entries.extend(get_jsonld_values(self.data, 'entries'))
entries.extend(get_jsonld_values(self.data, 'sequence'))
# add includes to entries as jsonld files
includes = get_jsonld_values(self.data, 'include')
for filename in includes:
entries.append(filename + '.jsonld')
for entry in entries:
if isinstance(entry, basestring):
filename = os.path.join(self.dirname, entry)
entry = read_json(filename)
else:
filename = self.filename
# entry is another manifest
if is_jsonld_type(entry, 'mf:Manifest'):
self.suite = unittest.TestSuite(
[self.suite, Manifest(entry, filename).load()])
# assume entry is a test
else:
self.suite.addTest(Test(self, entry, filename))
return self.suite
class Test(unittest.TestCase):
def __init__(self, manifest, data, filename):
unittest.TestCase.__init__(self)
#self.maxDiff = None
self.manifest = manifest
self.data = data
self.filename = filename
self.dirname = os.path.dirname(filename)
self.is_positive = is_jsonld_type(data, 'jld:PositiveEvaluationTest')
self.is_negative = is_jsonld_type(data, 'jld:NegativeEvaluationTest')
self.test_type = None
global TEST_TYPES
for t in TEST_TYPES.keys():
if is_jsonld_type(data, t):
self.test_type = t
break
def __str__(self):
manifest = self.manifest.data.get(
'name', self.manifest.data.get('label'))
test_id = self.data.get('id', self.data.get('@id'))
label = self.data.get(
'purpose', self.data.get('name', self.data.get('label')))
return ('%s: %s: %s' % (manifest, test_id, label))
def _get_expect_property(self):
'''Find the expected output property or raise error.'''
if 'expect' in self.data:
return 'expect'
elif 'result' in self.data:
return 'result'
else:
raise Exception('No expected output property found')
def setUp(self):
data = self.data
manifest = self.manifest
# skip unknown and explicitly skipped test types
global SKIP_TESTS
types = []
types.extend(get_jsonld_values(data, '@type'))
types.extend(get_jsonld_values(data, 'type'))
if self.test_type is None or self.test_type in SKIP_TESTS:
self.skipTest('Test type of %s' % types)
global TEST_TYPES
test_info = TEST_TYPES[self.test_type]
# skip based on regular expression
skip_re = test_info.get('skip', {}).get('regex', [])
for regex in skip_re:
if re.match(regex, data.get('@id', '')):
self.skipTest('Test with regex %s' % regex)
# skip based on processingMode
skip_pm = test_info.get('skip', {}).get('processingMode', [])
data_pm = data.get('option', {}).get('processingMode', None)
if data_pm in skip_pm:
self.skipTest('Test with processingMode %s' % data_pm)
# skip based on specVersion
skip_sv = test_info.get('skip', {}).get('specVersion', [])
data_sv = data.get('option', {}).get('specVersion', None)
if data_sv in skip_sv:
self.skipTest('Test with specVersion %s' % data_sv)
# expand @id and input base
if 'baseIri' in manifest.data:
data['@id'] = (
manifest.data['baseIri'] +
os.path.basename(manifest.filename) + data['@id'])
self.base = self.manifest.data['baseIri'] + data['input']
def runTest(self):
data = self.data
global TEST_TYPES
test_info = TEST_TYPES[self.test_type]
fn = test_info['fn']
params = test_info['params']
params = [param(self) for param in params]
result = None
if self.is_negative:
expect = data[self._get_expect_property()]
else:
expect = read_test_property(self._get_expect_property())(self)
try:
result = getattr(jsonld, fn)(*params)
if self.is_negative:
raise AssertionError('Expected an error; one was not raised')
self.assertEqual(result, expect)
except Exception as e:
if not self.is_negative:
if not isinstance(e, AssertionError):
print('\n')
traceback.print_exc(file=sys.stdout)
else:
print('\nEXPECTED: ', json.dumps(expect, indent=2))
print('ACTUAL: ', json.dumps(result, indent=2))
raise e
result = get_jsonld_error_code(e)
self.assertEqual(result, expect)
def is_jsonld_type(node, type_):
node_types = []
node_types.extend(get_jsonld_values(node, '@type'))
node_types.extend(get_jsonld_values(node, 'type'))
types = type_ if isinstance(type_, list) else [type_]
return len(set(node_types).intersection(set(types))) > 0
def get_jsonld_values(node, property):
rval = []
if property in node:
rval = node[property]
if not isinstance(rval, list):
rval = [rval]
return rval
def get_jsonld_error_code(err):
if isinstance(err, jsonld.JsonLdError):
if err.code:
return err.code
elif err.cause:
return get_jsonld_error_code(err.cause)
return str(err)
def read_json(filename):
with open(filename) as f:
return json.load(f)
def read_file(filename):
with open(filename) as f:
if sys.version_info[0] >= 3:
return f.read()
else:
return f.read().decode('utf8')
def read_test_url(property):
def read(test):
if property not in test.data:
return None
if 'baseIri' in test.manifest.data:
return test.manifest.data['baseIri'] + test.data[property]
else:
return test.data[property]
return read
def read_test_property(property):
def read(test):
if property not in test.data:
return None
filename = os.path.join(test.dirname, test.data[property])
if filename.endswith('.jsonld'):
return read_json(filename)
else:
return read_file(filename)
return read
def create_test_options(opts=None):
def create(test):
http_options = ['contentType', 'httpLink', 'httpStatus', 'redirectTo']
test_options = test.data.get('option', {})
options = {}
for k, v in test_options.items():
if k not in http_options:
options[k] = v
options['documentLoader'] = create_document_loader(test)
options.update(opts or {})
if 'expandContext' in options:
filename = os.path.join(test.dirname, options['expandContext'])
options['expandContext'] = read_json(filename)
return options
return create
def create_document_loader(test):
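    """Wrap the default JSON-LD document loader so test-suite URLs are served from
    local files under ROOT_MANIFEST_DIR; other URLs (and the 'Remote document'
    manifest) still go through the remote loader."""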
loader = jsonld.get_document_loader()
def is_test_suite_url(url):
global HTTP_BASE
global HTTPS_BASE
return url.startswith(HTTP_BASE) or url.startswith(HTTPS_BASE)
def strip_base(url):
global HTTP_BASE
global HTTPS_BASE
if url.startswith(HTTP_BASE):
return url[len(HTTP_BASE):]
elif url.startswith(HTTPS_BASE):
return url[len(HTTPS_BASE):]
else:
            raise Exception('unknown base')
def load_locally(url):
doc = {'contextUrl': None, 'documentUrl': url, 'document': None}
options = test.data.get('option')
if options and url == test.base:
if ('redirectTo' in options and options.get('httpStatus') >= 300):
doc['documentUrl'] = (
test.manifest.data['baseIri'] + options['redirectTo'])
elif 'httpLink' in options:
content_type = options.get('contentType')
if not content_type and url.endswith('.jsonld'):
content_type = 'application/ld+json'
link_header = options.get('httpLink', '')
if isinstance(link_header, list):
link_header = ','.join(link_header)
link_header = jsonld.parse_link_header(
link_header).get('http://www.w3.org/ns/json-ld#context')
if link_header and content_type != 'application/ld+json':
if isinstance(link_header, list):
raise Exception('multiple context link headers')
doc['contextUrl'] = link_header['target']
global ROOT_MANIFEST_DIR
if doc['documentUrl'].find(':') == -1:
filename = os.path.join(ROOT_MANIFEST_DIR, doc['documentUrl'])
doc['documentUrl'] = 'file://' + filename
else:
#filename = os.path.join(
# ROOT_MANIFEST_DIR, doc['documentUrl'][len(base):])
filename = ROOT_MANIFEST_DIR + strip_base(doc['documentUrl'])
try:
doc['document'] = read_json(filename)
except:
raise Exception('loading document failed')
return doc
def local_loader(url):
# always load remote-doc and non-base tests remotely
if ((not is_test_suite_url(url) and url.find(':') != -1) or
test.manifest.data.get('name') == 'Remote document'):
return loader(url)
# attempt to load locally
return load_locally(url)
return local_loader
class EarlTestResult(TextTestResult):
def __init__(self, stream, descriptions, verbosity):
TextTestResult.__init__(self, stream, descriptions, verbosity)
self.report = EarlReport()
def addError(self, test, err):
TextTestResult.addError(self, test, err)
self.report.add_assertion(test, False)
def addFailure(self, test, err):
TextTestResult.addFailure(self, test, err)
self.report.add_assertion(test, False)
def addSuccess(self, test):
TextTestResult.addSuccess(self, test)
self.report.add_assertion(test, True)
def writeReport(self, filename):
self.report.write(filename)
class EarlReport():
"""
Generates an EARL report.
"""
def __init__(self):
self.report = {
'@context': {
'doap': 'http://usefulinc.com/ns/doap#',
'foaf': 'http://xmlns.com/foaf/0.1/',
'dc': 'http://purl.org/dc/terms/',
'earl': 'http://www.w3.org/ns/earl#',
'xsd': 'http://www.w3.org/2001/XMLSchema#',
'doap:homepage': {'@type': '@id'},
'doap:license': {'@type': '@id'},
'dc:creator': {'@type': '@id'},
'foaf:homepage': {'@type': '@id'},
'subjectOf': {'@reverse': 'earl:subject'},
'earl:assertedBy': {'@type': '@id'},
'earl:mode': {'@type': '@id'},
'earl:test': {'@type': '@id'},
'earl:outcome': {'@type': '@id'},
'dc:date': {'@type': 'xsd:date'}
},
'@id': 'https://github.com/digitalbazaar/pyld',
'@type': [
'doap:Project',
'earl:TestSubject',
'earl:Software'
],
'doap:name': 'PyLD',
'dc:title': 'PyLD',
'doap:homepage': 'https://github.com/digitalbazaar/pyld',
'doap:license': 'https://github.com/digitalbazaar/pyld/blob/master/LICENSE',
'doap:description': 'A JSON-LD processor for Python',
'doap:programming-language': 'Python',
'dc:creator': 'https://github.com/dlongley',
'doap:developer': {
'@id': 'https://github.com/dlongley',
'@type': [
'foaf:Person',
'earl:Assertor'
],
'foaf:name': 'Dave Longley',
'foaf:homepage': 'https://github.com/dlongley'
},
'dc:date': {
'@value': datetime.datetime.utcnow().strftime('%Y-%m-%d'),
'@type': 'xsd:date'
},
'subjectOf': []
}
def add_assertion(self, test, success):
self.report['subjectOf'].append({
'@type': 'earl:Assertion',
'earl:assertedBy': self.report['doap:developer']['@id'],
'earl:mode': 'earl:automatic',
'earl:test': test.data.get('id', test.data.get('@id')),
'earl:result': {
'@type': 'earl:TestResult',
'dc:date': datetime.datetime.utcnow().isoformat(),
'earl:outcome': 'earl:passed' if success else 'earl:failed'
}
})
return self
def write(self, filename):
with open(filename, 'w') as f:
f.write(json.dumps(self.report, indent=2))
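# Illustrative sketch: EarlReport only needs an object whose .data mapping
# carries an 'id' (or '@id') entry, so it can also be driven outside of the
# unittest machinery. The test id below is a made-up placeholder.
def _example_standalone_earl_report(path='earl-report.jsonld'):
    class _FakeTest(object):
        data = {'id': 'https://example.org/manifest#t0001'}
    report = EarlReport()
    report.add_assertion(_FakeTest(), True)
    report.write(path)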
# supported test types
TEST_TYPES = {
'jld:CompactTest': {
'skip': {
'specVersion': ['json-ld-1.0']
},
'fn': 'compact',
'params': [
read_test_url('input'),
read_test_property('context'),
create_test_options()
]
},
'jld:ExpandTest': {
'fn': 'expand',
'params': [
read_test_url('input'),
create_test_options()
]
},
'jld:FlattenTest': {
'fn': 'flatten',
'params': [
read_test_url('input'),
read_test_property('context'),
create_test_options()
]
},
'jld:FrameTest': {
'fn': 'frame',
'params': [
read_test_url('input'),
read_test_property('frame'),
create_test_options()
]
},
'jld:FromRDFTest': {
'skip': {
'regex': ['#t0023']
},
'fn': 'from_rdf',
'params': [
read_test_property('input'),
create_test_options({'format': 'application/n-quads'})
]
},
'jld:NormalizeTest': {
'fn': 'normalize',
'params': [
read_test_property('input'),
create_test_options({'format': 'application/n-quads'})
]
},
'jld:ToRDFTest': {
'fn': 'to_rdf',
'params': [
read_test_url('input'),
create_test_options({'format': 'application/n-quads'})
]
},
'rdfn:Urgna2012EvalTest': {
'fn': 'normalize',
'params': [
read_test_property('action'),
create_test_options({
'algorithm': 'URGNA2012',
'inputFormat': 'application/n-quads',
'format': 'application/n-quads'
})
]
},
'rdfn:Urdna2015EvalTest': {
'fn': 'normalize',
'params': [
read_test_property('action'),
create_test_options({
'algorithm': 'URDNA2015',
'inputFormat': 'application/n-quads',
'format': 'application/n-quads'
})
]
}
}
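# Illustrative note (assumptions flagged): each TEST_TYPES entry names the
# jsonld function to call ('fn') and a list of parameter factories ('params').
# Assuming each factory returns a callable that takes the test object (as
# create_test_options() above does), a runner can dispatch roughly like:
#
#     entry = TEST_TYPES[test_type]
#     args = [make_param(test) for make_param in entry['params']]
#     result = getattr(jsonld, entry['fn'])(*args)
#
# The optional 'skip' entries suggest tests are filtered before dispatching.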
if __name__ == '__main__':
TestRunner(verbosity=2).main()
| 33.409781 | 88 | 0.561629 |

346ca808edbe462de6ed7af1aa05a9ebb2053f7f | 95,889 | py | Python | packages/maya-2017-win64/sipconfig.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | ["Apache-2.0"] | 2 | 2019-02-22T03:33:26.000Z | 2019-02-23T03:29:26.000Z | packages/maya-2017-win64/sipconfig.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | ["Apache-2.0"] | null | null | null | packages/maya-2017-win64/sipconfig.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | ["Apache-2.0"] | null | null | null |
# This module is intended to be used by the build/installation scripts of
# extension modules created with SIP. It provides information about file
# locations, version numbers etc., and provides some classes and functions.
#
# Copyright (c) 2014 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import sys
import os
import stat
import string
import re
# These are installation specific values created when SIP was configured.
_pkg_config = {
'arch': '',
'default_bin_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python',
'default_mod_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\Lib\\site-packages',
'default_sip_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\sip',
'deployment_target': '',
'platform': 'win32-msvc2008',
'py_conf_inc_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\include',
'py_inc_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\include',
'py_lib_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\libs',
'py_version': 0x020703,
'sip_bin': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\sip',
'sip_config_args': '',
'sip_inc_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\include',
'sip_mod_dir': 'C:\\Program Files\\Autodesk\\Maya2014\\Python\\Lib\\site-packages',
'sip_version': 0x040f04,
'sip_version_str': '4.15.4',
'universal': ''
}
_default_macros = {
'AIX_SHLIB': '',
'AR': '',
'CC': 'cl',
'CFLAGS': '-nologo -Zm200 -Zc:wchar_t-',
'CFLAGS_APP': '',
'CFLAGS_CONSOLE': '',
'CFLAGS_DEBUG': '-Zi -MDd',
'CFLAGS_EXCEPTIONS_OFF': '',
'CFLAGS_EXCEPTIONS_ON': '',
'CFLAGS_MT': '',
'CFLAGS_MT_DBG': '',
'CFLAGS_MT_DLL': '',
'CFLAGS_MT_DLLDBG': '',
'CFLAGS_RELEASE': '-O2 -MD',
'CFLAGS_RTTI_OFF': '',
'CFLAGS_RTTI_ON': '',
'CFLAGS_SHLIB': '',
'CFLAGS_STL_OFF': '',
'CFLAGS_STL_ON': '',
'CFLAGS_THREAD': '',
'CFLAGS_WARN_OFF': '-W0',
'CFLAGS_WARN_ON': '-W3',
'CHK_DIR_EXISTS': 'if not exist',
'CONFIG': 'qt warn_on release incremental flat link_prl precompile_header autogen_precompile_source copy_dir_files debug_and_release debug_and_release_target embed_manifest_dll embed_manifest_exe',
'COPY': 'copy /y',
'CXX': 'cl',
'CXXFLAGS': '-nologo -Zm200 -Zc:wchar_t-',
'CXXFLAGS_APP': '',
'CXXFLAGS_CONSOLE': '',
'CXXFLAGS_DEBUG': '-Zi -MDd',
'CXXFLAGS_EXCEPTIONS_OFF': '',
'CXXFLAGS_EXCEPTIONS_ON': '-EHsc',
'CXXFLAGS_MT': '',
'CXXFLAGS_MT_DBG': '',
'CXXFLAGS_MT_DLL': '',
'CXXFLAGS_MT_DLLDBG': '',
'CXXFLAGS_RELEASE': '-O2 -MD',
'CXXFLAGS_RTTI_OFF': '',
'CXXFLAGS_RTTI_ON': '-GR',
'CXXFLAGS_SHLIB': '',
'CXXFLAGS_STL_OFF': '',
'CXXFLAGS_STL_ON': '-EHsc',
'CXXFLAGS_THREAD': '',
'CXXFLAGS_WARN_OFF': '-W0',
'CXXFLAGS_WARN_ON': '-W3 -w34100 -w34189',
'DEFINES': 'UNICODE WIN32 QT_LARGEFILE_SUPPORT',
'DEL_FILE': 'del',
'EXTENSION_PLUGIN': '',
'EXTENSION_SHLIB': '',
'INCDIR': '',
'INCDIR_OPENGL': '',
'INCDIR_X11': '',
'LFLAGS': '/NOLOGO /DYNAMICBASE /NXCOMPAT',
'LFLAGS_CONSOLE': '/SUBSYSTEM:CONSOLE',
'LFLAGS_CONSOLE_DLL': '',
'LFLAGS_DEBUG': '/DEBUG',
'LFLAGS_DLL': '/DLL',
'LFLAGS_OPENGL': '',
'LFLAGS_PLUGIN': '',
'LFLAGS_RELEASE': '/INCREMENTAL:NO',
'LFLAGS_RPATH': '',
'LFLAGS_SHLIB': '',
'LFLAGS_SONAME': '',
'LFLAGS_THREAD': '',
'LFLAGS_WINDOWS': '/SUBSYSTEM:WINDOWS',
'LFLAGS_WINDOWS_DLL': '',
'LIB': 'lib /NOLOGO',
'LIBDIR': '',
'LIBDIR_OPENGL': '',
'LIBDIR_X11': '',
'LIBS': '',
'LIBS_CONSOLE': '',
'LIBS_CORE': 'kernel32.lib user32.lib shell32.lib uuid.lib ole32.lib advapi32.lib ws2_32.lib',
'LIBS_GUI': 'gdi32.lib comdlg32.lib oleaut32.lib imm32.lib winmm.lib winspool.lib ws2_32.lib ole32.lib user32.lib advapi32.lib',
'LIBS_NETWORK': 'ws2_32.lib',
'LIBS_OPENGL': 'glu32.lib opengl32.lib gdi32.lib user32.lib',
'LIBS_RT': '',
'LIBS_RTMT': '',
'LIBS_THREAD': '',
'LIBS_WEBKIT': '',
'LIBS_WINDOWS': '',
'LIBS_X11': '',
'LINK': 'link',
'LINK_SHLIB': '',
'LINK_SHLIB_CMD': '',
'MAKEFILE_GENERATOR': 'MSVC.NET',
'MKDIR': 'mkdir',
'RANLIB': '',
'RPATH': '',
'STRIP': ''
}
# The stack of configuration dictionaries.
_config_stack = []
class Configuration(object):
"""The class that represents SIP configuration values.
"""
def __init__(self, sub_cfg=None):
"""Initialise an instance of the class.
sub_cfg is the list of sub-class configurations. It should be None
when called normally.
"""
# Find the build macros in the closest imported module from where this
# was originally defined.
self._macros = None
for cls in self.__class__.__mro__:
if cls is object:
continue
mod = sys.modules[cls.__module__]
if hasattr(mod, "_default_macros"):
self._macros = mod._default_macros
break
if sub_cfg:
cfg = sub_cfg
else:
cfg = []
cfg.append(_pkg_config)
global _config_stack
_config_stack = cfg
def __getattr__(self, name):
"""Allow configuration values and user options to be handled as
instance variables.
name is the name of the configuration value or user option.
"""
for cfg in _config_stack:
try:
return cfg[name]
except KeyError:
pass
raise AttributeError("\"%s\" is not a valid configuration value or user option" % name)
def build_macros(self):
"""Return the dictionary of platform specific build macros.
"""
return self._macros
def set_build_macros(self, macros):
"""Set the dictionary of build macros to be use when generating
Makefiles.
macros is the dictionary of platform specific build macros.
"""
self._macros = macros
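# A minimal usage sketch (not part of the original module): Configuration
# exposes the _pkg_config values as attributes via __getattr__, and
# build_macros() returns the platform macros found in _default_macros above.
# The helper below is illustrative only and is never called by sipconfig.
def _example_show_configuration():
    cfg = Configuration()
    return cfg.sip_version_str, cfg.default_mod_dir, sorted(cfg.build_macros())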
class _UniqueList:
"""A limited list that ensures all its elements are unique.
"""
def __init__(self, value=None):
"""Initialise the instance.
value is the initial value of the list.
"""
if value is None:
self._list = []
else:
self._list = value
def append(self, value):
"""Append a value to the list if it isn't already present.
value is the value to append.
"""
if value not in self._list:
self._list.append(value)
def lextend(self, value):
"""A normal list extend ignoring the uniqueness.
value is the list of elements to append.
"""
self._list.extend(value)
def extend(self, value):
"""Append each element of a value to a list if it isn't already
present.
value is the list of elements to append.
"""
for el in value:
self.append(el)
def as_list(self):
"""Return the list as a raw list.
"""
return self._list
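# Illustrative only: _UniqueList silently drops duplicates on append() and
# extend(), while lextend() keeps them.
def _example_unique_list():
    flags = _UniqueList(["-O2"])
    flags.append("-O2")            # ignored, already present
    flags.extend(["-O2", "-MD"])   # only "-MD" is added
    return flags.as_list()         # ["-O2", "-MD"]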
class _Macro:
"""A macro that can be manipulated as a list.
"""
def __init__(self, name, value):
"""Initialise the instance.
name is the name of the macro.
value is the initial value of the macro.
"""
self._name = name
self.set(value)
def set(self, value):
"""Explicitly set the value of the macro.
value is the new value. It may be a string, a list of strings or a
_UniqueList instance.
"""
self._macro = []
if isinstance(value, _UniqueList):
value = value.as_list()
if type(value) == list:
self.extend(value)
else:
self.append(value)
def append(self, value):
"""Append a value to the macro.
value is the value to append.
"""
if value:
self._macro.append(value)
def extend(self, value):
"""Append each element of a value to the macro.
value is the list of elements to append.
"""
for el in value:
self.append(el)
def remove(self, value):
"""Remove a value from the macro. It doesn't matter if the value
wasn't present.
value is the value to remove.
"""
try:
self._macro.remove(value)
except:
pass
def as_list(self):
"""Return the macro as a list.
"""
return self._macro
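# Illustrative only: unlike _UniqueList, a _Macro keeps duplicates, drops
# empty values and tolerates removing values that were never added.
def _example_macro():
    libs = _Macro("LIBS", "user32.lib gdi32.lib".split())
    libs.append("user32.lib")     # duplicates are allowed
    libs.append("")               # empty values are dropped
    libs.remove("missing.lib")    # removing an absent value is harmless
    return libs.as_list()         # ["user32.lib", "gdi32.lib", "user32.lib"]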
class Makefile:
"""The base class for the different types of Makefiles.
"""
def __init__(self, configuration, console=0, qt=0, opengl=0, python=0,
threaded=0, warnings=1, debug=0, dir=None,
makefile="Makefile", installs=None, universal=None,
arch=None, deployment_target=None):
"""Initialise an instance of the target. All the macros are left
unchanged allowing scripts to manipulate them at will.
configuration is the current configuration.
console is set if the target is a console (rather than windows) target.
qt is set if the target uses Qt. For Qt v4 a list of Qt libraries may
be specified and a simple non-zero value implies QtCore and QtGui.
opengl is set if the target uses OpenGL.
python is set if the target #includes Python.h.
debug is set to generated a debugging version of the target.
threaded is set if the target requires thread support. It is
automatically set if the target uses Qt and Qt has thread support
enabled.
warnings is set if compiler warning messages are required.
debug is set if debugging symbols should be generated.
dir is the directory for build files and Makefiles.
makefile is the name of the Makefile.
installs is a list of extra install targets. Each element is a two
part list, the first of which is the source and the second is the
destination. If the source is another list then it is a set of source
files and the destination is a directory.
universal is the name of the SDK if the target is a MacOS/X universal
binary. If it is None then the value is taken from the configuration.
arch is the space separated MacOS/X architectures to build. If it is
None then it is taken from the configuration.
deployment_target MacOS/X deployment target. If it is None then it is
taken from the configuration.
"""
if qt:
if not hasattr(configuration, "qt_version"):
error("The target uses Qt but pyqtconfig has not been imported.")
# For Qt v4 interpret Qt support as meaning link against the core
# and GUI libraries (which corresponds to the default qmake
# configuration). Also allow a list of Qt v4 modules to be
# specified.
if configuration.qt_version >= 0x040000:
if type(qt) != list:
qt = ["QtCore", "QtGui"]
self._threaded = configuration.qt_threaded
else:
self._threaded = threaded
self.config = configuration
self.console = console
self._qt = qt
self._opengl = opengl
self._python = python
self._warnings = warnings
self._debug = debug
self._makefile = makefile
self._installs = installs
self._infix = ""
# Make sure the destination directory is an absolute path.
if dir:
self.dir = os.path.abspath(dir)
else:
self.dir = os.path.curdir
# Assume we are building in the source tree.
self._src_dir = self.dir
if universal is None:
self._universal = configuration.universal
else:
self._universal = universal
if arch is None:
self._arch = configuration.arch
else:
self._arch = arch
if deployment_target is None:
self._deployment_target = configuration.deployment_target
else:
self._deployment_target = deployment_target
self._finalised = 0
# Copy the macros and convert them all to instance lists.
macros = configuration.build_macros()
for m in list(macros.keys()):
# Allow the user to override the default.
try:
val = getattr(configuration, m)
except AttributeError:
val = macros[m]
# These require special handling as they are (potentially) a set of
# space separated values rather than a single value that might
# contain spaces.
if m in ("DEFINES", "CONFIG") or m[:6] in ("INCDIR", "LIBDIR"):
val = val.split()
# We also want to treat lists of libraries in the same way so that
# duplicates get eliminated.
if m[:4] == "LIBS":
val = val.split()
self.__dict__[m] = _Macro(m, val)
# This is used to alter the configuration more significantly than can
# be done with just configuration files.
self.generator = self.optional_string("MAKEFILE_GENERATOR", "UNIX")
# These are what configuration scripts normally only need to change.
self.extra_cflags = []
self.extra_cxxflags = []
self.extra_defines = []
self.extra_include_dirs = []
self.extra_lflags = []
self.extra_lib_dirs = []
self.extra_libs = []
# Get these once and make them available to sub-classes.
if sys.platform == "win32":
def_copy = "copy"
def_rm = "del"
def_mkdir = "mkdir"
def_chk_dir_exists = "if not exist"
else:
def_copy = "cp -f"
def_rm = "rm -f"
def_mkdir = "mkdir -p"
def_chk_dir_exists = "test -d"
self.copy = self.optional_string("COPY", def_copy)
self.rm = self.optional_string("DEL_FILE", def_rm)
self.mkdir = self.optional_string("MKDIR", def_mkdir)
self.chkdir = self.optional_string("CHK_DIR_EXISTS", def_chk_dir_exists)
def finalise(self):
"""Finalise the macros by doing any consolidation that isn't specific
to a Makefile.
"""
# Extract the things we might need from the Windows Qt configuration.
# Note that we used to think that if Qt was built with exceptions, RTTI
# and STL support enabled then anything that linked against it also
# needed the same flags. However, detecting this was broken for some
# time and nobody complained. For the moment we'll leave the code in
# but it will never be used.
if self._qt:
wcfg = self.config.qt_winconfig.split()
win_shared = ("shared" in wcfg)
win_exceptions = ("exceptions" in wcfg)
win_rtti = ("rtti" in wcfg)
win_stl = ("stl" in wcfg)
qt_version = self.config.qt_version
else:
win_shared = 1
win_exceptions = 0
win_rtti = 0
win_stl = 0
qt_version = 0
# Get what we are going to transform.
cflags = _UniqueList()
cflags.extend(self.extra_cflags)
cflags.extend(self.optional_list("CFLAGS"))
cxxflags = _UniqueList()
cxxflags.extend(self.extra_cxxflags)
cxxflags.extend(self.optional_list("CXXFLAGS"))
defines = _UniqueList()
defines.extend(self.extra_defines)
defines.extend(self.optional_list("DEFINES"))
incdir = _UniqueList(["."])
incdir.extend(self.extra_include_dirs)
incdir.extend(self.optional_list("INCDIR"))
lflags = _UniqueList()
lflags.extend(self.extra_lflags)
lflags.extend(self.optional_list("LFLAGS"))
libdir = _UniqueList()
libdir.extend(self.extra_lib_dirs)
libdir.extend(self.optional_list("LIBDIR"))
# Handle MacOS/X specific configuration.
if sys.platform == 'darwin':
mac_cflags = []
mac_lflags = []
for a in self._arch.split():
aflag = '-arch ' + a
mac_cflags.append(aflag)
mac_lflags.append(aflag)
if self._universal:
mac_cflags.append('-isysroot %s' % self._universal)
mac_lflags.append('-Wl,-syslibroot,%s' % self._universal)
cflags.lextend(mac_cflags)
cxxflags.lextend(mac_cflags)
lflags.lextend(mac_lflags)
# Don't use a unique list as libraries may need to be searched more
# than once. Also MacOS/X uses the form "-framework lib" so we don't
# want to lose the multiple "-framework".
libs = []
for l in self.extra_libs:
libs.append(self.platform_lib(l))
if self._qt:
libs.extend(self._dependent_libs(l))
libs.extend(self.optional_list("LIBS"))
rpaths = _UniqueList()
for l in self.extra_lib_dirs:
l_dir = os.path.dirname(l)
# This is a hack to ignore PyQt's internal support libraries.
if '/qpy/' in l_dir:
continue
# Ignore relative directories. This is really a hack to handle
# SIP v3 inter-module linking.
if l_dir in ("", ".", ".."):
continue
rpaths.append(l)
if self._python:
incdir.append(self.config.py_inc_dir)
incdir.append(self.config.py_conf_inc_dir)
if sys.platform == "cygwin":
libdir.append(self.config.py_lib_dir)
py_lib = "python%u.%u" % ((self.config.py_version >> 16), ((self.config.py_version >> 8) & 0xff))
libs.append(self.platform_lib(py_lib))
elif sys.platform == "win32":
libdir.append(self.config.py_lib_dir)
py_lib = "python%u%u" % ((self.config.py_version >> 16), ((self.config.py_version >> 8) & 0xff))
# For Borland use the OMF version of the Python library if it
# exists, otherwise assume that Python was built with Borland
# and use the normal library.
if self.generator == "BMAKE":
bpy_lib = py_lib + "_bcpp"
bpy_lib_path = os.path.join(self.config.py_lib_dir, self.platform_lib(bpy_lib))
if os.access(bpy_lib_path, os.F_OK):
py_lib = bpy_lib
if self._debug:
py_lib = py_lib + "_d"
if self.generator != "MINGW":
cflags.append("/D_DEBUG")
cxxflags.append("/D_DEBUG")
libs.append(self.platform_lib(py_lib))
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
if win_exceptions:
cflags_exceptions = "CFLAGS_EXCEPTIONS_ON"
cxxflags_exceptions = "CXXFLAGS_EXCEPTIONS_ON"
else:
cflags_exceptions = "CFLAGS_EXCEPTIONS_OFF"
cxxflags_exceptions = "CXXFLAGS_EXCEPTIONS_OFF"
cflags.extend(self.optional_list(cflags_exceptions))
cxxflags.extend(self.optional_list(cxxflags_exceptions))
if win_rtti:
cflags_rtti = "CFLAGS_RTTI_ON"
cxxflags_rtti = "CXXFLAGS_RTTI_ON"
else:
cflags_rtti = "CFLAGS_RTTI_OFF"
cxxflags_rtti = "CXXFLAGS_RTTI_OFF"
cflags.extend(self.optional_list(cflags_rtti))
cxxflags.extend(self.optional_list(cxxflags_rtti))
if win_stl:
cflags_stl = "CFLAGS_STL_ON"
cxxflags_stl = "CXXFLAGS_STL_ON"
else:
cflags_stl = "CFLAGS_STL_OFF"
cxxflags_stl = "CXXFLAGS_STL_OFF"
cflags.extend(self.optional_list(cflags_stl))
cxxflags.extend(self.optional_list(cxxflags_stl))
if self._debug:
if win_shared:
cflags_mt = "CFLAGS_MT_DLLDBG"
cxxflags_mt = "CXXFLAGS_MT_DLLDBG"
else:
cflags_mt = "CFLAGS_MT_DBG"
cxxflags_mt = "CXXFLAGS_MT_DBG"
cflags_debug = "CFLAGS_DEBUG"
cxxflags_debug = "CXXFLAGS_DEBUG"
lflags_debug = "LFLAGS_DEBUG"
else:
if win_shared:
cflags_mt = "CFLAGS_MT_DLL"
cxxflags_mt = "CXXFLAGS_MT_DLL"
else:
cflags_mt = "CFLAGS_MT"
cxxflags_mt = "CXXFLAGS_MT"
cflags_debug = "CFLAGS_RELEASE"
cxxflags_debug = "CXXFLAGS_RELEASE"
lflags_debug = "LFLAGS_RELEASE"
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
if self._threaded:
cflags.extend(self.optional_list(cflags_mt))
cxxflags.extend(self.optional_list(cxxflags_mt))
if self.console:
cflags.extend(self.optional_list("CFLAGS_CONSOLE"))
cxxflags.extend(self.optional_list("CXXFLAGS_CONSOLE"))
cflags.extend(self.optional_list(cflags_debug))
cxxflags.extend(self.optional_list(cxxflags_debug))
lflags.extend(self.optional_list(lflags_debug))
if self._warnings:
cflags_warn = "CFLAGS_WARN_ON"
cxxflags_warn = "CXXFLAGS_WARN_ON"
else:
cflags_warn = "CFLAGS_WARN_OFF"
cxxflags_warn = "CXXFLAGS_WARN_OFF"
cflags.extend(self.optional_list(cflags_warn))
cxxflags.extend(self.optional_list(cxxflags_warn))
if self._threaded:
cflags.extend(self.optional_list("CFLAGS_THREAD"))
cxxflags.extend(self.optional_list("CXXFLAGS_THREAD"))
lflags.extend(self.optional_list("LFLAGS_THREAD"))
if self._qt:
# Get the name of the mkspecs directory.
try:
specd_base = self.config.qt_data_dir
except AttributeError:
specd_base = self.config.qt_dir
mkspecs = os.path.join(specd_base, "mkspecs")
if self.generator != "UNIX" and win_shared:
defines.append("QT_DLL")
if not self._debug:
defines.append("QT_NO_DEBUG")
if qt_version >= 0x040000:
for mod in self._qt:
# Note that qmake doesn't define anything for QtHelp.
if mod == "QtCore":
defines.append("QT_CORE_LIB")
elif mod == "QtDeclarative":
defines.append("QT_DECLARATIVE_LIB")
elif mod == "QtGui":
defines.append("QT_GUI_LIB")
elif mod == "QtMultimedia":
defines.append("QT_MULTIMEDIA_LIB")
elif mod == "QtNetwork":
defines.append("QT_NETWORK_LIB")
elif mod == "QtOpenGL":
defines.append("QT_OPENGL_LIB")
elif mod == "QtScript":
defines.append("QT_SCRIPT_LIB")
elif mod == "QtScriptTools":
defines.append("QT_SCRIPTTOOLS_LIB")
elif mod == "QtSql":
defines.append("QT_SQL_LIB")
elif mod == "QtTest":
defines.append("QT_TEST_LIB")
elif mod == "QtWebKit":
defines.append("QT_WEBKIT_LIB")
elif mod == "QtXml":
defines.append("QT_XML_LIB")
elif mod == "QtXmlPatterns":
defines.append("QT_XMLPATTERNS_LIB")
elif mod == "phonon":
defines.append("QT_PHONON_LIB")
if qt_version >= 0x050000:
if mod == "QtTest":
defines.append("QT_GUI_LIB")
if mod in ("QtSql", "QtTest"):
defines.append("QT_WIDGETS_LIB")
elif self._threaded:
defines.append("QT_THREAD_SUPPORT")
# Handle library directories.
libdir_qt = self.optional_list("LIBDIR_QT")
libdir.extend(libdir_qt)
rpaths.extend(libdir_qt)
if qt_version >= 0x040000:
# Try and read QT_LIBINFIX from qconfig.pri.
qconfig = os.path.join(mkspecs, "qconfig.pri")
self._infix = self._extract_value(qconfig, "QT_LIBINFIX")
# For Windows: the macros that define the dependencies on
# Windows libraries.
wdepmap = {
"QtCore": "LIBS_CORE",
"QtGui": "LIBS_GUI",
"QtNetwork": "LIBS_NETWORK",
"QtOpenGL": "LIBS_OPENGL",
"QtWebKit": "LIBS_WEBKIT"
}
# For Windows: the dependencies between Qt libraries.
qt5_depmap = {
"QtDeclarative": ("QtXmlPatterns", "QtNetwork", "QtSql", "QtScript", "QtWidgets", "QtGui", "QtCore"),
"QtGui": ("QtPrintSupport", "QtWidgets", "QtCore"),
"QtHelp": ("QtNetwork", "QtSql", "QtWidgets", "QtGui", "QtCore"),
"QtMultimedia": ("QtGui", "QtCore"),
"QtNetwork": ("QtCore", ),
"QtOpenGL": ("QtWidgets", "QtGui", "QtCore"),
"QtScript": ("QtCore", ),
"QtScriptTools": ("QtScript", "QtGui", "QtCore"),
"QtSql": ("QtCore", ),
"QtSvg": ("QtXml", "QtWidgets", "QtGui", "QtCore"),
"QtTest": ("QtGui", "QtCore"),
"QtWebKit": ("QtNetwork", "QtWebKitWidgets", "QtWidgets", "QtGui", "QtCore"),
"QtXml": ("QtCore", ),
"QtXmlPatterns": ("QtNetwork", "QtCore"),
"QtDesigner": ("QtGui", "QtCore"),
"QAxContainer": ("Qt5AxBase", "QtWidgets", "QtGui", "QtCore")
}
qt4_depmap = {
"QtAssistant": ("QtNetwork", "QtGui", "QtCore"),
"QtDeclarative": ("QtNetwork", "QtGui", "QtCore"),
"QtGui": ("QtCore", ),
"QtHelp": ("QtSql", "QtGui", "QtCore"),
"QtMultimedia": ("QtGui", "QtCore"),
"QtNetwork": ("QtCore", ),
"QtOpenGL": ("QtGui", "QtCore"),
"QtScript": ("QtCore", ),
"QtScriptTools": ("QtScript", "QtGui", "QtCore"),
"QtSql": ("QtCore", ),
"QtSvg": ("QtXml", "QtGui", "QtCore"),
"QtTest": ("QtGui", "QtCore"),
"QtWebKit": ("QtNetwork", "QtGui", "QtCore"),
"QtXml": ("QtCore", ),
"QtXmlPatterns": ("QtNetwork", "QtCore"),
"phonon": ("QtGui", "QtCore"),
"QtDesigner": ("QtGui", "QtCore"),
"QAxContainer": ("QtGui", "QtCore")
}
if qt_version >= 0x050000:
qt_depmap = qt5_depmap
else:
qt_depmap = qt4_depmap
# The QtSql .prl file doesn't include QtGui as a dependency (at
# least on Linux) so we explicitly set the dependency here for
# everything.
if "QtSql" in self._qt:
if "QtGui" not in self._qt:
self._qt.append("QtGui")
# With Qt v4.2.0, the QtAssistantClient library is now a shared
# library on UNIX. The QtAssistantClient .prl file doesn't
# include QtGui and QtNetwork as a dependency any longer. This
# seems to be a bug in Qt v4.2.0. We explicitly set the
# dependencies here.
if qt_version >= 0x040200 and "QtAssistant" in self._qt:
if "QtGui" not in self._qt:
self._qt.append("QtGui")
if "QtNetwork" not in self._qt:
self._qt.append("QtNetwork")
for mod in self._qt:
lib = self._qt_module_to_lib(mod)
libs.append(self.platform_lib(lib, self._is_framework(mod)))
if sys.platform == "win32":
# On Windows the dependent libraries seem to be in
# qmake.conf rather than the .prl file and the
# inter-dependencies between Qt libraries don't seem to
# be anywhere.
deps = _UniqueList()
if mod in list(wdepmap.keys()):
deps.extend(self.optional_list(wdepmap[mod]))
if mod in list(qt_depmap.keys()):
for qdep in qt_depmap[mod]:
# Ignore the dependency if it is explicitly
# linked.
if qdep not in self._qt:
libs.append(self.platform_lib(self._qt_module_to_lib(qdep)))
if qdep in list(wdepmap.keys()):
deps.extend(self.optional_list(wdepmap[qdep]))
libs.extend(deps.as_list())
else:
libs.extend(self._dependent_libs(lib, self._is_framework(mod)))
else:
# Windows needs the version number appended if Qt is a DLL.
qt_lib = self.config.qt_lib
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE") and win_shared:
qt_lib = qt_lib + version_to_string(qt_version).replace(".", "")
if self.config.qt_edition == "non-commercial":
qt_lib = qt_lib + "nc"
libs.append(self.platform_lib(qt_lib, self.config.qt_framework))
libs.extend(self._dependent_libs(self.config.qt_lib))
# Handle header directories.
specd = os.path.join(mkspecs, "default")
if not os.access(specd, os.F_OK):
specd = os.path.join(mkspecs, self.config.platform)
incdir.append(specd)
qtincdir = self.optional_list("INCDIR_QT")
if qtincdir:
if qt_version >= 0x040000:
for mod in self._qt:
if mod == "QAxContainer":
incdir.append(os.path.join(qtincdir[0], "ActiveQt"))
elif self._is_framework(mod):
idir = libdir_qt[0]
if mod == "QtAssistant" and qt_version < 0x040202:
mod = "QtAssistantClient"
incdir.append(os.path.join(idir,
mod + ".framework", "Headers"))
if qt_version >= 0x050000:
if mod == "QtGui":
incdir.append(os.path.join(idir,
"QtWidgets.framework", "Headers"))
incdir.append(os.path.join(idir,
"QtPrintSupport.framework",
"Headers"))
elif mod == "QtWebKit":
incdir.append(os.path.join(idir,
"QtWebKitWidgets.framework",
"Headers"))
else:
idir = qtincdir[0]
incdir.append(os.path.join(idir, mod))
if qt_version >= 0x050000:
if mod == "QtGui":
incdir.append(os.path.join(idir,
"QtWidgets"))
incdir.append(os.path.join(idir,
"QtPrintSupport"))
elif mod == "QtWebKit":
incdir.append(os.path.join(idir,
"QtWebKitWidgets"))
# This must go after the module include directories.
incdir.extend(qtincdir)
if self._opengl:
incdir.extend(self.optional_list("INCDIR_OPENGL"))
lflags.extend(self.optional_list("LFLAGS_OPENGL"))
libdir.extend(self.optional_list("LIBDIR_OPENGL"))
libs.extend(self.optional_list("LIBS_OPENGL"))
if self._qt or self._opengl:
if qt_version < 0x040000 or self._opengl or "QtGui" in self._qt:
incdir.extend(self.optional_list("INCDIR_X11"))
libdir.extend(self.optional_list("LIBDIR_X11"))
libs.extend(self.optional_list("LIBS_X11"))
if self._threaded:
libs.extend(self.optional_list("LIBS_THREAD"))
libs.extend(self.optional_list("LIBS_RTMT"))
else:
libs.extend(self.optional_list("LIBS_RT"))
if self.console:
libs.extend(self.optional_list("LIBS_CONSOLE"))
libs.extend(self.optional_list("LIBS_WINDOWS"))
lflags.extend(self._platform_rpaths(rpaths.as_list()))
# Save the transformed values.
self.CFLAGS.set(cflags)
self.CXXFLAGS.set(cxxflags)
self.DEFINES.set(defines)
self.INCDIR.set(incdir)
self.LFLAGS.set(lflags)
self.LIBDIR.set(libdir)
self.LIBS.set(libs)
# Don't do it again because it has side effects.
self._finalised = 1
def _add_manifest(self, target=None):
"""Add the link flags for creating a manifest file.
"""
if target is None:
target = "$(TARGET)"
self.LFLAGS.append("/MANIFEST")
self.LFLAGS.append("/MANIFESTFILE:%s.manifest" % target)
def _is_framework(self, mod):
"""Return true if the given Qt module is a framework.
"""
return (self.config.qt_framework and (self.config.qt_version >= 0x040200 or mod != "QtAssistant"))
def _qt_module_to_lib(self, mname):
"""Return the name of the Qt library corresponding to a module.
mname is the name of the module.
"""
qt_version = self.config.qt_version
if mname == "QtAssistant":
if qt_version >= 0x040202 and sys.platform == "darwin":
lib = mname
else:
lib = "QtAssistantClient"
else:
lib = mname
lib += self._infix
if self._debug:
if sys.platform == "win32":
lib = lib + "d"
elif sys.platform == "darwin":
if not self._is_framework(mname):
lib = lib + "_debug"
elif qt_version < 0x040200:
lib = lib + "_debug"
qt5_rename = False
if sys.platform == "win32" and "shared" in self.config.qt_winconfig.split():
if (mname in ("QtCore", "QtDeclarative", "QtDesigner", "QtGui",
"QtHelp", "QtMultimedia", "QtNetwork", "QtOpenGL",
"QtScript", "QtScriptTools", "QtSql", "QtSvg",
"QtTest", "QtWebKit", "QtXml", "QtXmlPatterns",
"phonon", "QAxContainer", "QtPrintSupport",
"QtWebKitWidgets", "QtWidgets") or
(qt_version >= 0x040200 and mname == "QtAssistant")):
if mname == "QAxContainer":
if qt_version >= 0x050000:
lib = "Qt5" + lib[1:]
elif qt_version >= 0x050000:
qt5_rename = True
else:
lib = lib + "4"
elif sys.platform.startswith("linux") and qt_version >= 0x050000:
qt5_rename = True
if qt5_rename:
lib = "Qt5" + lib[2:]
return lib
def optional_list(self, name):
"""Return an optional Makefile macro as a list.
name is the name of the macro.
"""
return self.__dict__[name].as_list()
def optional_string(self, name, default=""):
"""Return an optional Makefile macro as a string.
name is the name of the macro.
default is the default value
"""
s = ' '.join(self.optional_list(name))
if not s:
s = default
return s
def required_string(self, name):
"""Return a required Makefile macro as a string.
name is the name of the macro.
"""
s = self.optional_string(name)
if not s:
raise ValueError("\"%s\" must have a non-empty value" % name)
return s
def _platform_rpaths(self, rpaths):
"""Return a list of platform specific rpath flags.
rpaths is the canonical list of rpaths.
"""
flags = []
prefix = self.optional_string("RPATH")
if prefix == "":
# This was renamed in Qt v4.7.
prefix = self.optional_string("LFLAGS_RPATH")
if prefix != "":
for r in rpaths:
flags.append(_quote(prefix + r))
return flags
def platform_lib(self, clib, framework=0):
"""Return a library name in platform specific form.
clib is the library name in canonical form.
framework is set if the library is implemented as a MacOS framework.
"""
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
plib = clib + ".lib"
elif sys.platform == "darwin" and framework:
plib = "-framework " + clib
else:
plib = "-l" + clib
return plib
def _dependent_libs(self, clib, framework=0):
"""Return a list of additional libraries (in platform specific form)
that must be linked with a library.
clib is the library name in canonical form.
framework is set if the library is implemented as a MacOS framework.
"""
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
prl_name = os.path.join(self.config.qt_lib_dir, clib + ".prl")
elif sys.platform == "darwin" and framework:
prl_name = os.path.join(self.config.qt_lib_dir, clib + ".framework", clib + ".prl")
else:
prl_name = os.path.join(self.config.qt_lib_dir, "lib" + clib + ".prl")
libs = self._extract_value(prl_name, "QMAKE_PRL_LIBS").split()
if self.config.qt_version >= 0x050000:
xtra_libs = []
if clib in ("QtGui", "Qt5Gui"):
xtra_libs.append("QtWidgets")
xtra_libs.append("QtPrintSupport")
elif clib in ("QtWebKit", "Qt5WebKit"):
xtra_libs.append("QtWebKitWidgets")
for xtra in xtra_libs:
libs.extend(
self.platform_lib(
self._qt_module_to_lib(xtra), framework).split())
return libs
def _extract_value(self, fname, vname):
"""Return the stripped value from a name=value line in a file.
fname is the name of the file.
vname is the name of the value.
"""
value = ""
if os.access(fname, os.F_OK):
try:
f = open(fname, "r")
except IOError:
error("Unable to open \"%s\"" % fname)
line = f.readline()
while line:
line = line.strip()
if line and line[0] != "#":
eq = line.find("=")
if eq > 0 and line[:eq].strip() == vname:
value = line[eq + 1:].strip()
break
line = f.readline()
f.close()
return value
def parse_build_file(self, filename):
"""
Parse a build file and return the corresponding dictionary.
filename is the name of the build file. If it is a dictionary instead
then its contents are validated.
"""
if type(filename) == dict:
bfname = "dictionary"
bdict = filename
else:
if os.path.isabs(filename):
# We appear to be building out of the source tree.
self._src_dir = os.path.dirname(filename)
bfname = filename
else:
bfname = os.path.join(self.dir, filename)
bdict = {}
try:
f = open(bfname, "r")
except IOError:
error("Unable to open \"%s\"" % bfname)
line_nr = 1
line = f.readline()
while line:
line = line.strip()
if line and line[0] != "#":
eq = line.find("=")
if eq <= 0:
error("\"%s\" line %d: Line must be in the form 'name = value value...'." % (bfname, line_nr))
bdict[line[:eq].strip()] = line[eq + 1:].strip()
line_nr = line_nr + 1
line = f.readline()
f.close()
# Check the compulsory values.
for i in ("target", "sources"):
try:
bdict[i]
except KeyError:
error("\"%s\" is missing from \"%s\"." % (i, bfname))
# Get the optional values.
for i in ("headers", "moc_headers"):
try:
bdict[i]
except KeyError:
bdict[i] = ""
# Generate the list of objects.
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
ext = ".obj"
else:
ext = ".o"
olist = []
for f in bdict["sources"].split():
root, discard = os.path.splitext(f)
olist.append(root + ext)
for f in bdict["moc_headers"].split():
if not self._qt:
error("\"%s\" defines \"moc_headers\" for a non-Qt module." % bfname)
root, discard = os.path.splitext(f)
olist.append("moc_" + root + ext)
bdict["objects"] = ' '.join(olist)
return bdict
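# Illustrative only: judging from the parser above, a build file is a set of
# simple "name = value" lines where "target" and "sources" are required and
# "headers"/"moc_headers" are optional. A minimal build file for a
# hypothetical module could look like:
#
#     target = mymodule
#     sources = sipmymodulecmodule.cpp sipmymoduleMyClass.cpp
#     headers = sipAPImymodule.h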
def clean_build_file_objects(self, mfile, build):
"""Generate the clean target.
mfile is the file object.
build is the dictionary created from the build file.
"""
mfile.write("\t-%s $(TARGET)\n" % self.rm)
for f in build["objects"].split():
mfile.write("\t-%s %s\n" % (self.rm, f))
for f in build["moc_headers"].split():
root, discard = os.path.splitext(f)
mfile.write("\t-%s moc_%s.cpp\n" % (self.rm, root))
def ready(self):
"""The Makefile is now ready to be used.
"""
if not self._finalised:
self.finalise()
def generate(self):
"""Generate the Makefile.
"""
self.ready()
# Make sure the destination directory exists.
try:
os.makedirs(self.dir)
except:
pass
mfname = os.path.join(self.dir, self._makefile)
try:
mfile = open(mfname, "w")
except IOError:
error("Unable to create \"%s\"" % mfname)
self.generate_macros_and_rules(mfile)
self.generate_target_default(mfile)
self.generate_target_install(mfile)
if self._installs:
if type(self._installs) != list:
self._installs = [self._installs]
for src, dst in self._installs:
self.install_file(mfile, src, dst)
self.generate_target_clean(mfile)
mfile.close()
def generate_macros_and_rules(self, mfile):
"""The default implementation of the macros and rules generation.
mfile is the file object.
"""
if self._deployment_target:
mfile.write("export MACOSX_DEPLOYMENT_TARGET = %s\n" % self._deployment_target)
mfile.write("CC = %s\n" % self.required_string("CC"))
mfile.write("CXX = %s\n" % self.required_string("CXX"))
mfile.write("LINK = %s\n" % self.required_string("LINK"))
cppflags = []
if not self._debug:
cppflags.append("-DNDEBUG")
for f in self.optional_list("DEFINES"):
cppflags.append("-D" + f)
for f in self.optional_list("INCDIR"):
cppflags.append("-I" + _quote(f))
libs = []
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
libdir_prefix = "/LIBPATH:"
else:
libdir_prefix = "-L"
for ld in self.optional_list("LIBDIR"):
if sys.platform == "darwin" and self.config.qt_framework:
fflag = "-F" + _quote(ld)
libs.append(fflag)
cppflags.append(fflag)
libs.append(libdir_prefix + _quote(ld))
libs.extend(self.optional_list("LIBS"))
mfile.write("CPPFLAGS = %s\n" % ' '.join(cppflags))
mfile.write("CFLAGS = %s\n" % self.optional_string("CFLAGS"))
mfile.write("CXXFLAGS = %s\n" % self.optional_string("CXXFLAGS"))
mfile.write("LFLAGS = %s\n" % self.optional_string("LFLAGS"))
mfile.write("LIBS = %s\n" % ' '.join(libs))
if self._qt:
mfile.write("MOC = %s\n" % _quote(self.required_string("MOC")))
if self._src_dir != self.dir:
mfile.write("VPATH = %s\n\n" % self._src_dir)
# These probably don't matter.
if self.generator == "MINGW":
mfile.write(".SUFFIXES: .cpp .cxx .cc .C .c\n\n")
elif self.generator == "UNIX":
mfile.write(".SUFFIXES: .c .o .cpp .cc .cxx .C\n\n")
else:
mfile.write(".SUFFIXES: .c .cpp .cc .cxx .C\n\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
mfile.write("""
{.}.cpp{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.cc{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.cxx{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.C{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.c{}.obj::
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
""")
elif self.generator == "BMAKE":
mfile.write("""
.cpp.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.cc.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.cxx.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.C.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.c.obj:
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -o$@ $<
""")
else:
mfile.write("""
.cpp.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.cc.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.cxx.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.C.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.c.o:
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -o $@ $<
""")
def generate_target_default(self, mfile):
"""The default implementation of the default target.
mfile is the file object.
"""
mfile.write("\nall:\n")
def generate_target_install(self, mfile):
"""The default implementation of the install target.
mfile is the file object.
"""
mfile.write("\ninstall:\n")
def generate_target_clean(self, mfile):
"""The default implementation of the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
def install_file(self, mfile, src, dst, strip=0):
"""Install one or more files in a directory.
mfile is the file object.
src is the name of a single file to install, or a list of files to install.
dst is the name of the destination directory.
strip is set if the files should be stripped after being installed.
"""
# Help package builders.
if self.generator == "UNIX":
dst = "$(DESTDIR)" + dst
mfile.write("\t@%s %s " % (self.chkdir, _quote(dst)))
if self.generator == "UNIX":
mfile.write("|| ")
mfile.write("%s %s\n" % (self.mkdir, _quote(dst)))
if type(src) != list:
src = [src]
# Get the strip command if needed.
if strip:
strip_cmd = self.optional_string("STRIP")
if not strip_cmd:
strip = 0
for sf in src:
target = _quote(os.path.join(dst, os.path.basename(sf)))
mfile.write("\t%s %s %s\n" % (self.copy, _quote(sf), target))
if strip:
mfile.write("\t%s %s\n" % (strip_cmd, target))
class ParentMakefile(Makefile):
"""The class that represents a parent Makefile.
"""
def __init__(self, configuration, subdirs, dir=None, makefile="Makefile",
installs=None):
"""Initialise an instance of a parent Makefile.
subdirs is the sequence of subdirectories.
"""
Makefile.__init__(self, configuration, dir=dir, makefile=makefile, installs=installs)
self._subdirs = subdirs
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules.
mfile is the file object.
"""
# We don't want them.
pass
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
self._subdir_target(mfile)
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
self._subdir_target(mfile, "install")
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
self._subdir_target(mfile, "clean")
def _subdir_target(self, mfile, target="all"):
"""Create a target for a list of sub-directories.
mfile is the file object.
target is the name of the target.
"""
if target == "all":
tname = ""
else:
tname = " " + target
mfile.write("\n" + target + ":\n")
for d in self._subdirs:
if self.generator == "MINGW":
mfile.write("\t@$(MAKE) -C %s%s\n" % (d, tname))
elif self.generator == "UNIX":
mfile.write("\t@(cd %s; $(MAKE)%s)\n" % (d, tname))
else:
mfile.write("\tcd %s\n" % d)
mfile.write("\t$(MAKE)%s\n" % tname)
mfile.write("\t@cd ..\n")
class PythonModuleMakefile(Makefile):
"""The class that represents a Python module Makefile.
"""
def __init__(self, configuration, dstdir, srcdir=None, dir=None,
makefile="Makefile", installs=None):
"""Initialise an instance of a parent Makefile.
dstdir is the name of the directory where the module's Python code will
be installed.
srcdir is the name of the directory (relative to the directory in which
the Makefile will be created) containing the module's Python code. It
defaults to the same directory.
"""
Makefile.__init__(self, configuration, dir=dir, makefile=makefile, installs=installs)
if not srcdir:
srcdir = "."
if dir:
self._moddir = os.path.join(dir, srcdir)
else:
self._moddir = srcdir
self._srcdir = srcdir
self._dstdir = dstdir
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules.
mfile is the file object.
"""
# We don't want them.
pass
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
Makefile.generate_target_install(self, mfile)
for root, dirs, files in os.walk(self._moddir):
# Do not recurse into certain directories.
for skip in (".svn", "CVS"):
if skip in dirs:
dirs.remove(skip)
tail = root[len(self._moddir):]
flist = []
for f in files:
if f == "Makefile":
continue
if os.path.isfile(os.path.join(root, f)):
flist.append(os.path.join(self._srcdir + tail, f))
self.install_file(mfile, flist, self._dstdir + tail)
class ModuleMakefile(Makefile):
"""The class that represents a Python extension module Makefile
"""
def __init__(self, configuration, build_file, install_dir=None, static=0,
console=0, qt=0, opengl=0, threaded=0, warnings=1, debug=0,
dir=None, makefile="Makefile", installs=None, strip=1,
export_all=0, universal=None, arch=None,
deployment_target=None):
"""Initialise an instance of a module Makefile.
build_file is the file containing the target specific information. If
it is a dictionary instead then its contents are validated.
install_dir is the directory the target will be installed in.
static is set if the module should be built as a static library.
strip is set if the module should be stripped of unneeded symbols when
installed. The default is 1.
export_all is set if all the module's symbols should be exported rather
than just the module's initialisation function. Exporting all symbols
increases the size of the module and slows down module load times but
may avoid problems with modules that use exceptions. The default is 0.
"""
Makefile.__init__(self, configuration, console, qt, opengl, 1, threaded, warnings, debug, dir, makefile, installs, universal, arch, deployment_target)
self._build = self.parse_build_file(build_file)
self._install_dir = install_dir
self.static = static
self._manifest = ("embed_manifest_dll" in self.optional_list("CONFIG"))
# Don't strip or restrict the exports if this is a debug or static
# build.
if debug or static:
self._strip = 0
self._limit_exports = 0
else:
self._strip = strip
self._limit_exports = not export_all
# Save the target name for later.
self._target = self._build["target"]
# The name of the module entry point is Python version specific.
if self.config.py_version >= 0x030000:
self._entry_point = "PyInit_%s" % self._target
else:
self._entry_point = "init%s" % self._target
if sys.platform != "win32" and static:
self._target = "lib" + self._target
if sys.platform == "win32" and debug:
self._target = self._target + "_d"
def finalise(self):
"""Finalise the macros common to all module Makefiles.
"""
if self.console:
lflags_console = "LFLAGS_CONSOLE"
else:
lflags_console = "LFLAGS_WINDOWS"
if self.static:
self.DEFINES.append("SIP_STATIC_MODULE")
else:
self.CFLAGS.extend(self.optional_list("CFLAGS_SHLIB"))
self.CXXFLAGS.extend(self.optional_list("CXXFLAGS_SHLIB"))
lflags_dll = self.optional_list("LFLAGS_DLL")
if lflags_dll:
self.LFLAGS.extend(lflags_dll)
elif self.console:
lflags_console = "LFLAGS_CONSOLE_DLL"
else:
lflags_console = "LFLAGS_WINDOWS_DLL"
if self._manifest:
self._add_manifest()
# We use this to explicitly create bundles on MacOS. Apple's Python
# can handle extension modules that are bundles or dynamic
# libraries, but python.org versions need bundles (unless built
# with DYNLOADFILE=dynload_shlib.o).
if sys.platform == "darwin":
lflags_plugin = ["-bundle"]
else:
lflags_plugin = self.optional_list("LFLAGS_PLUGIN")
if not lflags_plugin:
lflags_plugin = self.optional_list("LFLAGS_SHLIB")
self.LFLAGS.extend(lflags_plugin)
self.LFLAGS.extend(self.optional_list(lflags_console))
if sys.platform == "darwin":
from distutils.sysconfig import get_python_inc
# The Python include directory seems to be the only one that uses
# the real path even when using a virtual environment (eg. pyvenv).
# Note that I can't remember why we need a framework build.
dl = get_python_inc().split(os.sep)
if "Python.framework" not in dl:
error("SIP requires Python to be built as a framework")
self.LFLAGS.append("-undefined dynamic_lookup")
Makefile.finalise(self)
if not self.static:
if self.optional_string("AIX_SHLIB"):
# AIX needs a lot of special handling.
if self.required_string('LINK') == 'g++':
# g++ is used for linking.
# For SIP v4 and g++:
# 1.) Import the python symbols
aix_lflags = ['-Wl,-bI:%s/python.exp' % self.config.py_lib_dir]
if self._limit_exports:
aix_lflags.append('-Wl,-bnoexpall')
aix_lflags.append('-Wl,-bnoentry')
aix_lflags.append('-Wl,-bE:%s.exp' % self._target)
else:
# IBM VisualAge C++ is used for linking.
# For SIP v4 and xlC:
# 1.) Create a shared object
# 2.) Import the python symbols
aix_lflags = ['-qmkshrobj',
'-bI:%s/python.exp' % self.config.py_lib_dir]
if self._limit_exports:
aix_lflags.append('-bnoexpall')
aix_lflags.append('-bnoentry')
aix_lflags.append('-bE:%s.exp' % self._target)
self.LFLAGS.extend(aix_lflags)
else:
if self._limit_exports:
if sys.platform[:5] == 'linux':
self.LFLAGS.extend(['-Wl,--version-script=%s.exp' % self._target])
elif sys.platform[:5] == 'sunos':
if self.required_string('LINK') == 'g++':
self.LFLAGS.extend(['-Wl,-z,noversion', '-Wl,-M,%s.exp' % self._target])
else:
self.LFLAGS.extend(['-z' 'noversion', '-M', '%s.exp' % self._target])
elif sys.platform[:5] == 'hp-ux':
self.LFLAGS.extend(['-Wl,+e,%s' % self._entry_point])
elif sys.platform[:5] == 'irix' and self.required_string('LINK') != 'g++':
# Doesn't work when g++ is used for linking on IRIX.
self.LFLAGS.extend(['-Wl,-exported_symbol,%s' % self._entry_point])
# Force the shared linker if there is one.
link_shlib = self.optional_list("LINK_SHLIB")
if link_shlib:
self.LINK.set(link_shlib)
# This made an appearance in Qt v4.4rc1 and breaks extension modules so
# remove it. It was removed at my request but some stupid distros may
# have kept it.
self.LFLAGS.remove('-Wl,--no-undefined')
def module_as_lib(self, mname):
"""Return the name of a SIP v3.x module when it is used as a library.
This will raise an exception when used with SIP v4.x modules.
mname is the name of the module.
"""
raise ValueError("module_as_lib() can only be used with SIP v3.x")
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules generation.
mfile is the file object.
"""
if self.static:
if sys.platform == "win32":
ext = "lib"
else:
ext = "a"
else:
if sys.platform == "win32":
ext = "pyd"
elif sys.platform == "darwin":
ext = "so"
elif sys.platform == "cygwin":
ext = "dll"
else:
ext = self.optional_string("EXTENSION_PLUGIN")
if not ext:
ext = self.optional_string("EXTENSION_SHLIB", "so")
mfile.write("TARGET = %s\n" % (self._target + "." + ext))
mfile.write("OFILES = %s\n" % self._build["objects"])
mfile.write("HFILES = %s %s\n" % (self._build["headers"], self._build["moc_headers"]))
mfile.write("\n")
if self.static:
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
mfile.write("LIB = %s\n" % self.required_string("LIB"))
elif self.generator == "MINGW":
mfile.write("AR = %s\n" % self.required_string("LIB"))
self._ranlib = None
else:
mfile.write("AR = %s\n" % self.required_string("AR"))
self._ranlib = self.optional_string("RANLIB")
if self._ranlib:
mfile.write("RANLIB = %s\n" % self._ranlib)
Makefile.generate_macros_and_rules(self, mfile)
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
# Do these first so that it's safe for a sub-class to append additional
# commands to the real target, but make sure the default is correct.
mfile.write("\nall: $(TARGET)\n")
mfile.write("\n$(OFILES): $(HFILES)\n")
for mf in self._build["moc_headers"].split():
root, discard = os.path.splitext(mf)
cpp = "moc_" + root + ".cpp"
mfile.write("\n%s: %s\n" % (cpp, mf))
mfile.write("\t$(MOC) -o %s %s\n" % (cpp, mf))
mfile.write("\n$(TARGET): $(OFILES)\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
if self.static:
mfile.write("\t$(LIB) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES)\n")
mfile.write("<<\n")
else:
mfile.write("\t$(LINK) $(LFLAGS) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES) $(LIBS)\n")
mfile.write("<<\n")
if self._manifest:
mfile.write("\tmt -nologo -manifest $(TARGET).manifest -outputresource:$(TARGET);2\n")
elif self.generator == "BMAKE":
if self.static:
mfile.write("\t-%s $(TARGET)\n" % (self.rm))
mfile.write("\t$(LIB) $(TARGET) @&&|\n")
for of in self._build["objects"].split():
mfile.write("+%s \\\n" % (of))
mfile.write("|\n")
else:
mfile.write("\t$(LINK) @&&|\n")
mfile.write("\t$(LFLAGS) $(OFILES) ,$(TARGET),,$(LIBS),%s\n" % (self._target))
mfile.write("|\n")
# Create the .def file that renames the entry point.
defname = os.path.join(self.dir, self._target + ".def")
try:
dfile = open(defname, "w")
except IOError:
error("Unable to create \"%s\"" % defname)
dfile.write("EXPORTS\n")
dfile.write("%s=_%s\n" % (self._entry_point, self._entry_point))
dfile.close()
else:
if self.static:
mfile.write("\t-%s $(TARGET)\n" % self.rm)
mfile.write("\t$(AR) $(TARGET) $(OFILES)\n")
if self._ranlib:
mfile.write("\t$(RANLIB) $(TARGET)\n")
else:
if self._limit_exports:
# Create an export file for AIX, Linux and Solaris.
if sys.platform[:5] == 'linux':
mfile.write("\t@echo '{ global: %s; local: *; };' > %s.exp\n" % (self._entry_point, self._target))
elif sys.platform[:5] == 'sunos':
mfile.write("\t@echo '{ global: %s; local: *; };' > %s.exp\n" % (self._entry_point, self._target))
elif sys.platform[:3] == 'aix':
mfile.write("\t@echo '#!' >%s.exp" % self._target)
mfile.write("; \\\n\t echo '%s' >>%s.exp\n" % (self._entry_point, self._target))
mfile.write("\t$(LINK) $(LFLAGS) -o $(TARGET) $(OFILES) $(LIBS)\n")
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
if self._install_dir is None:
self._install_dir = self.config.default_mod_dir
mfile.write("\ninstall: $(TARGET)\n")
self.install_file(mfile, "$(TARGET)", self._install_dir, self._strip)
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
self.clean_build_file_objects(mfile, self._build)
if self._manifest and not self.static:
mfile.write("\t-%s $(TARGET).manifest\n" % self.rm)
# Remove any export file on AIX, Linux and Solaris.
if self._limit_exports and (sys.platform[:5] == 'linux' or
sys.platform[:5] == 'sunos' or
sys.platform[:3] == 'aix'):
mfile.write("\t-%s %s.exp\n" % (self.rm, self._target))
class SIPModuleMakefile(ModuleMakefile):
"""The class that represents a SIP generated module Makefile.
"""
def __init__(self, configuration, build_file, install_dir=None, static=0,
console=0, qt=0, opengl=0, threaded=0, warnings=1, debug=0,
dir=None, makefile="Makefile", installs=None, strip=1,
export_all=0, universal=None, arch=None, prot_is_public=0,
deployment_target=None):
"""Initialise an instance of a SIP generated module Makefile.
prot_is_public is set if "protected" is to be redefined as "public".
If the platform's C++ ABI allows it this can significantly reduce the
size of the generated code.
For all other arguments see ModuleMakefile.
"""
ModuleMakefile.__init__(self, configuration, build_file, install_dir,
static, console, qt, opengl, threaded, warnings, debug, dir,
makefile, installs, strip, export_all, universal, arch,
deployment_target)
self._prot_is_public = prot_is_public
def finalise(self):
"""Finalise the macros for a SIP generated module Makefile.
"""
if self._prot_is_public:
self.DEFINES.append('SIP_PROTECTED_IS_PUBLIC')
self.DEFINES.append('protected=public')
self.INCDIR.append(self.config.sip_inc_dir)
ModuleMakefile.finalise(self)
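# Illustrative sketch of the kind of configure script SIPModuleMakefile is
# meant for (the module and file names are hypothetical): sip is assumed to
# have been run beforehand with "-b mymodule.sbf" so that the build file lists
# the generated sources. Never called by sipconfig itself.
def _example_configure_sip_module():
    config = Configuration()
    makefile = SIPModuleMakefile(config, "mymodule.sbf")
    makefile.extra_include_dirs.append("include")   # project-specific headers
    makefile.generate()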
class ProgramMakefile(Makefile):
"""The class that represents a program Makefile.
"""
def __init__(self, configuration, build_file=None, install_dir=None,
console=0, qt=0, opengl=0, python=0, threaded=0, warnings=1,
debug=0, dir=None, makefile="Makefile", installs=None,
universal=None, arch=None, deployment_target=None):
"""Initialise an instance of a program Makefile.
build_file is the file containing the target specific information. If
it is a dictionary instead then its contents are validated.
install_dir is the directory the target will be installed in.
"""
Makefile.__init__(self, configuration, console, qt, opengl, python, threaded, warnings, debug, dir, makefile, installs, universal, arch, deployment_target)
self._install_dir = install_dir
self._manifest = ("embed_manifest_exe" in self.optional_list("CONFIG"))
self._target = None
if build_file:
self._build = self.parse_build_file(build_file)
else:
self._build = None
def build_command(self, source):
"""Create a command line that will build an executable. Returns a
tuple of the name of the executable and the command line.
source is the name of the source file.
"""
# The name of the executable.
self._target, _ = os.path.splitext(source)
if sys.platform in ("win32", "cygwin"):
exe = self._target + ".exe"
else:
exe = self._target
self.ready()
# The command line.
build = []
build.append(self.required_string("CXX"))
for a in self._arch.split():
build.append('-arch ' + a)
for f in self.optional_list("DEFINES"):
build.append("-D" + f)
for f in self.optional_list("INCDIR"):
build.append("-I" + _quote(f))
build.extend(self.optional_list("CXXFLAGS"))
# This is for Qt5.
build.extend(self.optional_list("CXXFLAGS_APP"))
# Borland requires all flags to precede all file names.
if self.generator != "BMAKE":
build.append(source)
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
build.append("-Fe")
build.append("/link")
libdir_prefix = "/LIBPATH:"
elif self.generator == "BMAKE":
build.append("-e" + exe)
libdir_prefix = "-L"
else:
build.append("-o")
build.append(exe)
libdir_prefix = "-L"
for ld in self.optional_list("LIBDIR"):
if sys.platform == "darwin" and self.config.qt_framework:
build.append("-F" + _quote(ld))
build.append(libdir_prefix + _quote(ld))
lflags = self.optional_list("LFLAGS")
# This is a huge hack demonstrating my lack of understanding of how the
# Borland compiler works.
if self.generator == "BMAKE":
blflags = []
for lf in lflags:
for f in lf.split():
# Tell the compiler to pass the flags to the linker.
if f[-1] == "-":
f = "-l-" + f[1:-1]
elif f[0] == "-":
f = "-l" + f[1:]
# Remove any explicit object files otherwise the compiler
# will complain that they can't be found, but they don't
# seem to be needed.
if f[-4:].lower() != ".obj":
blflags.append(f)
lflags = blflags
build.extend(lflags)
build.extend(self.optional_list("LIBS"))
if self.generator == "BMAKE":
build.append(source)
return (exe, ' '.join(build))
def finalise(self):
"""Finalise the macros for a program Makefile.
"""
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
self.LFLAGS.append("/INCREMENTAL:NO")
if self._manifest:
self._add_manifest(self._target)
if self.console:
lflags_console = "LFLAGS_CONSOLE"
else:
lflags_console = "LFLAGS_WINDOWS"
self.LFLAGS.extend(self.optional_list(lflags_console))
Makefile.finalise(self)
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules generation.
mfile is the file object.
"""
if not self._build:
raise ValueError("pass a filename as build_file when generating a Makefile")
target = self._build["target"]
if sys.platform in ("win32", "cygwin"):
target = target + ".exe"
mfile.write("TARGET = %s\n" % target)
mfile.write("OFILES = %s\n" % self._build["objects"])
mfile.write("HFILES = %s\n" % self._build["headers"])
mfile.write("\n")
Makefile.generate_macros_and_rules(self, mfile)
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
# Do these first so that it's safe for a sub-class to append additional
# commands to the real target, but make sure the default is correct.
mfile.write("\nall: $(TARGET)\n")
mfile.write("\n$(OFILES): $(HFILES)\n")
for mf in self._build["moc_headers"].split():
root, _ = os.path.splitext(mf)
cpp = "moc_" + root + ".cpp"
if self._src_dir != self.dir:
mf = os.path.join(self._src_dir, mf)
mfile.write("\n%s: %s\n" % (cpp, mf))
mfile.write("\t$(MOC) -o %s %s\n" % (cpp, mf))
mfile.write("\n$(TARGET): $(OFILES)\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
mfile.write("\t$(LINK) $(LFLAGS) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES) $(LIBS)\n")
mfile.write("<<\n")
elif self.generator == "BMAKE":
mfile.write("\t$(LINK) @&&|\n")
mfile.write("\t$(LFLAGS) $(OFILES) ,$(TARGET),,$(LIBS),,\n")
mfile.write("|\n")
else:
mfile.write("\t$(LINK) $(LFLAGS) -o $(TARGET) $(OFILES) $(LIBS)\n")
if self._manifest:
mfile.write("\tmt -nologo -manifest $(TARGET).manifest -outputresource:$(TARGET);1\n")
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
if self._install_dir is None:
self._install_dir = self.config.default_bin_dir
mfile.write("\ninstall: $(TARGET)\n")
self.install_file(mfile, "$(TARGET)", self._install_dir)
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
self.clean_build_file_objects(mfile, self._build)
if self._manifest:
mfile.write("\t-%s $(TARGET).manifest\n" % self.rm)
def _quote(s):
"""Return a string surrounded by double quotes it if contains a space.
s is the string.
"""
# On Qt5 paths often include forward slashes so convert them.
if sys.platform == "win32":
s = s.replace("/", "\\")
if s.find(" ") >= 0:
s = '"' + s + '"'
return s
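# For example (illustrative values):
#   _quote("mydir")   returns  'mydir'
#   _quote("my dir")  returns  '"my dir"'  (with "/" converted to "\\" on win32)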
def version_to_string(v):
"""Convert a 3 part version number encoded as a hexadecimal value to a
string.
"""
return "%u.%u.%u" % (((v >> 16) & 0xff), ((v >> 8) & 0xff), (v & 0xff))
def read_version(filename, description, numdefine=None, strdefine=None):
"""Read the version information for a package from a file. The information
is specified as #defines of a numeric (hexadecimal or decimal) value and/or
a string value.
filename is the name of the file.
description is the descriptive name of the package.
numdefine is the name of the #define of the numeric version. It is ignored
if it is None.
strdefine is the name of the #define of the string version. It is ignored
if it is None.
Returns a tuple of the version as a number and as a string.
"""
need_num = numdefine is not None
need_str = strdefine is not None
vers = None
versstr = None
f = open(filename)
l = f.readline()
while l and (need_num or need_str):
wl = l.split()
if len(wl) >= 3 and wl[0] == "#define":
if need_num and wl[1] == numdefine:
v = wl[2]
if v[0:2] == "0x":
vers = int(v, 16)
else:
dec = int(v)
maj = dec / 100
min = (dec % 100) / 10
bug = (dec % 10)
vers = (maj << 16) + (min << 8) + bug
need_num = 0
if need_str and wl[1] == strdefine:
# Take account of embedded spaces.
versstr = ' '.join(wl[2:])[1:-1]
need_str = 0
l = f.readline()
f.close()
if need_num or need_str:
error("The %s version number could not be determined by parsing %s." % (description, filename))
return (vers, versstr)
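# Illustrative usage, assuming a hypothetical header foo.h containing:
#   #define FOO_VERSION     0x010203
#   #define FOO_VERSION_STR "1.2.3"
# then read_version("foo.h", "Foo", "FOO_VERSION", "FOO_VERSION_STR") returns
# (0x010203, "1.2.3").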
def create_content(cdict, macros=None):
"""Convert a dictionary to a string (typically to use as the content to a
call to create_config_module()). Dictionary values that are strings are
quoted. Dictionary values that are lists are converted to quoted strings.
cdict is the dictionary.
macros is the optional dictionary of platform specific build macros.
"""
content = "_pkg_config = {\n"
keys = list(cdict.keys())
keys.sort()
# Format it nicely.
width = 0
for k in keys:
klen = len(k)
if width < klen:
width = klen
for k in keys:
val = cdict[k]
vtype = type(val)
delim = None
if val is None:
val = "None"
elif vtype == list:
val = ' '.join(val)
delim = "'"
elif vtype == int:
if k.find("version") >= 0:
# Assume it's a hexadecimal version number. It doesn't matter
# if it isn't, we are just trying to make it look pretty.
val = "0x%06x" % val
else:
val = str(val)
else:
val = str(val)
delim = "'"
if delim:
if "'" in val:
delim = "'''"
val = delim + val + delim
content = content + " '" + k + "':" + (" " * (width - len(k) + 2)) + val.replace("\\", "\\\\")
if k != keys[-1]:
content = content + ","
content = content + "\n"
content = content + "}\n\n"
# Format the optional macros.
content = content + "_default_macros = "
if macros:
content = content + "{\n"
names = list(macros.keys())
names.sort()
width = 0
for c in names:
clen = len(c)
if width < clen:
width = clen
for c in names:
if c == names[-1]:
sep = ""
else:
sep = ","
val = macros[c]
if "'" in val:
delim = "'''"
else:
delim = "'"
k = "'" + c + "':"
content = content + " %-*s %s%s%s%s\n" % (1 + width + 2, k, delim, val.replace("\\", "\\\\"), delim, sep)
content = content + "}\n"
else:
content = content + "None\n"
return content
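# A rough sketch of the output for hypothetical input:
#   create_content({'sip_bin': '/usr/bin/sip', 'sip_version': 0x040f03,
#                   'default_sip_flags': ['-x', 'Foo']})
# produces a "_pkg_config = {...}" block in which the string and list values
# are quoted and the version is rendered as 0x040f03, followed by
# "_default_macros = None" because no macros were supplied.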
def create_config_module(module, template, content, macros=None):
"""Create a configuration module by replacing "@" followed by
"SIP_CONFIGURATION" followed by "@" in a template file with a content
string.
module is the name of the module file.
template is the name of the template file.
content is the content string. If it is a dictionary it is first converted
to a string using create_content().
macros is an optional dictionary of platform specific build macros. It is
only used if create_content() is called to convert the content to a string.
"""
if type(content) == dict:
content = create_content(content, macros)
# Allow this file to be used as a template.
key = "@" + "SIP_CONFIGURATION" + "@"
df = open(module, "w")
sf = open(template, "r")
line = sf.readline()
while line:
if line.find(key) >= 0:
line = content
df.write(line)
line = sf.readline()
df.close()
sf.close()
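# Illustrative usage (hypothetical file names):
#   create_config_module("fooconfig.py", "fooconfig.py.in", {"foo_version": 0x010000})
# Every template line containing the configuration key is replaced by the
# generated content string.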
def version_to_sip_tag(version, tags, description):
"""Convert a version number to a SIP tag.
version is the version number. If it is negative then the latest version
is assumed. (This is typically useful if a snapshot is indicated by a
negative version number.)
tags is the dictionary of tags keyed by version number. The tag used is
the one with the smallest key (ie. earliest version) that is greater than
the given version number.
description is the descriptive name of the package used for error messages.
Returns the corresponding tag.
"""
vl = list(tags.keys())
vl.sort()
# For a snapshot use the latest tag.
if version < 0:
tag = tags[vl[-1]]
else:
for v in vl:
if version < v:
tag = tags[v]
break
else:
error("Unsupported %s version: 0x%06x." % (description, version))
return tag
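# Illustrative sketch with hypothetical tags:
#   tags = {0x040000: "Qt_3_0_0", 0x050000: "Qt_4_0_0"}
#   version_to_sip_tag(0x030102, tags, "Qt")  returns "Qt_3_0_0"
#   version_to_sip_tag(-1, tags, "Qt")        returns "Qt_4_0_0" (snapshots use the latest tag)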
def error(msg):
"""Display an error socket and terminate.
msg is the text of the error socket.
"""
sys.stderr.write(format("Error: " + msg) + "\n")
sys.exit(1)
def inform(msg):
"""Display an information socket.
msg is the text of the error socket.
"""
sys.stdout.write(format(msg) + "\n")
def format(msg, leftmargin=0, rightmargin=78):
"""Format a socket by inserting line breaks at appropriate places.
msg is the text of the socket.
leftmargin is the position of the left margin.
rightmargin is the position of the right margin.
Return the formatted socket.
"""
curs = leftmargin
fmsg = " " * leftmargin
for w in msg.split():
l = len(w)
if curs != leftmargin and curs + l > rightmargin:
fmsg = fmsg + "\n" + (" " * leftmargin)
curs = leftmargin
if curs > leftmargin:
fmsg = fmsg + " "
curs = curs + 1
fmsg = fmsg + w
curs = curs + l
return fmsg
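# For example, format("one two three four", leftmargin=2, rightmargin=12)
# returns "  one two\n  three four".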
def parse_build_macros(filename, names, overrides=None, properties=None):
"""Parse a qmake compatible file of build system macros and convert it to a
dictionary. A macro is a name/value pair. The dictionary is returned or
None if any of the overrides was invalid.
filename is the name of the file to parse.
names is a list of the macro names to extract from the file.
overrides is an optional list of macro names and values that modify those
found in the file. They are of the form "name=value" (in which case the
value replaces the value found in the file) or "name+=value" (in which case
the value is appended to the value found in the file).
properties is an optional dictionary of property names and values that are
used to resolve any expressions of the form "$$[name]" in the file.
"""
# Validate and convert the overrides to a dictionary.
orides = {}
if overrides is not None:
for oride in overrides:
prefix = ""
name_end = oride.find("+=")
if name_end >= 0:
prefix = "+"
val_start = name_end + 2
else:
name_end = oride.find("=")
if name_end >= 0:
val_start = name_end + 1
else:
return None
name = oride[:name_end]
if name not in names:
return None
orides[name] = prefix + oride[val_start:]
# This class defines a file like object that handles the nested include()
# directives in qmake files.
class qmake_build_file_reader:
def __init__(self, filename):
self.filename = filename
self.currentfile = None
self.filestack = []
self.pathstack = []
self.cond_fname = None
self._openfile(filename)
def _openfile(self, filename):
try:
f = open(filename, 'r')
except IOError:
# If this file is conditional then don't raise an error.
if self.cond_fname == filename:
return
error("Unable to open %s" % filename)
if self.currentfile:
self.filestack.append(self.currentfile)
self.pathstack.append(self.path)
self.currentfile = f
self.path = os.path.dirname(filename)
def readline(self):
line = self.currentfile.readline()
sline = line.strip()
if self.cond_fname and sline == '}':
# The current condition is closed.
self.cond_fname = None
line = self.currentfile.readline()
elif sline.startswith('exists(') and sline.endswith('{'):
# A new condition is opened so extract the filename.
self.cond_fname = self._normalise(sline[:-1].strip()[7:-1].strip())
line = self.currentfile.readline()
elif sline.startswith('include('):
nextfile = self._normalise(sline[8:-1].strip())
self._openfile(nextfile)
return self.readline()
if not line:
self.currentfile.close()
if self.filestack:
self.currentfile = self.filestack.pop()
self.path = self.pathstack.pop()
return self.readline()
return line
# Normalise a filename by expanding any environment variables and
# making sure it is absolute.
def _normalise(self, fname):
if "$(" in fname:
fname = os.path.normpath(self._expandvars(fname))
if not os.path.isabs(fname):
fname = os.path.join(self.path, fname)
return fname
# Expand the environment variables in a filename.
def _expandvars(self, fname):
i = 0
while True:
m = re.search(r'\$\((\w+)\)', fname[i:])
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name in os.environ:
tail = fname[j:]
fname = fname[:i] + os.environ[name]
i = len(fname)
fname += tail
else:
i = j
return fname
f = qmake_build_file_reader(filename)
# Get everything into a dictionary.
raw = {
"DIR_SEPARATOR": os.sep,
"LITERAL_WHITESPACE": " ",
"LITERAL_DOLLAR": "$",
"LITERAL_HASH": "#"
}
line = f.readline()
while line:
# Handle line continuations.
while len(line) > 1 and line[-2] == "\\":
line = line[:-2]
next = f.readline()
if next:
line = line + next
else:
break
line = line.strip()
# Ignore comments.
if line and line[0] != "#":
assstart = line.find("+")
if assstart > 0 and line[assstart + 1] == '=':
adding = True
assend = assstart + 1
else:
adding = False
assstart = line.find("=")
assend = assstart
if assstart > 0:
lhs = line[:assstart].strip()
rhs = line[assend + 1:].strip()
# Remove the escapes for any quotes.
rhs = rhs.replace(r'\"', '"').replace(r"\'", "'")
if adding and rhs != "":
orig_rhs = raw.get(lhs)
if orig_rhs is not None:
rhs = orig_rhs + " " + rhs
raw[lhs] = _expand_macro_value(raw, rhs, properties)
line = f.readline()
# Go through the raw dictionary extracting the macros we need and
# resolving any macro expansions. First of all, make sure every macro has
# a value.
refined = {}
for m in names:
refined[m] = ""
macro_prefix = "QMAKE_"
for lhs in list(raw.keys()):
# Strip any prefix.
if lhs.startswith(macro_prefix):
reflhs = lhs[len(macro_prefix):]
else:
reflhs = lhs
# See if we are interested in this one.
if reflhs not in names:
continue
rhs = raw[lhs]
# Expand any POSIX style environment variables.
pleadin = ["$$(", "$("]
for pl in pleadin:
estart = rhs.find(pl)
if estart >= 0:
nstart = estart + len(pl)
break
else:
estart = -1
while estart >= 0:
eend = rhs[nstart:].find(")")
if eend < 0:
break
eend = nstart + eend
name = rhs[nstart:eend]
try:
env = os.environ[name]
except KeyError:
env = ""
rhs = rhs[:estart] + env + rhs[eend + 1:]
for pl in pleadin:
estart = rhs.find(pl)
if estart >= 0:
nstart = estart + len(pl)
break
else:
estart = -1
# Expand any Windows style environment variables.
estart = rhs.find("%")
while estart >= 0:
eend = rhs[estart + 1:].find("%")
if eend < 0:
break
eend = estart + 1 + eend
name = rhs[estart + 1:eend]
try:
env = os.environ[name]
except KeyError:
env = ""
rhs = rhs[:estart] + env + rhs[eend + 1:]
estart = rhs.find("%")
refined[reflhs] = rhs
# Handle the user overrides.
for lhs in list(orides.keys()):
rhs = refined[lhs]
oride = orides[lhs]
if oride.find("+") == 0:
if rhs:
rhs = rhs + " " + oride[1:]
else:
rhs = oride[1:]
else:
rhs = oride
refined[lhs] = rhs
return refined
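# Illustrative usage (hypothetical spec file and macro names):
#   macros = parse_build_macros("linux-g++/qmake.conf", ["CC", "CXX", "LFLAGS"],
#                               overrides=["CXX=clang++", "LFLAGS+=-s"])
# returns something like {"CC": "gcc", "CXX": "clang++", "LFLAGS": "... -s"},
# or None if an override names a macro that is not in the requested list.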
def _expand_macro_value(macros, rhs, properties):
"""Expand the value of a macro based on ones seen so far."""
estart = rhs.find("$$(")
mstart = rhs.find("$$")
while mstart >= 0 and mstart != estart:
rstart = mstart + 2
if rstart < len(rhs) and rhs[rstart] == "{":
rstart = rstart + 1
term = "}"
elif rstart < len(rhs) and rhs[rstart] == "[":
rstart = rstart + 1
term = "]"
else:
term = string.whitespace
mend = rstart
while mend < len(rhs) and rhs[mend] not in term:
mend = mend + 1
lhs = rhs[rstart:mend]
if term in "}]":
mend = mend + 1
if term == "]":
# Assume a missing property expands to an empty string.
if properties is None:
value = ""
else:
value = properties.get(lhs, "")
else:
# We used to treat a missing value as an error, but Qt v4.3.0 has
# at least one case that refers to an undefined macro. If qmake
# handles it then this must be the correct behaviour.
value = macros.get(lhs, "")
rhs = rhs[:mstart] + value + rhs[mend:]
estart = rhs.find("$$(")
mstart = rhs.find("$$")
return rhs
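# A minimal sketch of the expansion, assuming CC has already been seen:
#   _expand_macro_value({"CC": "gcc"}, "$$CC -O2", None)    returns "gcc -O2"
#   _expand_macro_value({"CC": "gcc"}, "$${CC} -O2", None)  returns "gcc -O2"
# whereas a "$$(NAME)" environment reference is deliberately left untouched here.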
def create_wrapper(script, wrapper, gui=0, use_arch=''):
"""Create a platform dependent executable wrapper around a Python script.
script is the full pathname of the script.
wrapper is the name of the wrapper file to create.
gui is non-zero if a GUI enabled version of the interpreter should be used.
use_arch is the MacOS/X architecture to invoke python with.
Returns the platform specific name of the wrapper.
"""
if sys.platform == "win32":
wrapper = wrapper + ".bat"
wf = open(wrapper, "w")
if sys.platform == "win32":
exe = sys.executable
if gui:
exe = exe[:-4] + "w.exe"
wf.write("@\"%s\" \"%s\" %%1 %%2 %%3 %%4 %%5 %%6 %%7 %%8 %%9\n" % (exe, script))
elif sys.platform == "darwin":
# The installation of MacOS's python is a mess that changes from
# version to version, and sys.executable is not useful here.
if gui:
exe = "pythonw"
else:
exe = "python"
version = sys.version_info
exe = "%s%d.%d" % (exe, version[0], version[1])
if use_arch:
# Note that this may not work with the "standard" interpreter but
# should with the "pythonX.Y" version.
exe = "arch -%s %s" % (use_arch, exe)
wf.write("#!/bin/sh\n")
wf.write("exec %s %s ${1+\"$@\"}\n" % (exe, script))
else:
wf.write("#!/bin/sh\n")
wf.write("exec %s %s ${1+\"$@\"}\n" % (sys.executable, script))
wf.close()
if sys.platform != "win32":
sbuf = os.stat(wrapper)
mode = sbuf.st_mode
mode |= (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.chmod(wrapper, mode)
return wrapper
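# Illustrative usage (hypothetical paths): on Linux,
#   create_wrapper("/opt/foo/foo.py", "/usr/local/bin/foo")
# writes a two-line shell script that execs sys.executable on the script, makes
# it executable and returns "/usr/local/bin/foo"; on win32 the wrapper would be
# a batch file and the returned name would be "/usr/local/bin/foo.bat".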
| 34.246071
| 219
| 0.526046
|
0bec44c91437111f07949631beadeba5d403f7dd
| 1,724
|
py
|
Python
|
corehq/tests/noseplugins/patches.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/tests/noseplugins/patches.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/tests/noseplugins/patches.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
from nose.plugins import Plugin
from corehq.form_processor.tests.utils import patch_testcase_databases
from corehq.util.es.testing import patch_es_user_signals
class PatchesPlugin(Plugin):
"""Patches various things before tests are run"""
name = "patches"
enabled = True
def options(self, parser, env):
"""Do not call super (always enabled)"""
def begin(self):
patch_assertItemsEqual()
patch_testcase_databases()
fix_freezegun_bugs()
patch_es_user_signals()
def patch_assertItemsEqual():
import unittest
unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual
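# For example, after this patch a Python 3 test can still call
# self.assertItemsEqual([1, 2, 2], [2, 1, 2]) and it behaves as assertCountEqual.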
GLOBAL_FREEZEGUN_IGNORE_LIST = ["kafka."]
def fix_freezegun_bugs():
"""Fix error in freezegun.api.freeze_time
This error occurs in a background thread that is either triggered by
a test using freezegun or becomes active while freezegun patches are
in place.
More complete error details:
```
Exception in thread cchq-producer-network-thread:
Traceback (most recent call last):
...
freezegun/api.py", line 151, in _should_use_real_time
if not ignore_lists[-1]:
IndexError: list index out of range
```
"""
import freezegun.api as api
def freeze_time(*args, **kw):
kw["ignore"] = kw.get("ignore", []) + GLOBAL_FREEZEGUN_IGNORE_LIST
return real_freeze_time(*args, **kw)
# add base ignore list to avoid index error
assert not api.ignore_lists, f"expected empty list, got {api.ignore_lists}"
api.ignore_lists.append(tuple(GLOBAL_FREEZEGUN_IGNORE_LIST))
# patch freeze_time so it always ignores kafka
real_freeze_time = api.freeze_time
api.freeze_time = freeze_time
| 28.733333
| 79
| 0.711137
|
bb4362c374370a842c6ca180476d1b2e769937b5
| 2,363
|
py
|
Python
|
src/pyff/test/test_simple_pipeline.py
|
clarin-eric/pyFF
|
f2e626a560644f18397acca0d20a7451ca3224f6
|
[
"BSD-2-Clause-FreeBSD"
] | 15
|
2018-03-09T04:29:32.000Z
|
2021-05-27T11:29:43.000Z
|
src/pyff/test/test_simple_pipeline.py
|
clarin-eric/pyFF
|
f2e626a560644f18397acca0d20a7451ca3224f6
|
[
"BSD-2-Clause-FreeBSD"
] | 113
|
2018-02-09T10:10:26.000Z
|
2022-03-10T23:40:12.000Z
|
src/pyff/test/test_simple_pipeline.py
|
clarin-eric/pyFF
|
f2e626a560644f18397acca0d20a7451ca3224f6
|
[
"BSD-2-Clause-FreeBSD"
] | 23
|
2018-04-19T19:25:03.000Z
|
2022-03-09T23:24:03.000Z
|
import os
import tempfile
from mako.lookup import TemplateLookup
from pyff.constants import NS
from pyff.pipes import plumbing
from pyff.repo import MDRepository
from pyff.test import SignerTestCase
class SimplePipeLineTest(SignerTestCase):
def setUp(self):
super(SimplePipeLineTest, self).setUp()
self.templates = TemplateLookup(directories=[os.path.join(self.datadir, 'simple-pipeline')])
self.output = tempfile.NamedTemporaryFile('w').name
self.signer = tempfile.NamedTemporaryFile('w').name
self.signer_template = self.templates.get_template('signer.fd')
self.validator = tempfile.NamedTemporaryFile('w').name
self.validator_template = self.templates.get_template('validator.fd')
self.md_signer = MDRepository()
self.md_validator = MDRepository()
with open(self.signer, "w") as fd:
fd.write(self.signer_template.render(ctx=self))
with open(self.validator, "w") as fd:
fd.write(self.validator_template.render(ctx=self))
self.signer_result = plumbing(self.signer).process(self.md_signer, state={'batch': True, 'stats': {}})
self.validator_result = plumbing(self.validator).process(self.md_validator, state={'batch': True, 'stats': {}})
def test_entityid_present(self):
eids = [e.get('entityID') for e in self.md_signer.store]
print(eids)
assert 'https://idp.aco.net/idp/shibboleth' in eids
assert 'https://skriptenforum.net/shibboleth' in eids
eids = [e.get('entityID') for e in self.md_validator.store]
print(eids)
assert 'https://idp.aco.net/idp/shibboleth' in eids
assert 'https://skriptenforum.net/shibboleth' in eids
def test_non_zero_output(self):
assert self.md_signer is not None
assert self.md_signer.store.size() == 2
assert self.md_validator is not None
assert self.md_validator.store.size() == 2
assert os.path.getsize(self.output) > 0
def test_select_single(self):
assert self.validator_result is not None
entities = self.validator_result.findall('{%s}EntityDescriptor' % NS['md'])
assert len(entities) == 1
assert entities[0].get('entityID') == 'https://idp.aco.net/idp/shibboleth'
def tear_down(self):
super(SimplePipeLineTest, self).tearDown()
| 42.196429
| 119
| 0.680491
|
2d874dc1ca626e663e4287dc92dfb755c1fc53ed
| 146,159
|
py
|
Python
|
kddg/api/db.py
|
Kortemme-Lab/kddg
|
9fc09172abbefd4fef49261687c60a9bd9b6b29b
|
[
"MIT"
] | 2
|
2016-06-14T00:32:02.000Z
|
2020-05-04T03:29:46.000Z
|
kddg/api/db.py
|
Kortemme-Lab/kddg
|
9fc09172abbefd4fef49261687c60a9bd9b6b29b
|
[
"MIT"
] | null | null | null |
kddg/api/db.py
|
Kortemme-Lab/kddg
|
9fc09172abbefd4fef49261687c60a9bd9b6b29b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.4
# encoding: utf-8
"""
db.py
High-level functions for interacting with the ddG database.
Created by Shane O'Connor 2012.
Copyright (c) 2012 __UCSF__. All rights reserved.
"""
import sys
import os
import string
import glob
import traceback
import random
import datetime
import zipfile
import StringIO
import gzip
import pprint
import json
from io import BytesIO
try:
import pandas
except ImportError:
pass
try:
import matplotlib
# A non-interactive backend to generate PNGs. matplotlib.use('PS') is used for PS files. If used, this command must be run before importing matplotlib.pyplot.
matplotlib.use("AGG")
import matplotlib.pyplot as plt
import textwrap
except ImportError:
plt=None
from sqlalchemy import and_, func
from sqlalchemy.orm import load_only
from klab.bio.pdb import PDB
from klab.bio.basics import residue_type_3to1_map as aa1, dssp_elision
from klab.bio.basics import Mutation
from klab.bio.pdbtm import PDBTM
from klab.fs.fsio import write_file, read_file, open_temp_file
from klab.process import Popen
from klab.constants import rosetta_weights
from klab import colortext
from klab.stats.misc import get_xy_dataset_statistics
from klab.general.strutil import remove_trailing_line_whitespace
from klab.hash.md5 import get_hexdigest
from klab.fs.fsio import read_file, get_file_lines, write_file, write_temp_file
from klab.db.sqlalchemy_interface import row_to_dict, get_or_create_in_transaction, get_single_record_from_query
from klab.rosetta.input_files import Mutfile, Resfile
from kddg.api import dbi
from kddg.api.data import DataImportInterface, json_dumps
import kddg.api.schema as dbmodel
from kddg.api.layers import *
from kddg.api import settings
sys_settings = settings.load()
class FatalException(Exception): pass
class PartialDataException(Exception): pass
class SanityCheckException(Exception): pass
DeclarativeBase = dbmodel.DeclarativeBase
class MutationSet(object):
'''This class is a leftover from Lin's work and should probably be folded into an API function along with the functions that call this.
I wrote a function elsewhere to create a list of all mutations for a PDB file - maybe check out the ubiquitin complex project - which
can probably be used to replace most of this code.'''
def __init__(self):
self.mutations = []
def addMutation(self, chainID, residueID, wildtypeAA, mutantAA):
self.mutations.append((chainID, residueID, wildtypeAA, mutantAA))
def getChains(self):
return sorted(list(set([m[0] for m in self.mutations])))
class ddG(object):
'''This is the base database API class. It should not be used directly to create interface objects. Instead, use one
of the derived classes e.g. MonomericStabilityDDGInterface or the clean user API which hides internal functionality.
The clean API is instantiated as in the example below:
from kddg.api.monomer import get_interface as get_protein_stability_interface
stability_api = get_protein_stability_interface(read_file('ddgdb.pw'))
stability_api.help()
Objects of this class and derived subclasses have three main members:
self.DDG_db - a database interface used to interact directly with the database via MySQL commands
self.DDG_db_utf - the same interface but with UTF support. This should be used when dealing with UTF fields e.g. publication data
self.prediction_data_path - this is the location on the file server where output from jobs of the derived class type (e.g. binding affinity jobs) should be stored.
'''
GET_JOB_FN_CALL_COUNTER_MAX = 10
def __init__(self, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
if passwd:
passwd = passwd.strip()
self.DDG_db = dbi.ddGDatabase(passwd = passwd, username = username, hostname = hostname, port = port)
self.DDG_db_utf = dbi.ddGDatabase(passwd = passwd, username = username, hostname = hostname, use_utf = True, port = port)
self.prediction_data_path = None
self.rosetta_scripts_path = rosetta_scripts_path
self.rosetta_database_path = rosetta_database_path
self.PredictionTable = self._get_sqa_prediction_table()
# Before continuing, make sure that the SQLAlchemy definitions match the table definitions
dbmodel.test_schema_against_database_instance(self.DDG_db)
# This counter is used to check the number of times get_job is called and raise an exception if this exceeds a certain amount
# If the API is misused then get_job may be called infinitely on one job - this is meant to protect against that
self._get_job_fn_call_counter = {}
self._get_job_fn_call_counter_max = ddG.GET_JOB_FN_CALL_COUNTER_MAX
# Caching dictionaries
self.cached_score_method_details = None
self.prediction_scores_cache = {}
# Create an instance of the import API
try:
self.importer = DataImportInterface.get_interface_with_config_file()
except Exception, e:
colortext.warning('The data import interface could not be set up. Some features in the API rely on this interface. Please check your configuration file.\n{0}\n{1}'.format(e, traceback.format_exc()))
def __del__(self):
pass #self.DDG_db.close() #self.ddGDataDB.close()
#########################################################################################
## Public API
#########################################################################################
#########################################################################################
## Broken API layer
##
## This section contains useful functions which need to be updated to work with the new
## schema or code
#########################################################################################
#== Alien functions ====================================================================
#==
#== These functions do not belong here.
@alien
def write_abacus_graph(self, graph_filename, graph_title, labels, data):
'''NOTE: This function should be generalized and moved into the klab repository.
This is a simple function wrapper around create_abacus_graph which writes the graph to file.'''
byte_stream = self.create_abacus_graph(graph_title, labels, data)
write_file(graph_filename, byte_stream.getvalue(), 'wb')
@alien
def create_abacus_graph(self, graph_title, labels, data):
'''NOTE: This function should be generalized and moved into the klab repository.
This function creates an 'abacus graph' from a set of data. Even though this is technically a scatterplot,
I call this an abacus graph because it looks like rows of beads on lines.
The function takes a graph title, a set of labels (one per row of data), and an array of data where each row
should have the same number of columns.
A byte stream for the graph (currently PNG format but we could parameterize this) is returned. This may be
written directly to a binary file or streamed for online display.
'''
if plt:
assert(data)
image_dpi = 300.0
horizontal_margin = 400.0 # an estimate of the horizontal space not used by the graph
horizontal_spacing = 100.0 # an estimate of the horizontal space between points on the same line
vertical_margin = 100.0 # an estimate of the vertical space not used by the graph
vertical_spacing = 50.0 # the rough amount of pixels between abacus lines
point_size = 50 # the size of datapoints in points^2.
y_offset = 1.0
points_per_line = set([len(line[1]) for line in data])
assert(len(points_per_line) == 1)
points_per_line = points_per_line.pop()
assert(len(labels) == points_per_line)
number_of_lines = float(len(data))
number_of_labels = float(len(labels))
height_in_inches = max(600/image_dpi, (vertical_margin + (vertical_spacing * number_of_lines)) / image_dpi) # Use a minimum of 600 pixels in height. This prevents graphs with a small number of lines (<=10) from becoming squashed.
width_in_inches = max(700/image_dpi, (horizontal_margin + (horizontal_spacing * points_per_line)) / image_dpi) # Use a minimum of 700 pixels in width. This prevents graphs with a small number of labels (e.g. 1) from becoming squashed.
graph_color_scheme = matplotlib.cm.jet
#y_offset = (1.75 * data_length) / 128
#image_dpi = (400 * data_length) / 128
#image_dpi = 400
#point_sizes = {1 : 100, 64: 75, 128: 50, 192: 25, 256: 10}
#index = round(data_length / 64.0) * 64
#point_size = point_sizes.get(index, 10)
fig = plt.figure(figsize=(width_in_inches, height_in_inches)) # figsize is specified in inches - w, h
fig.set_dpi(image_dpi)
# Create three identically-sized lists. Each triple consists of an x-coordinate, a y-coordinate, and the DDG value
# and corresponds to a 1 in the matrix i.e. we should draw a point/abacus bead at these coordinates.
x_values = []
y_values = []
x_coordinate_skip = 3 # x-axis distance between two points
y_coordinate_skip = 7 # y-axis distance between two points
ddg_values = []
y = 0
for line in data:
x = 0
y += y_coordinate_skip
w = line[0]
#plt.text(30, y, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=9)
for point in line[1]:
x += x_coordinate_skip
if point == 1:
x_values.append(x)
y_values.append(y)
ddg_values.append(line[0])
# Draw the scatter plot
plt.scatter(x_values, y_values, c=ddg_values, s=point_size, cmap=graph_color_scheme, edgecolors='none', zorder=99)
# Define the limits of the cartesian coordinates. Add extra space on the right for the DDG values.
extra_space = 1.3
plt.axis((0, (points_per_line + 1 + extra_space) * x_coordinate_skip, -15, (y_coordinate_skip * number_of_lines) + 15))
plt.tick_params(
axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
left='off', # ticks along the left edge are off
labelleft='off', # labels along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
# Add the mutation labels at the bottom of the diagram
x = 1.9
for i in range(len(labels)):
l = labels[i]
plt.text(x, -5 + ((i % 2) * -5), l, fontdict=None, withdash=True, fontsize=6)
x += x_coordinate_skip
added_zero_line = False
last_y_value = 0
y = 0
for line in data:
x = 0
y += 7
plt.plot([1, 25], [y, y], color='#999999', linestyle='-', linewidth=0.1)
# Add a DDG value on every third line
if y % 21 == 7:
plt.text(((points_per_line + 0.6) * x_coordinate_skip) , y-y_offset, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=6)
if not added_zero_line:
if line[0] > 0:
plt.plot([1, 25], [0.5 + ((y + last_y_value) / 2), 0.5 + ((y + last_y_value) / 2)], color='k', linestyle='-', linewidth=1)
added_zero_line = True
else:
last_y_value = y
plt.text(((points_per_line + 0.6) * x_coordinate_skip), y-y_offset, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=6)
# Set the colorbar font size and then add a colorbar
#cbar.ax.tick_params(labelsize=6)
#plt.colorbar(use_gridspec=True)
#ax = fig.add_subplot(111)
# Add a title. Note: doing this after the colorbar code below messes up the alignment.
# Adjust the wrap length to the width of the graph
wrap_length = 40 + max(0, (points_per_line - 3) * 14)
graph_title = "\n".join(textwrap.wrap(graph_title, wrap_length))
plt.title(graph_title, fontdict={'fontsize' : 6})
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax = fig.add_subplot(111)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05) # pad specifies the padding between the right of the graph and the left of the colorbar, size seems to specify the width of the colobar relative to the graph
CS3 = plt.contourf([[0,0],[0,0]], ddg_values, cmap=graph_color_scheme)
#plt.colorbar(CS3)
#cbar = fig.colorbar(CS3, format='%.2f')
cbar = fig.colorbar(CS3, format='%.2f', cax=cax)
cbar.set_label('$\Delta\Delta$G',size=6)
cbar.ax.tick_params(labelsize=5)
# Use the tight_layout command to tighten up the spaces. The pad, w_pad, and h_pad parameters are specified in fraction of fontsize.
plt.tight_layout(pad=0.5)
#quadmesh = ax.pcolormesh(theta,phi,data)
#cb = fig.colorbar(quadmesh,ax=ax, shrink=.5, pad=.2, aspect=10)
#cax = ax.imshow(ddg_values, interpolation='nearest', cmap=matplotlib.cm.coolwarm)
#cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
#surf = ax.contourf(X,Y,Z, 8, cmap=cm.jet)
#cbar = fig.colorbar(surf, use_gridspec=True, shrink=0.5, aspect=20, fraction=.12,pad=.02)
#cbar.set_label('Activation',size=18)
byte_stream = BytesIO()
plt.savefig(byte_stream, dpi=image_dpi, format="png")
plt.close(fig)
return byte_stream
else:
return None
@alien
def get_flattened_prediction_results(self, PredictionSet):
'''This is defined here as an API function but should be defined as a stored procedure.'''
# @todo: this is the monomeric stability implementation - move this into that API
#Ubiquitin scan: 1UBQ p16
#Prediction.Scores no longer exists
kellogg_score_id = self.get_score_method_id('global', method_type = 'protocol 16', method_authors = 'kellogg', fuzzy = True)
noah_score_id = self.get_score_method_id('local', method_type = 'position', method_parameters = '8Å radius', method_authors = 'Noah Ollikainen', fuzzy = False)
score_ids = {}
score_ids['kellogg'] = kellogg_score_id
score_ids['noah8A'] = noah_score_id
records = self.DDG_db.execute_select('''
SELECT Prediction.ID AS PredictionID, Prediction.ExperimentID, Experiment.PDBFileID, ExperimentMutations.FlattenedMutations, TIMEDIFF(Prediction.EndDate, Prediction.StartDate) AS TimeTaken, PredictionStructureScore.ScoreMethodID, PredictionStructureScore.DDG
FROM Prediction INNER JOIN
(
SELECT ExperimentID, GROUP_CONCAT(Mutation SEPARATOR ', ') AS FlattenedMutations FROM
(
SELECT ExperimentID, CONCAT(Chain, ' ', WildTypeAA, ResidueID, MutantAA) As Mutation FROM ExperimentMutation
) AS FlattenedMutation
GROUP BY ExperimentID
) AS ExperimentMutations
ON Prediction.ExperimentID=ExperimentMutations.ExperimentID
INNER JOIN Experiment ON Prediction.ExperimentID=Experiment.ID
INNER JOIN PredictionStructureScore ON Prediction.ID=PredictionStructureScore.PredictionID
WHERE Prediction.PredictionSet=%s AND Prediction.Status="done" AND ScoreType="DDG" AND StructureID=-1 AND (ScoreMethodID=%s OR ScoreMethodID=%s)
ORDER BY ScoreMethodID''', parameters=(PredictionSet, kellogg_score_id, noah_score_id))
return records, score_ids
#== Broken functions ====================================================================
#@informational or part of a filter API
@brokenfn
def get_publications_for_result_set(self, result_set):
'''This should be fixed once the filter API has been rewritten to work with the new DB schema. It returns a list of publications associated with the filter result set.'''
raise Exception('')
from ddgfilters import ExperimentResultSet, StructureResultSet
if result_set:
structures = None
experiments = None
if result_set.isOfClass(ExperimentResultSet):
experiments = result_set
elif ExperimentResultSet in result_set.__class__.allowed_restrict_sets:
experiments, experiment_map = result_set.getExperiments()
if result_set.isOfClass(StructureResultSet):
structures = result_set
elif StructureResultSet in result_set.__class__.allowed_restrict_sets:
structures, structure_map = result_set.getStructures()
if structures:
colortext.printf("\nRelated publications for structures:", "lightgreen")
for id in sorted(structures.IDs):
pubs = self.DDG_db.callproc("GetPublications", parameters=(id,))
print(id)
for pub in pubs:
print("\t%s: %s" % (pub["Type"], pub["PublicationID"]))
if experiments:
colortext.printf("\nRelated publications for experiments:", "lightgreen")
for id in sorted(experiments.IDs):
pubs = self.DDG_db.callproc("GetExperimentPublications", parameters=(id,))
print(id)
for pub in pubs:
print("\t%s: %s" % (pub["Type"], pub["SourceLocation.ID"]))
experimentsets = [e[0] for e in self.DDG_db.execute_select("SELECT DISTINCT Source FROM Experiment WHERE ID IN (%s)" % ','.join(map(str, list(experiments.IDs))), cursorClass = dbi.StdCursor)]
if experimentsets:
colortext.printf("\nRelated publications for experiment-set sources:", "lightgreen")
for id in sorted(experimentsets):
print(id)
pubs = self.DDG_db.execute_select("SELECT ID, Type FROM SourceLocation WHERE SourceID=%s", parameters = (id,))
for pub in pubs:
print("\t%s: %s" % (pub["Type"], pub["ID"]))
else:
raise Exception("Empty result set.")
#@analysis_api
@brokenfn
def analyze(self, prediction_result_set, outpath = os.getcwd()):
'''This function needs to be rewritten and renamed. It calls the analysis module (which creates LaTeX reports) to generate correlation and MAE graphs.'''
raise Exception('The import of analysis was commented out - presumably some change in DB structure or API broke the import. This code probably needs to be fixed.')
import analysis
PredictionIDs = sorted(list(prediction_result_set.getFilteredIDs()))
colortext.printf("Analyzing %d records:" % len(PredictionIDs), "lightgreen")
#results = self.DDG_db.execute_select("SELECT ID, ExperimentID, ddG FROM Prediction WHERE ID IN (%s)" % join(map(str, PredictionIDs), ","))
#for r in results:
# r["ddG"] = pickle.loads(r["ddG"])
# predicted_score = r["ddG"]["data"]["ddG"]
# experimental_scores = [expscore["ddG"] for expscore in self.DDG_db.callproc("GetScores", parameters = r["ExperimentID"])]
# mean_experimental_score = float(sum(experimental_scores)) / float(len(experimental_scores))
results = self.DDG_db.execute_select("SELECT ID, ExperimentID, ddG FROM Prediction WHERE ID IN (%s)" % ','.join(map(str, PredictionIDs)))
analysis.plot(analysis._R_mean_unsigned_error, analysis._createMAEFile, results, "my_plot1.pdf", average_fn = analysis._mean)
analysis.plot(analysis._R_correlation_coefficient, analysis._createAveragedInputFile, results, "my_plot2.pdf", average_fn = analysis._mean)
colortext.printf("Done", "lightgreen")
#score.ddgTestScore
#== Deprecated functions =================================================================
@deprecated
def create_PredictionSet(self, PredictionSetID, halted = True, Priority = 5, BatchSize = 40, allow_existing_prediction_set = False, contains_protein_stability_predictions = True, contains_binding_affinity_predictions = False): raise Exception('This function has been deprecated. Use add_prediction_set instead.')
@deprecated
def charge_PredictionSet_by_number_of_residues(self, PredictionSet): raise Exception('This function has been deprecated. Use _charge_prediction_set_by_residue_count instead.')
@deprecated
def createPredictionsFromUserDataSet(self, userdatasetTextID, PredictionSet, ProtocolID, KeepHETATMLines, StoreOutput = False, Description = {}, InputFiles = {}, quiet = False, testonly = False, only_single_mutations = False, shortrun = False): raise Exception('This function has been deprecated. Use add_prediction_run instead.')
@deprecated
def add_predictions_by_pdb_id(self, pdb_ID, PredictionSet, ProtocolID, status = 'active', priority = 5, KeepHETATMLines = False, strip_other_chains = True): raise Exception('This function has been deprecated. Use add_jobs_by_pdb_id instead.')
@deprecated
def addPrediction(self, experimentID, UserDataSetExperimentID, PredictionSet, ProtocolID, KeepHETATMLines, PDB_ID = None, StoreOutput = False, ReverseMutation = False, Description = {}, InputFiles = {}, testonly = False, strip_other_chains = True): raise Exception('This function has been deprecated. Use add_job instead.')
@deprecated
def add_pdb_file(self, filepath, pdb_id): raise Exception('This function has been deprecated. Use the kddg.api.data.add_pdb_* functions instead.')
@deprecated
def getPublications(self, result_set): raise Exception('This function has been deprecated. Use get_publications_for_result_set instead.')
@deprecated
def getData(self, predictionID): raise Exception('This function has been deprecated. Use get_job_data instead.')
@deprecated
def dumpData(self, outfile, predictionID): raise Exception('This function has been deprecated. Use write_job_data_to_disk instead (note the change in argument order).')
@deprecated
def get_amino_acids_for_analysis(self): raise Exception('This function has been deprecated. Use get_amino_acid_details instead.')
@deprecated
def get_pdb_details_for_analysis(self, pdb_ids, cached_pdb_details = None): raise Exception('This function has been deprecated. Use get_pdb_details instead.')
@deprecated
def add_pdb_file_content(self, pdb_content): raise Exception('This function may never have been used and should be removed.') # return self._add_file_content(pdb_content, rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
@deprecated
def create_pymol_session(self, download_dir, prediction_id, task_number, keep_files = True): raise Exception('This function has been deprecated. Use create_pymol_session_in_memory and write_pymol_session instead.''')
@deprecated
def createDummyExperiment(self, pdbID, mutationset, chains, sourceID, ddG, ExperimentSetName = "DummySource"):
#todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
raise Exception("Out of date function.")
Experiment = dbi.ExperimentSet(pdbID, ExperimentSetName)
for m in mutationset.mutations:
Experiment.addMutation(m[0], m[1], m[2], m[3])
for c in chains:
Experiment.addChain(c)
Experiment.addExperimentalScore(sourceID, ddG, pdbID)
Experiment.commit(self.DDG_db)
@deprecated
def createDummyExperiment_ankyrin_repeat(self, pdbID, mutations, chain):
raise Exception("Out of date function.")
#todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
experiment = dbi.ExperimentDefinition(self.DDG_db, pdbID, interface = None)
experiment.addChain(chain)
for m in mutations:
experiment.addMutation(m)
experiment.commit(False)
#@analysis_api
@deprecated
def test_results(self, output_dir, PredictionSet):
PredictionIDs = []
results = self.get_flattened_prediction_results(PredictionSet)
mutation_lists = {}
for r in results:
PredictionIDs.append(r['PredictionID'])
mutation_lists[r['PredictionID']] = r['FlattenedMutations']
RandomPredictionIDs = [PredictionIDs[random.randint(0, len(PredictionIDs) - 1)] for k in range(10)]
RandomPredictionIDs = [54090L, 53875L, 54085L, 54079L, 54008L, 53853L, 53952L, 54056L, 53935L, 53893L]
# Retrieve and unzip results
if not(os.path.exists(output_dir)):
os.mkdir(output_dir)
for PredictionID in PredictionIDs:#RandomPredictionIDs:
if not(os.path.exists(os.path.join(output_dir, str(PredictionID)))):
colortext.message('Retrieving archive for Prediction %d.' % PredictionID)
self.write_job_data_to_disk(PredictionID, output_dir)
# Get the sequences of the wildtype and mutant structures
count = 0
for PredictionID in PredictionIDs:#RandomPredictionIDs:
wildtype_sequences = set()
mutation_sequences = set()
working_dir = os.path.join(os.path.join(output_dir, str(PredictionID)))
for f in glob.glob(os.path.join(working_dir, '*.pdb')):
if os.path.split(f)[1].startswith('mut_'):
p = PDB.from_filepath(f)
assert(len(p.atom_sequences) == 1)
sequence = str(p.atom_sequences.values()[0])
mutation_sequences.add(sequence)
elif os.path.split(f)[1].startswith('repacked_wt_'):
p = PDB.from_filepath(f)
assert(len(p.atom_sequences) == 1)
sequence = str(p.atom_sequences.values()[0])
wildtype_sequences.add(sequence)
assert(len(wildtype_sequences) == 1)
assert(len(mutation_sequences) == 1)
wildtype_sequence = wildtype_sequences.pop()
mutation_sequence = mutation_sequences.pop()
colortext.message('Prediction %d. Mutations: %s' % (PredictionID, mutation_lists[PredictionID]))
assert(len(wildtype_sequence) == len(mutation_sequence))
s = ''
t = ''
for x in range(len(wildtype_sequence)):
if wildtype_sequence[x] != mutation_sequence[x]:
s += colortext.make(wildtype_sequence[x], color="green")
t += colortext.make(mutation_sequence[x], color="yellow")
else:
s += wildtype_sequence[x]
t += mutation_sequence[x]
print(s)
print(t)
@deprecated
def add_mutant(self, pdb_ID, mutant_mutations):
'''Use this function to add one set of mutations ON THE SAME CHAIN (i.e. corresponding to one mutant) to the database.
todo: generalize this to allow different chains
'''
raise Exception("Out of date function.")
#todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
chains = set([m.Chain for m in mutant_mutations])
assert(len(chains) == 1)
colortext.warning("Adding mutation: %s." % ', '.join(map(str, mutant_mutations)))
self.createDummyExperiment_ankyrin_repeat(pdb_ID, mutant_mutations, chains.pop())
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_misc
def get_amino_acid_details(self):
'''This function returns a dictionary of canonical amino acid details e.g. polarity, aromaticity, size etc.'''
amino_acids = {}
polarity_map = {'polar' : 'P', 'charged' : 'C', 'hydrophobic' : 'H'}
aromaticity_map = {'aliphatic' : 'L', 'aromatic' : 'R', 'neither' : '-'}
results = self.DDG_db.execute_select('SELECT * FROM AminoAcid')
for r in results:
if r['Code'] != 'X':
amino_acids[r['Code']] = dict(
LongCode = r['LongCode'],
Name = r['Name'],
Polarity = polarity_map.get(r['Polarity'], 'H'),
Aromaticity = aromaticity_map[r['Aromaticity']],
Size = r['Size'],
van_der_Waals_volume = r['Volume']
)
amino_acids['Y']['Polarity'] = 'H' # tyrosine is a special case
return amino_acids
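# A sketch of the shape of the returned mapping (values are illustrative, not
# taken from the database):
#   {'A': {'LongCode': 'ALA', 'Name': 'Alanine', 'Polarity': 'H',
#          'Aromaticity': 'L', 'Size': 'small', 'van_der_Waals_volume': 67.0}, ...}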
@informational_misc
def get_publication(self, ID):
'''Returns the information (title, publication, authors etc.) for a publication.'''
r = self.DDG_db_utf.execute_select('SELECT * FROM Publication WHERE ID=%s', parameters=(ID,))
if not r:
raise Exception('No publication exists with ID %s.' % str(ID))
r = r[0]
pubmed_id = self.DDG_db_utf.execute_select('SELECT * FROM PublicationIdentifier WHERE SourceID=%s AND Type="PMID"', parameters=(r['ID'],))
if pubmed_id:
pubmed_id = pubmed_id[0]['ID']
authors = self.DDG_db_utf.execute_select('SELECT * FROM PublicationAuthor WHERE PublicationID=%s ORDER BY AuthorOrder', parameters=(r['ID'],))
authorlist = []
for a in authors:
authorlist.append(dict(FirstName = a['FirstName'], MiddleNames = a['MiddleNames'], Surname = a['Surname']))
pub_details = dict(
Title = r['Title'],
Publication = r['Publication'],
Volume = r['Volume'],
StartPage = r['StartPage'],
EndPage = r['EndPage'],
PublicationYear = r['PublicationYear'],
PublicationDate = r['PublicationDate'],
DOI = r['DOI'],
URL = r['URL'],
PubMedID = pubmed_id,
Authors = authorlist,
)
if pub_details['PublicationDate']:
pub_details['PublicationDate'] = pub_details['PublicationDate'].strftime('%Y-%m-%d')
if not pub_details['URL'] and pub_details['DOI']:
pub_details['URL'] = 'https://dx.doi.org/%s' % pub_details['DOI']
return pub_details
@informational_misc
def get_publications(self):
'''Returns the information (title, publication, authors etc.) for all publications.'''
publications = {}
for r in self.DDG_db.execute_select('SELECT ID FROM Publication'):
publications[r['ID']] = self.get_publication(r['ID'])
return publications
def _cache_all_score_method_details(self):
'''Helper function for get_score_method_details.'''
score_methods = {}
for r in self.get_session(utf = True).query(dbmodel.ScoreMethod):
score_methods[r.ID] = row_to_dict(r)
self.cached_score_method_details = score_methods
return score_methods
@informational_misc
def get_pdb_residues_by_pos(self, pdb_id, strip_res_ids = False):
'''Returns a mapping chain_id -> residue_id -> residue_aa.'''
chain_residue_by_pos = {}
for c in self.get_session().query(dbmodel.PDBChain).filter(dbmodel.PDBChain.PDBFileID == pdb_id):
chain_residue_by_pos[c.Chain] = {}
for r in self.get_session().query(dbmodel.PDBResidue).filter(dbmodel.PDBResidue.PDBFileID == pdb_id):
chain_residue_by_pos[r.Chain][r.ResidueID.strip() if strip_res_ids else r.ResidueID] = r.ResidueAA
return chain_residue_by_pos
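# A sketch of the returned structure (illustrative PDB residue numbering):
#   {'A': {'  10 ': 'G', '  11 ': 'A', ...}, 'B': {...}}
# where the residue keys are stripped of padding when strip_res_ids is True.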
@informational_misc
def get_score_method_details(self, score_method_id = None, allow_recaching = True):
'''Returns all score method details unless a score method id is passed, in which case only the details for that method are returned.'''
if not self.cached_score_method_details or (score_method_id and not(score_method_id in self.cached_score_method_details)):
self._cache_all_score_method_details()
if score_method_id:
# Returns ScoreMethod record for specific score_method_id
if score_method_id in self.cached_score_method_details:
return self.cached_score_method_details[score_method_id]
else:
# We have already refreshed the cache, so fail
raise Exception("score_method_id {0} isn't in score methods table".format(score_method_id))
else:
# Returns all defined ScoreMethod records
return self.cached_score_method_details
def output_score_method_information(self, score_method_id, output_directory, analysis_set_id = None, take_lowest = None, expectn = None):
'''Outputs details about score method to a txt file in the specified output directory'''
score_method_details = sorted([(k, v) for k, v in self.get_score_method_details(score_method_id = score_method_id).iteritems()])
with open(os.path.join(output_directory, 'score_method.txt'), 'w') as f:
f.write('Score method ID: %s\n' % str(score_method_id))
if analysis_set_id:
f.write('Analysis set ID: %s\n' % str(analysis_set_id))
if take_lowest:
f.write('Take lowest (TopX): %s scores\n' % str(take_lowest))
if expectn:
f.write('Expected number of output structures: %s\n' % str(expectn))
if len(score_method_details) > 0:
f.write('\nScore method details\n')
for key, value in score_method_details:
f.write('%s: %s\n' % (str(key), str(value)))
@informational_misc
def get_score_method_id(self, method_name, method_type = None, method_parameters = None, method_authors = None, fuzzy = True):
'''Returns the ID for the ScoreMethod with the specified parameters.
If fuzzy is True then the string matching uses LIKE rather than equality.
e.g. method_id = self.get_score_method_id('interface', method_authors = 'kyle')
'''
if fuzzy:
match_phrase = 'LIKE %s'
method_name = '%{0}%'.format(method_name)
if method_type: method_type = '%{0}%'.format(method_type)
if method_parameters: method_parameters = '%{0}%'.format(method_parameters)
if method_authors: method_authors = '%{0}%'.format(method_authors)
else:
match_phrase = '=%s'
condition_parameters = [method_name]
conditions = ['MethodName {0}'.format(match_phrase)]
if method_type:
conditions.append('MethodType {0}'.format(match_phrase))
condition_parameters.append(method_type)
if method_parameters:
conditions.append('Parameters {0}'.format(match_phrase))
condition_parameters.append(method_parameters)
if method_authors:
conditions.append('Authors {0}'.format(match_phrase))
condition_parameters.append(method_authors)
conditions = ' AND '.join(conditions)
condition_parameters = tuple(condition_parameters)
results = self.DDG_db_utf.execute_select('SELECT ID FROM ScoreMethod WHERE {0}'.format(conditions), parameters=condition_parameters)
if not results:
raise Exception('Error: No ScoreMethod records were found using the criteria: {0}'.format(', '.join(map(str, [s for s in [method_name, method_type, method_parameters] if s]))))
elif len(results) > 1:
raise Exception('Error: Multiple ScoreMethod records were found using the criteria: {0}'.format(', '.join(map(str, [s for s in [method_name, method_type, method_parameters] if s]))))
else:
return results[0]['ID']
@informational_misc
def get_score_dict(self, prediction_id = None, score_method_id = None, score_type = None, structure_id = None, prediction_structure_scores_table = None, prediction_id_field = None):
'''Returns a dict with keys for all fields in the Score table. The optional arguments can be used to set the
corresponding fields of the dict. All other fields are set to None.'''
if prediction_structure_scores_table == None:
prediction_structure_scores_table = self._get_prediction_structure_scores_table()
if prediction_id_field == None:
prediction_id_field = self._get_prediction_id_field()
# Relax the typing
if structure_id: structure_id = int(structure_id)
if prediction_id: prediction_id = int(prediction_id)
if score_method_id: score_method_id = int(score_method_id)
if score_type:
allowed_score_types = self._get_allowed_score_types()
if score_type not in allowed_score_types:
raise Exception('"{0}" is not an allowed score type. Allowed types are: "{1}".'.format(score_type, '", "'.join(sorted(allowed_score_types))))
fieldnames = set([f for f in self.DDG_db.FieldNames.__dict__[prediction_structure_scores_table].__dict__.keys() if not(f.startswith('_'))])
d = dict.fromkeys(fieldnames, None)
if prediction_id_field != None:
d[prediction_id_field] = prediction_id
d['ScoreMethodID'] = score_method_id
d['ScoreType'] = score_type
d['StructureID'] = structure_id
return d
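# Illustrative call (hypothetical ids):
#   get_score_dict(prediction_id = 1234, score_method_id = 7, score_type = 'DDG', structure_id = -1)
# returns a dict keyed by every column of the scores table with those four
# fields set and every other field set to None.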
@informational_pdb
def get_pdb_chain_coordinates(self, pdb_id, chain_id):
'''Read a saved dataframe.'''
zipped_coordinates = self.DDG_db.execute_select('SELECT Coordinates FROM PDBChain WHERE PDBFileID=%s AND Chain=%s AND Coordinates IS NOT NULL', parameters=(pdb_id, chain_id))
if zipped_coordinates:
assert(len(zipped_coordinates) == 1)
buf = BytesIO(zipped_coordinates[0]['Coordinates'])
gf = gzip.GzipFile(fileobj=buf, mode="rb")
residue_matrix = None
try:
store = pandas.read_hdf(gf)
residue_matrix = store['dataframe']
store.close()
except NotImplementedError, e:
# "Support for generic buffers has not been implemented"
try:
nfname = None
f, nfname = open_temp_file('/tmp', suffix = '.hdf5')
f.close()
write_file(nfname, gf.read(), ftype = 'wb')
store = pandas.HDFStore(nfname)
residue_matrix = store['dataframe']
store.close()
os.remove(nfname)
print('get_pdb_chain_coordinates here')
except:
if nfname: os.remove(nfname)
raise
return residue_matrix
return None
@informational_pdb
def get_pdb_chains_for_prediction(self, prediction_id):
'''Returns the PDB file ID and a list of chains for the prediction.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_chain_sets_for_mutatagenesis(self, mutagenesis_id, complex_id = None):
'''Gets a list of possibilities for the associated complex and calls get_chains_for_mutatagenesis on each.
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
This is a useful method for listing the possible complexes to use in a prediction or to determine whether one
may be missing and the database needs to be updated.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_chains_for_mutatagenesis(self, mutagenesis_id, pdb_file_id, pdb_set_number, complex_id = None, tsession = None):
'''Returns the PDB chains used in the mutagenesis.
Note: At present, monomeric data e.g. protein stability does not have the notion of complex in our database
but this abstraction is planned so that multiple choices of PDB file and chain can be easily represented.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_pdb_mutations_for_mutagenesis(self, mutagenesis_id, pdb_file_id, set_number, complex_id = None):
'''Returns the PDB mutations for a mutagenesis experiment as well as the PDB residue information.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_pdb_details(self, pdb_ids, cached_pdb_details = None):
'''Returns the details stored in the database about the PDB files associated with pdb_ids e.g. chains, resolution,
technique used to determine the structure etc.'''
raise Exception('Replace this with a call to kddg.api.data.py::DataImportInterface.get_pdb_details()')
return self.importer.get_pdb_details(pdb_ids, cached_pdb_details = None)
pdbs = {}
cached_pdb_ids = []
if cached_pdb_details:
cached_pdb_ids = set(cached_pdb_details.keys())
for pdb_id in pdb_ids:
if pdb_id in cached_pdb_ids:
pdbs[pdb_id] = cached_pdb_details[pdb_id]
else:
record = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))[0]
p = PDB(record['Content'])
pdb_chain_lengths = {}
for chain_id, s in p.atom_sequences.iteritems():
pdb_chain_lengths[chain_id] = len(s)
# todo: get the list of protein chains and PDB residues from the database and assert that they are the same
# as what were extracted from the PDB file.
# maybe change 'chains' below to 'protein_chains'
pdbs[pdb_id] = dict(
chains = pdb_chain_lengths,
TM = record['Transmembrane'],
Technique = record['Techniques'],
XRay = record['Techniques'].find('X-RAY') != -1,
Resolution = record['Resolution'],
)
return pdbs
@informational_pdb
def get_prediction_set_pdb_chain_details(self, PredictionSet, cached_pdb_details = None, restrict_to_pdbs = set()):
'''Used by the analysis API. This could be combined with get_pdb_details.'''
pdb_ids = [r['PDBFileID'] for r in self.DDG_db.execute_select('SELECT DISTINCT PDBFileID FROM {0} INNER JOIN {1} ON {1}ID={1}.ID WHERE PredictionSet=%s ORDER BY PDBFileID'.format(self._get_prediction_table(), self._get_user_dataset_experiment_table()), parameters=(PredictionSet,))]
if not pdb_ids:
try:
pdb_ids = [r['PDBFileID'] for r in self.DDG_db.execute_select('SELECT DISTINCT PDBFileID FROM {0} INNER JOIN {1} ON {1}ID={1}.ID WHERE PredictionSet=%s ORDER BY PDBFileID'.format(self._get_prediction_table(), 'Experiment'), parameters=(PredictionSet,))]
except: pass
if restrict_to_pdbs:
pdb_ids = sorted(set(pdb_ids).intersection(restrict_to_pdbs))
pdbs = {}
cached_pdb_ids = []
if cached_pdb_details:
cached_pdb_ids = set(cached_pdb_details.keys())
for pdb_id in pdb_ids:
if pdb_id in cached_pdb_ids:
pdbs[pdb_id] = cached_pdb_details[pdb_id]
else:
record = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))[0]
p = PDB(record['Content'])
d = {}
chain_ids = set(p.chain_types.keys()).union(set(p.seqres_chain_order)).union(set(p.atom_sequences.keys()))
d['Chains'] = dict.fromkeys(chain_ids)
for chain_id in chain_ids:
d['Chains'][chain_id] = dict(
Sequence = str(p.atom_sequences.get(chain_id) or ''),
Type = p.chain_types.get(chain_id),
)
d['Resolution'] = p.get_resolution()
d['MethodOfDetermination'] = p.get_techniques()
pdbs[pdb_id] = d
return pdbs
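# Illustrative usage of the returned structure (a sketch; ppi_api and the prediction set name are assumptions):
#   chain_details = ppi_api.get_prediction_set_pdb_chain_details('zemu_run_1')
#   for pdb_id, d in sorted(chain_details.iteritems()):
#       print pdb_id, d['Resolution'], sorted(d['Chains'].keys())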
@informational_job
def get_development_protocol(self, development_protocol_id):
'''Possibly temporary function which returns a DevelopmentProtocol record from the database.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_complex_details(self, complex_id):
'''Returns the database record for the given complex.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_job_description(self, prediction_id):
'''Returns the details necessary to run the job.'''
try:
prediction = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
except Exception, e:
raise colortext.Exception('No details could be found for prediction #{0} in the database.\n{1}\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
return str(prediction)
@informational_job
def get_job_details(self, prediction_id, include_files = True, truncate_content = None):
'''Returns the details necessary to run the job.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@informational_misc
def get_file_content_cache_stats(self):
'''Returns basic statistics on the file content cache access.'''
return self.importer.get_file_content_cache_stats()
@informational_job
def get_job_files(self, prediction_id, truncate_content = None, set_pdb_occupancy_one = True):
'''Returns a dict mapping the stages (e.g. 'input', 'output', 'analysis') of a job to the files associated with
that stage.
If truncate_content is set, it should be an integer specifying the number of characters to include. This is useful
to see if the file header is as expected.
'''
assert(truncate_content == None or (isinstance(truncate_content, int) and truncate_content >= 0))
job_files = {}
prediction_record = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
for pf in prediction_record.files:
r = row_to_dict(pf)
if truncate_content != 0:
fcontent = pf.content
r['MIMEType'] = fcontent.MIMEType
r['Filesize'] = fcontent.Filesize
r['MD5HexDigest'] = fcontent.MD5HexDigest
file_content = self.importer.get_file_content_from_cache(fcontent.ID)
if set_pdb_occupancy_one and pf.Filetype == 'PDB': # Set all occupancies to 1
pdb = PDB(file_content.split("\n"))
pdb.fillUnoccupied()
r['Content'] = pdb.get_content()
else:
r['Content'] = file_content
if truncate_content:
if len(file_content) > int(truncate_content):
r['Content'] = '%s...' % file_content[:int(truncate_content)]
else:
r['Content'] = None
r['MIMEType'] = None
r['Filesize'] = None
r['MD5HexDigest'] = None
r['Content'] = None
job_stage = r['Stage']
del r['Stage']
job_files[job_stage] = job_files.get(job_stage, [])
job_files[job_stage].append(r)
return job_files
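# Illustrative usage (a sketch; ppi_api and the prediction ID are assumptions):
#   job_files = ppi_api.get_job_files(12345, truncate_content = 30)
#   for stage, files in sorted(job_files.iteritems()):
#       print stage, len(files)   # e.g. 'input' -> number of associated file records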
@informational_job
def get_prediction_set_details(self, prediction_set_id):
'''Returns the PredictionSet record from the database.'''
tsession = self.get_session()
prediction_set = tsession.query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == prediction_set_id)
if prediction_set.count() == 1:
d = row_to_dict(prediction_set.one())
d['Job status summary'] = self._get_prediction_set_status_counts(prediction_set_id)
return d
return None
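# Illustrative usage (a sketch; the prediction set name is an assumption):
#   details = ppi_api.get_prediction_set_details('zemu_run_1')
#   if details:
#       print details['Status'], details['Job status summary']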
def _get_prediction_set_status_counts(self, prediction_set_id):
'''Returns a summary of the prediction job statuses for the prediction set.'''
return dict((x, y) for x, y in self.get_session().query(self.PredictionTable.Status, func.count(self.PredictionTable.Status)).filter(self.PredictionTable.PredictionSet == prediction_set_id).group_by(self.PredictionTable.Status))
def get_session(self, new_session = False, autoflush = True, autocommit = False, utf = False):
return self.importer.get_session(new_session = new_session, autoflush = autoflush, autocommit = autocommit, utf = utf)
@informational_job
def get_prediction_ids(self, prediction_set_id):
'''Returns the list of Prediction IDs associated with the PredictionSet.'''
self._assert_prediction_set_is_correct_type(prediction_set_id)
return [r.ID for r in self.get_session().query(self.PredictionTable).filter(self.PredictionTable.PredictionSet == prediction_set_id)]
def _get_prediction_set_prediction_table_rows(self, prediction_set_id):
'''Returns a dict mapping each Prediction ID in the PredictionSet to its prediction record.'''
self._assert_prediction_set_is_correct_type(prediction_set_id)
return {r.ID : r for r in self.get_session().query(self.PredictionTable).filter(self.PredictionTable.PredictionSet == prediction_set_id)}
@informational_job
def get_defined_user_datasets(self, tsession = None):
'''Return a dict detailing the defined UserDataSets, their tagged subsets (if any), and the mutagenesis counts
(i.e. the number of prediction cases) of both the user datasets and the associated tagged subsets.'''
tsession = tsession or self.get_session(new_session = True)
d = {}
user_datasets = tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.DatasetType == self._get_prediction_dataset_type())
for uds in user_datasets:
uds = row_to_dict(uds)
q = tsession.query(func.count(self._get_sqa_user_dataset_experiment_table().ID).label('MutagenesisCount')).filter(self._get_sqa_user_dataset_experiment_table().UserDataSetID == uds['ID']).one()
uds['MutagenesisCount'] = q[0]
d[uds['TextID']] = uds
subsets = {}
if self._get_user_dataset_experiment_tag_table():
for tagged_subset in tsession.query(
self._get_sqa_user_dataset_experiment_tag_table().Tag, func.count(self._get_sqa_user_dataset_experiment_tag_table().Tag).label('MutagenesisCount')).filter(and_(
self._get_sqa_user_dataset_experiment_table().ID == self._get_sqa_user_dataset_experiment_tag_table_udsid(),
self._get_sqa_user_dataset_experiment_table().UserDataSetID == uds['ID'])).group_by(self._get_sqa_user_dataset_experiment_tag_table().Tag):
subsets[tagged_subset[0]] = dict(MutagenesisCount = tagged_subset[1])
uds['Subsets'] = subsets
return d
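# Illustrative sketch of how the returned structure is typically consumed (ppi_api is an assumed instance):
#   user_datasets = ppi_api.get_defined_user_datasets()
#   for dataset_name, uds in sorted(user_datasets.iteritems()):
#       print dataset_name, uds['MutagenesisCount'], sorted(uds['Subsets'].keys())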
@informational_job
def get_user_dataset_experiments(self, tsession, user_dataset_name, tagged_subset = None):
'''Returns a list of UserDataSet experiment records for the given user dataset.'''
udse = self._get_sqa_user_dataset_experiment_table()
udse_tag = self._get_sqa_user_dataset_experiment_tag_table()
if tagged_subset:
return tsession.query(udse).filter(and_(
udse.ID == self._get_sqa_user_dataset_experiment_tag_table_udsid(),
udse.UserDataSetID == dbmodel.UserDataSet.ID,
dbmodel.UserDataSet.TextID == user_dataset_name,
udse_tag.Tag == tagged_subset))
else:
return tsession.query(udse).filter(and_(
udse.UserDataSetID == dbmodel.UserDataSet.ID,
dbmodel.UserDataSet.TextID == user_dataset_name))
@informational_job
def get_user_dataset_experiment_details(self, user_dataset_experiment_id, user_dataset_id = None):
'''Returns all the data relating to a user dataset experiment.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_dataset_experiment_details(self, dataset_experiment_id, dataset_id = None):
'''Returns the experimental data relating to a dataset experiment.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def export_dataset_to_json(self, dataset_id):
'''Returns the dataset information in JSON format.'''
return json_dumps(self._export_dataset(dataset_id))
@informational_job
def export_dataset_to_csv(self, dataset_id):
'''Returns the dataset information in CSV format.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_predictions_experimental_details(self, prediction_id, userdatset_experiment_ids_to_subset_ddgs = None, include_files = False, reference_ids = set(), include_experimental_data = True):
'''Returns a dict containing the experimental details for the Prediction. This is what is used by export_prediction_cases_to_json etc.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_experimental_ddgs_by_analysis_set(self, user_dataset_experiment_id = None, reference_ids = set()):
'''Returns a mapping from UserPPDataSetExperimentIDs to dicts mapping analysis Subsets to a dicts containing the
record identifier triple (subset, section, record number), the experimental DDG values, the mean of those values,
and whether the values / one of the values are derived from other measurements e.g.
23 : {
'BeAtMuSiC' : {'Cases' : set([('BeAtMuSiC', 'Main', 1408L)]),
'DDGs' : [{'IsDerivedValue': 0L,
'Value': 2.9802478611},
{'IsDerivedValue': 0L,
'Value': 2.1978328374}],
'IsDerivedValue' : 0L,
'MeanDDG' : 2.5890403492500003},
'SKEMPI' : {'Cases' : set([('SKEMPI', 'Non-derivative', 1L)]),
'DDGs' : [{'IsDerivedValue': 0L, 'Value': 2.9802478611},
{'IsDerivedValue': 0L, 'Value': 2.1978328374}],
'IsDerivedValue' : 0L,
'MeanDDG' : 2.5890403492500003},
'ZEMu' : {'Cases' : set([('ZEMu', 'Main', 1144L)]),
'DDGs' : [{'IsDerivedValue': 0L, 'Value': 2.1978328374}],
'IsDerivedValue' : 0L,
'MeanDDG' : 2.1978328374}}
...
This can be used to: i) generate histograms showing the spread of experimental values for a dataset; or
ii) to add columns to an analysis dataframe so that, once created, it can be analyzed over multiple analysis sets.
'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_prediction_set_case_details(self, prediction_set_id, retrieve_references = True, include_experimental_data = True, prediction_table_rows_cache = None):
'''Returns a dict containing the case information for prediction cases in the prediction set with a structure
expected by the analysis class.'''
# Read the Prediction details
reference_ids = set()
prediction_ids = self.get_prediction_ids(prediction_set_id)
userdatset_experiment_ids_to_subset_ddgs = {}
if include_experimental_data:
userdatset_experiment_ids_to_subset_ddgs = self.get_experimental_ddgs_by_analysis_set(reference_ids = reference_ids)
prediction_cases = {}
for prediction_id in prediction_ids:
UserDataSetExperimentID = self._get_sqa_predictions_user_dataset_experiment_id(prediction_table_rows_cache[prediction_id])
experimental_details = self.get_predictions_experimental_details(prediction_id, userdatset_experiment_ids_to_subset_ddgs, include_experimental_data = include_experimental_data)
experimental_details['PredictionID'] = prediction_id
prediction_cases[UserDataSetExperimentID] = experimental_details
references = {}
if retrieve_references:
for reference_id in sorted(reference_ids):
references[reference_id] = self.get_publication(reference_id)
return dict(
Data = prediction_cases,
References = references,
PredictionSet = self.get_prediction_set_details(prediction_set_id)
)
@informational_job
def export_prediction_cases_to_json(self, prediction_set_id, retrieve_references = True):
'''A JSON wrapper to get_prediction_set_case_details.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
def _export_dataset(self, dataset_id):
'''Returns a dict containing the dataset information.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
###########################################################################################
## Prediction creation/management layer
##
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
@job_creator
def add_prediction_set(self, prediction_set_id, halted = True, priority = 5, batch_size = 40,
allow_existing_prediction_set = False, contains_protein_stability_predictions = True, contains_binding_affinity_predictions = False,
series_name = None, series_color = 'ff0000', series_alpha = 1.0, description = None):
'''Adds a new PredictionSet (a construct used to group Predictions) to the database.
If a PredictionSet is halted then running schedulers will not kick off the jobs. Otherwise, they will be queued
depending on the priority of the PredictionSet (higher numbers mean higher priority).
batch_size defines the number of jobs to be added as once as an array job.
Returns True if a PredictionSet with the same ID did not previously exist.
The priority and batch size can be modified while a scheduler is running and will affect the next round of
predictions to be queued.
Raises an exception or returns False otherwise depending on the value of allow_existing_prediction_set.'''
if halted:
Status = 'halted'
else:
Status = 'active'
existing_record = self.DDG_db.execute_select('SELECT * FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))
if self.prediction_set_exists(prediction_set_id):
if allow_existing_prediction_set == False:
raise Exception('The PredictionSet %s already exists.' % prediction_set_id)
else:
return False
d = dict(
ID = prediction_set_id,
Status = Status,
Priority = priority,
ProteinStability = contains_protein_stability_predictions,
BindingAffinity = contains_binding_affinity_predictions,
BatchSize = batch_size,
SeriesName = series_name,
SeriesColor = series_color,
SeriesAlpha = series_alpha,
Description = description,
)
self.DDG_db.insertDictIfNew("PredictionSet", d, ['ID'], locked = False)
return True
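# Illustrative call (a sketch; the ID and description are made up):
#   ppi_api.add_prediction_set('zemu_run_1', halted = True, priority = 5, batch_size = 40,
#                              contains_binding_affinity_predictions = True,
#                              contains_protein_stability_predictions = False,
#                              description = 'Test run over the ZEMu subset')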
def prediction_set_exists(self, prediction_set_id):
existing_record = self.DDG_db.execute_select('SELECT * FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))
if len(existing_record) > 0:
assert(len(existing_record) == 1)
return True
else:
return False
@job_creator
def destroy_prediction_set(self, prediction_set_id):
'''This function removes the PredictionSet from the database.
THIS CANNOT BE UNDONE.
For safety, we should only allow PredictionSets with no corresponding scores to be removed.
It fits into the job_creator category since usually these empty PredictionSets will have been created while
setting up a job.'''
can_be_deleted = self.DDG_db.execute_select('SELECT CanBeDeleted FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))
if len(can_be_deleted) == 0:
raise colortext.Exception('The prediction set "%s" does not exist.' % prediction_set_id)
elif can_be_deleted[0]['CanBeDeleted'] == 0:
raise colortext.Exception('The prediction set "%s" is not allowed to be deleted. Change the CanBeDeleted property on its record first.' % prediction_set_id)
params = (self._get_prediction_table(), self._get_prediction_structure_scores_table())
qry = 'SELECT COUNT({0}.ID) AS NumRecords FROM {0} INNER JOIN {1} ON {0}.ID={1}.{0}ID WHERE PredictionSet=%s'.format(*params)
existing_scores = self.DDG_db.execute_select(qry, parameters=(prediction_set_id,))
if existing_scores[0]['NumRecords'] > 0:
raise colortext.Exception('Cannot remove a prediction set with associated scores.')
qry = 'SELECT COUNT(ID) AS NumRecords FROM {0} WHERE Status <> "queued" AND PredictionSet=%s'.format(*params)
jobs_in_flux = self.DDG_db.execute_select(qry, parameters=(prediction_set_id,))
if jobs_in_flux[0]['NumRecords'] > 0:
raise colortext.Exception('Cannot remove a prediction set unless all jobs are set as "queued".')
# Use a transaction to prevent a partial deletion
self.DDG_db._get_connection()
con = self.DDG_db.connection
try:
with con:
cur = con.cursor()
# Delete the associated file records
delete_files_qry = 'DELETE {0}File FROM {0}File INNER JOIN {0} ON {0}.ID={0}File.{0}ID WHERE PredictionSet=%s'.format(*params)
cur.execute(delete_files_qry, (prediction_set_id, ))
# Delete the predictions
delete_predictions_qry = 'DELETE FROM {0} WHERE PredictionSet=%s'.format(*params)
cur.execute(delete_predictions_qry, (prediction_set_id, ))
cur.execute('DELETE FROM PredictionSet WHERE ID=%s', (prediction_set_id, ))
except Exception, e:
raise colortext.Exception('An exception occurred removing the PredictionSet from the database: "%s".\n%s' % (str(e), traceback.format_exc()))
@job_creator
def start_prediction_set(self, PredictionSetID):
'''Sets the Status of a PredictionSet to "active".'''
self._set_prediction_set_status(PredictionSetID, 'active')
@job_creator
def stop_prediction_set(self, PredictionSetID):
'''Sets the Status of a PredictionSet to "halted".'''
self._set_prediction_set_status(PredictionSetID, 'halted')
@job_creator
def add_job(self, *args, **kwargs):
'''Add a single prediction job to a prediction set. This should not typically be called - add_prediction_run
is generally what should be called instead.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def add_jobs_by_pdb_id(self, *args, **kwargs):
''' This function adds predictions for all Experiments corresponding to pdb_ID to the specified prediction set.
This is useful for custom runs e.g. when we are using the DDG scheduler for design rather than for benchmarking.
Variants of this function were used before for CypA and ubiquitin runs.
This is currently unimplemented but ask Shane if we need this functionality again.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
def _add_prediction_run_preconditions(self, tsession, prediction_set_id, user_dataset_name, tagged_subset):
'''Check to make sure that the prediction set, user dataset, and optional tagged subset make sense for this API.
Returns the set of allowed_user_datasets.
'''
prediction_set = get_single_record_from_query(tsession.query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == prediction_set_id))
if not prediction_set:
raise colortext.Exception('The prediction set "%s" does not exist in the database.' % prediction_set_id)
elif getattr(prediction_set, self._get_prediction_type()) != 1:
raise colortext.Exception('The prediction set "%s" is not the correct type ("%s") for this API.' % (prediction_set_id, self._get_prediction_type()))
allowed_user_datasets = self.get_defined_user_datasets(tsession)
if user_dataset_name not in allowed_user_datasets:
raise colortext.Exception('The user dataset "%s" does not exist in the database.' % user_dataset_name)
if tagged_subset and tagged_subset not in allowed_user_datasets[user_dataset_name]['Subsets']:
raise colortext.Exception('The tagged subset "%s" of user dataset "%s" does not exist in the database.' % (tagged_subset, user_dataset_name))
return allowed_user_datasets
@job_creator
def add_prediction_run(self, *args, **kwargs):
'''Adds all jobs corresponding to a user dataset e.g. add_prediction_run("my first run", "AllBindingAffinityData", tagged_subset = "ZEMu").'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def merge_prediction_run(self, from_prediction_set_id, to_prediction_set_id, create_if_does_not_exist = True, series_color = 'ff0000'):
'''Adds all of the jobs from from_prediction_set_id to to_prediction_set_id.
When to_prediction_set_id is empty, this function makes a clone of from_prediction_set_id.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def add_job_by_user_dataset_record(self, *args, **kwargs):
'''Uses the UserDataSet record to get most of the information needed to set up the job e.g. PDB complex, mutagenesis details.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def clone_prediction_run(self, existing_prediction_set, new_prediction_set, *args, **kwargs):
'''add_prediction_run sets up a full run of dataset predictions but is slow as it needs to perform a lot of
calculations and parsing. If you want to test the same dataset with slightly different parameters (e.g. a
different protocol) then these calculations can be reused which reduces the overhead considerably.
clone_prediction_run was written with this in mind. It copies the list of predictions and their setup (input
files etc.) from an existing prediction set to an empty prediction set.'''
raise Exception('not implemented yet')
@job_creator
def add_development_protocol_command_lines(self, development_protocol_id):
'''Possibly temporary function used to add protocol command lines for methods in development.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
#== Input file generation API ===========================================================
#
# This part of the API is responsible for creating input files for predictions
@job_input
def create_resfile(self, prediction_id):
'''This function returns the resfile content for the prediction. It is usually not called directly by the user but
is available for convenience and debugging.'''
# todo: is this being used?
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_input
def create_mutfile(self, prediction_id):
'''This function returns the mutfile content for the prediction. It is usually not called directly by the user but
is available for convenience and debugging.'''
# todo: is this being used?
raise Exception('This function needs to be implemented by subclasses of the API.')
#== Job execution/completion API ===========================================================
#
# This part of the API is responsible for starting jobs and setting them as failed or
# completed
@job_execution
def get_queued_jobs(self, prediction_set_id, order_by = 'Cost', order_order_asc = False, include_files = True, truncate_content = None):
'''An iterator to return the details of the queued prediction records in this prediction set.
An exception is raised if the prediction set is halted.
Assuming Cost is filled in and is representative of the expected runtime, it makes sense to request jobs ordered
by Cost and order_order_asc = False rather than by ID as longer jobs can then be kicked off before shorter jobs.
Usage:
for prediction_record in ppi_api.get_queued_jobs(prediction_set_id, include_files = True, truncate_content = 30):
pprint.pprint(prediction_record)
'''
if self.get_prediction_set_details(prediction_set_id)['Status'] == 'halted':
raise Exception('The prediction set is halted so no job details can be returned.')
self._assert_prediction_set_exists(prediction_set_id)
for job_id in self.get_queued_job_list(prediction_set_id, order_by = order_by, order_order_asc = order_order_asc):
self._get_job_fn_call_counter[job_id] = self._get_job_fn_call_counter.get(job_id, 0)
self._get_job_fn_call_counter[job_id] += 1
if self._get_job_fn_call_counter[job_id] > self._get_job_fn_call_counter_max:
self.DDG_db = None
self.DDG_db_utf = None
raise Exception('get_job was called %d times for this prediction. This is probably a bug in the calling code.' % self._get_job_fn_call_counter[job_id])
yield(self.get_job_details(job_id, include_files = include_files, truncate_content = truncate_content))
@job_execution
def get_queued_job_list(self, prediction_set_id, order_by = 'Cost', order_order_asc = False):
'''An iterator to return the list of queued prediction records in this prediction set.
Assuming Cost is filled in and is representative of the expected runtime, it makes sense to request jobs ordered
by Cost and order_order_asc = False rather than by ID as longer jobs can then be kicked off before shorter jobs.
Usage:
for prediction_id in ppi_api.get_queued_job_list(prediction_set_id):
print(prediction_id)
'''
assert((order_by in ['Cost', 'ID']) and isinstance(order_order_asc, bool))
if order_order_asc:
order_order_asc = 'ASC'
else:
order_order_asc = 'DESC'
params = (self._get_prediction_table(), order_by, order_order_asc)
qry = 'SELECT ID FROM {0} WHERE PredictionSet=%s AND Status="queued" ORDER BY {1} {2}'.format(*params)
results = self.DDG_db.execute_select(qry, parameters=(prediction_set_id,))
x = 0
while x < len(results):
yield results[x]['ID']
x += 1
@job_execution
def start_job(self, prediction_id, prediction_set_id):
'''Sets the job status to "active". prediction_set must be passed and is used as a sanity check.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def get_max_number_of_cluster_jobs(self, prediction_set_id, priority):
'''Returns the maximum number of cluster jobs that schedulers should run for this interface.'''
return self.DDG_db.execute_select('SELECT Value FROM _DBCONSTANTS WHERE VariableName="MaxClusterJobs"')[0]['Value']
def _assert_prediction_set_exists(self, prediction_set_id):
if len(self.DDG_db.execute_select('SELECT * FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))) != 1:
raise Exception('The PredictionSet %s does not exist.' % prediction_set_id)
@job_execution
def alter_prediction_set_priority(self, prediction_set_id, priority):
'''Modify the priority for a PredictionSet. Higher values give the PredictionSet more priority over other running PredictionSets.'''
priority = int(priority)
assert(priority > 0)
self._assert_prediction_set_exists(prediction_set_id)
self.DDG_db.execute_select('UPDATE PredictionSet SET Priority=%s WHERE ID=%s', parameters=(priority, prediction_set_id,))
@job_execution
def alter_prediction_set_batch_size(self, prediction_set_id, batch_size):
'''Modify the batch size for a PredictionSet. The batch size is the number of jobs which will be submitted together
during subsequent job submissions.'''
batch_size = int(batch_size)
assert(batch_size > 0)
self._assert_prediction_set_exists(prediction_set_id)
self.DDG_db.execute_select('UPDATE PredictionSet SET BatchSize=%s WHERE ID=%s', parameters=(batch_size, prediction_set_id,))
@job_execution
def set_job_temporary_protocol_field(self, prediction_id, prediction_set_id, temporary_protocol_field):
'''Possibly temporary function which sets fields in the temporary protocol field.'''
raise Exception('not implemented yet')
@job_completion
def fail_job(self, prediction_id, prediction_set, maxvmem, ddgtime, errors = None):
'''Sets the job status to "failed". prediction_set must be passed and is used as a sanity check.'''
self._check_prediction(prediction_id, prediction_set)
self.DDG_db.execute('UPDATE {0} SET Status="failed", maxvmem=%s, DDGTime=%s, Errors=%s WHERE ID=%s'.format(self._get_prediction_table()), parameters=(maxvmem, ddgtime, errors, prediction_id,))
@job_completion
def extract_data(self, prediction_set_id, root_directory = None, force = False, score_method_id = None):
'''Extracts the data for the prediction set run and stores it into the database.
For all PredictionIDs associated with the PredictionSet:
- looks for a subdirectory of root_directory with the same name as the ID e.g. /some/folder/21412
- call extract_data_for_case
Note: we do not use a transaction at this level. We could but it may end up being a very large transaction
depending on the dataset size. It seems to make more sense to me to use transactions at the single prediction
level, i.e. in extract_data_for_case.
root_directory defaults to sys_settings.[api].prediction_data_path.
If force is True then existing records should be overridden.
'''
root_directory = root_directory or self.prediction_data_path
prediction_ids = self.get_prediction_ids(prediction_set_id)
for prediction_id in prediction_ids:
job_path = os.path.join(root_directory, str(prediction_id))
if not os.path.exists(job_path):
raise Exception('The folder {0} for Prediction #{1} does not exist.'.format(job_path, prediction_id))
for prediction_id in prediction_ids:
self.extract_data_for_case(prediction_id, root_directory = root_directory, force = force, score_method_id = score_method_id)
@job_completion
def extract_data_for_case(self, prediction_id, root_directory = None, score_method_id = None, force = False):
'''Extracts the data for the prediction case (e.g. by processing stdout) and stores it in the Prediction*StructureScore
table.
The scores are returned to prevent the caller having to run another query.
If force is False and the expected number of records for the case exists in the database, these are returned.
Otherwise, the data are extracted, stored using a database transaction to prevent partial storage, and returned.
Note:
We use a lot of functions here: extract_data_for_case, parse_prediction_scores, store_scores.
This may seem like overkill but I think it could allow us to reuse a lot of the code since the tables for
PredictionPPIStructureScore and PredictionStructureScore are very similar (but are different since the underlying
foreign tables PredictionPPI and Prediction are at least currently separate).
parse_prediction_scores only returns dicts for database storage so it can be useful for debugging during development.
store_scores stores scores in the database (passed as a list of dicts) but does not care from where they came.
extract_data_for_case calls parse_prediction_scores to get the scores and the store_scores to commit them to the database.
'''
root_directory = root_directory or self.prediction_data_path # defaults to sys_settings.[api].prediction_data_path
prediction_set = self.get_job_details(prediction_id, include_files = False)['PredictionSet']
# todo: implement force behavior
# Create a list of dicts for the PredictionPPIStructureScore table
scores = self.parse_prediction_scores(prediction_id, root_directory = root_directory, score_method_id = score_method_id)
# Store the dicts as PredictionPPIStructureScore records
if len(scores) > 0:
self.store_scores(prediction_set, prediction_id, scores)
return scores
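# Illustrative usage (a sketch; the IDs and root directory are assumptions):
#   scores = ppi_api.extract_data_for_case(12345, root_directory = '/path/to/prediction/data', score_method_id = 7)
#   print len(scores)   # number of Prediction*StructureScore records parsed and stored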
@job_completion
def parse_prediction_scores(self, prediction_id, root_directory = None, score_method_id = None):
'''Returns a list of dicts suitable for database storage e.g. PredictionStructureScore or PredictionPPIStructureScore records.'''
raise Exception('Abstract method. This needs to be overridden by a subclass. Returns a dict suitable for database storage e.g. PredictionStructureScore or PredictionPPIStructureScore records.')
@job_completion
def store_scores_for_many_predictions(self, prediction_set, scores, safe = True, prediction_structure_scores_table = None, prediction_id_field = None):
'''Stores scores for many predictions.
scores should be a list of dicts suitable for database storage e.g. PredictionStructureScore or
PredictionPPIStructureScore records.
'''
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
if safe:
# Sanity checks
for score in scores:
if prediction_id_field not in score:
raise Exception('The score record is missing a {0} field: {1}.'.format(prediction_id_field, pprint.pformat(score)))
self._check_prediction(score[prediction_id_field], prediction_set)
con = self.DDG_db.connection
cursor = con.cursor()
sql_query = None
if safe:
params_to_insert = set()
else:
params_to_insert = []
for score in scores:
if safe:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = True)
else:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = False)
if sql_query:
assert( sql == sql_query )
else:
sql_query = sql
if safe:
if params in params_to_insert or record_exists:
print params
print params_to_insert
raise Exception('Duplicate params')
params_to_insert.add(params)
else:
params_to_insert.append(params)
with con:
db_cursor = con.cursor()
if safe:
db_cursor.executemany(sql_query, [x for x in params_to_insert])
else:
# print params_to_insert
db_cursor.executemany(sql_query, params_to_insert)
def remove_scores(self, prediction_set, prediction_id, score_method_id, prediction_structure_scores_table = None, prediction_id_field = None, test_mode = False):
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
if score_method_id != None:
query = 'DELETE FROM %s WHERE %s=%s AND ScoreMethodID=%s' % (
prediction_structure_scores_table, prediction_id_field, str(prediction_id), str(score_method_id)
)
else:
query = 'DELETE FROM %s WHERE %s=%s' % (
prediction_structure_scores_table, prediction_id_field, str(prediction_id)
)
if test_mode:
print query
self.DDG_db.execute(query)
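# Illustrative usage (a sketch; the arguments are assumptions). Passing score_method_id = None removes all
# score records for the prediction rather than just those for one scoring method:
#   ppi_api.remove_scores('zemu_run_1', 12345, 7)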
@job_completion
def store_scores(self, prediction_set, prediction_id, scores, prediction_structure_scores_table = None, prediction_id_field = None):
'''Stores scores for one prediction.
scores should be a list of dicts suitable for database storage e.g. PredictionStructureScore or
PredictionPPIStructureScore records.
This function uses a transaction so if any of the insertions fail then they are all rolled back.
The default scores table and prediction_id_field can be (evilly) overridden to put scores in the wrong table.
'''
if prediction_set:
# Only check prediction is in prediction set if prediction set is passed in
self._check_prediction(prediction_id, prediction_set)
if prediction_id_field == None:
# Only check for self-consistency if we are not (evilly) overriding everything that is good in the world
self._check_scores_for_main_fields(scores, prediction_id)
if prediction_structure_scores_table == None:
# Only check for self-consistency if we are not (evilly) overriding everything our forefathers died for
self._check_score_fields(scores)
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
try:
con = self.DDG_db.connection
with con:
db_cursor = con.cursor()
for score in scores:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = True)
if not record_exists:
db_cursor.execute(sql, params)
except Exception, e:
print sql, params, record_exists
raise colortext.Exception('Failed to insert scores for Prediction #{0}: "{1}".\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
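# Illustrative sketch of a minimal score record (the IDs and the 'total' component are assumptions; a real
# record must contain every required column of the concrete Prediction*StructureScore table):
#   score = {ppi_api._get_prediction_id_field(): 12345, 'ScoreMethodID': 7, 'ScoreType': 'DDG', 'StructureID': 1, 'total': -1.25}
#   ppi_api.store_scores('zemu_run_1', 12345, [score])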
@job_completion
def complete_job(self, prediction_id, prediction_set, scores, maxvmem, ddgtime):
'''Sets a job to 'completed' and stores scores. prediction_set must be passed and is used as a sanity check.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
###########################################################################################
## Prediction results layer
##
## This part of the API for returning data about completed predictions.
###########################################################################################
@job_results
def get_ddg_scores_per_structure(self, prediction_id):
'''Returns the list of all DDG scores for a prediction_id. NOTE: Consider allowing the score method to be passed as a parameter.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@job_results
def get_prediction_data_path(self):
'''Returns the file server path to the where archived prediction data is stored.'''
return self.prediction_data_path
@job_results
def get_job_data(self, prediction_id):
'''Returns (in memory) the contents of the zip file corresponding to the prediction.'''
job_data_path = os.path.join(self.prediction_data_path, '%d.zip' % prediction_id)
if os.path.exists(job_data_path):
return read_file(job_data_path, binary = True)
@job_results
def write_job_archive_to_file(self, prediction_id, output_filename):
'''Writes the contents of the zip file corresponding to the prediction.'''
job_data_path = os.path.join(self.prediction_data_path, '%d.zip' % prediction_id)
assert(output_filename != job_data_path) # do not overwrite the existing file or allow to extract in place
write_file(output_filename, self.get_job_data(prediction_id))
@job_results
def write_job_data_to_disk(self, prediction_id, output_path):
'''Saves the job output for the prediction to the specified path.'''
assert(os.path.exists(output_path))
assert(output_path != self.prediction_data_path) # do not overwrite the existing file or allow to extract in place
archive = self.get_job_data(prediction_id)
write_file(os.path.join(output_path, '%d.zip' % prediction_id), archive, 'wb')
p = Popen(output_path, ['unzip', '%d.zip' % prediction_id])
os.remove(os.path.join(output_path, '%d.zip' % prediction_id))
if p.errorcode != 0:
raise colortext.Exception(p.stderr)
else:
colortext.warning(p.stdout)
@job_results
def extract_sge_job_stdout_from_archive(self, prediction_id):
'''Returns the stdout files created during the prediction.
The files are returned as a dict mapping the type of output file (e.g. ddg_monomer step) to the content
of the stdout files.
'''
# Retrieve and unzip results in memory
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
stdout_file_names = {}
stdout_file_list = [l for l in sorted(zipped_content.namelist()) if (l.find('cmd.o') != -1)]
for f in stdout_file_list:
tokens = os.path.split(f)
assert(tokens[0].isdigit())
title = tokens[1].split('_')[0]
assert(stdout_file_names.get(title) == None)
stdout_file_names[title] = f
stdout_files = {}
for stdout_type, filename in stdout_file_names.iteritems():
stdout_files[stdout_type] = zipped_content.open(filename, 'r').read()
zipped_content.close()
return stdout_files
except Exception, e:
zipped_content.close()
raise Exception(str(e))
###########################################################################################
## Analysis layer
##
## This part of the API is responsible for running analysis on completed predictions
###########################################################################################
@analysis_api
def get_top_x_scores(self, prediction_id, score_method_id, score_type, x, component = 'total', order_by = 'ASC'):
'''Returns the top x score records for the prediction for the given score method and score type, ordered by the given component (e.g. used to compute a top-X stability DDG).'''
results = self.DDG_db.execute_select('SELECT * FROM {0} WHERE {1}=%s AND ScoreMethodID=%s AND ScoreType=%s ORDER BY {2} {3}'.format(self._get_prediction_structure_scores_table(), self._get_prediction_id_field(), component, order_by), parameters=(prediction_id, score_method_id, score_type))
if len(results) < x:
raise Exception('The top {0} best scores were requested but only {1} results are stored in the database.'.format(x, len(results)))
results = results[:x]
return [{
'PredictionID' : r[self._get_prediction_id_field()],
'ScoreMethodID' : score_method_id,
'ScoreType' : score_type,
'StructureID' : r['StructureID'],
component : r[component],
} for r in results]
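# Illustrative usage (a sketch; the IDs and score type are assumptions):
#   top3 = ppi_api.get_top_x_scores(12345, score_method_id = 7, score_type = 'DDG', x = 3, component = 'total')
#   print [r['total'] for r in top3]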
@analysis_api
def get_prediction_scores(self, prediction_id, expectn = None):
'''Returns the scores for the prediction using nested dicts with the structure:
ScoreMethodID -> StructureID -> ScoreType -> database record
'''
cache_id = (prediction_id, expectn)
if cache_id in self.prediction_scores_cache:
return self.prediction_scores_cache[cache_id]
scores = {}
for r in self.DDG_db.execute_select('SELECT * FROM {0} WHERE {1}=%s'.format(self._get_prediction_structure_scores_table(), self._get_prediction_id_field()), parameters=(prediction_id,)):
ScoreMethodID = r['ScoreMethodID']
ScoreType = r['ScoreType']
StructureID = r['StructureID']
if StructureID == -1:
StructureID = 'None' # usually this indicates an overall or aggregate value
scores[ScoreMethodID] = scores.get(ScoreMethodID, {})
scores[ScoreMethodID][StructureID] = scores[ScoreMethodID].get(StructureID, {})
scores[ScoreMethodID][StructureID][ScoreType] = r
del scores[ScoreMethodID][StructureID][ScoreType]['ScoreMethodID']
del scores[ScoreMethodID][StructureID][ScoreType]['StructureID']
del scores[ScoreMethodID][StructureID][ScoreType]['ScoreType']
del scores[ScoreMethodID][StructureID][ScoreType][self._get_prediction_id_field()]
del scores[ScoreMethodID][StructureID][ScoreType]['ID']
if expectn != None:
for score_method_id, score_method_scores in scores.iteritems():
num_cases = 0
for k in score_method_scores.keys():
if isinstance(k, int) or isinstance(k, long):
num_cases += 1
if num_cases < expectn:
print 'Expected scores for at least {0} runs with score method {1}; found {2}. Prediction id: {3}.'.format(expectn, score_method_id, num_cases, prediction_id)
self.prediction_scores_cache[cache_id] = scores
return scores
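# Illustrative sketch of navigating the nested dict (the prediction ID is an assumption):
#   scores = ppi_api.get_prediction_scores(12345, expectn = 50)
#   for score_method_id, by_structure in sorted(scores.iteritems()):
#       for structure_id, by_type in sorted(by_structure.iteritems()):
#           print score_method_id, structure_id, sorted(by_type.keys())   # available ScoreTypes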
@analysis_api
def get_top_x_ddg(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction. Typically, this is the mean value of the top X predictions for a
case computed using the associated Score records in the database.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@analysis_api
def get_existing_analysis(self, prediction_set_id):
'''Returns the summary statistics for all existing dataframes in the database.
Unlike get_analysis_dataframe, this function does not create any dataframes.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@analysis_api
def get_analysis_dataframe(self, prediction_set_id,
experimental_data_exists = True,
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
ddg_analysis_type = None,
take_lowest = 3,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = True,
):
'''This function uses experimental data from the database and prediction data from the Prediction*StructureScore
table to build a pandas dataframe and store it in the database. See .analyze for an explanation of the
parameters.
The dataframes mostly contain redundant data so their storage could be seen to break a key database design
principle. However, we store the dataframe in the database as it can take a while to build it from scratch and
pre-built dataframes can be used to run quick analysis, for rapid development of the analysis methods, or to
plug into webservers where responsiveness is important.
If use_existing_benchmark_data is True and the dataframe already exists then it is returned as a BenchmarkRun object.
Otherwise, it is built from the Prediction*StructureScore records.
If the Prediction*StructureScore records do not exist, this function falls back into extract_data_for_case
to generate them in which case root_directory needs to be specified (this is the only use for the root_directory
parameter).
'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@analysis_api
def get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, expectn = None, extract_data_for_case_if_missing = True, root_directory = None, prediction_table_rows_cache = None, dataframe_type = None):
'''Returns a dictionary with values relevant to predictions e.g. binding affinity, monomeric stability.'''
prediction_data = {}
# Add memory and runtime
if prediction_table_rows_cache != None:
prediction = prediction_table_rows_cache[prediction_id]
prediction_data['UserDataSetExperimentID'] = self._get_sqa_predictions_user_dataset_experiment_id(prediction)
if prediction.DDGTime == None:
prediction_data['RunTime'] = 0.0
else:
prediction_data['RunTime'] = float(prediction.DDGTime)
if prediction.maxvmem == None:
prediction_data['MaxMemory'] = 0.0
else:
prediction_data['MaxMemory'] = float(prediction.maxvmem)
else:
raise Exception("Not implemented. Write a function to get the data only for this prediction_id here")
return self._get_prediction_data(prediction_id, score_method_id, main_ddg_analysis_type, expectn = expectn, extract_data_for_case_if_missing = extract_data_for_case_if_missing, root_directory = root_directory, prediction_data = prediction_data, dataframe_type = dataframe_type)
def _get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, top_x = 3, expectn = None, extract_data_for_case_if_missing = True, root_directory = None):
'''Returns a dictionary with values relevant to predictions e.g. binding affinity, monomeric stability.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
def _get_analysis_dataframe(self, benchmark_run_class,
dataframe_type = None,
prediction_set_id = None,
experimental_data_exists = True,
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
ddg_analysis_type = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
take_lowest = 3,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = True,
debug = False,
restrict_to = set(),
remove_cases = set(),
):
'''This 'private' function does most of the work for get_analysis_dataframe.'''
if take_lowest:
assert( ddg_analysis_type == None )
ddg_analysis_type = 'DDG_Top%d' % take_lowest
else:
assert( ddg_analysis_type != None and take_lowest == None )
if ddg_analysis_type.startswith( 'DDG_Top' ):
take_lowest = int( ddg_analysis_type[7:] )
assert(dataframe_type != None and prediction_set_id != None)
hdf_store_blob = None
if use_existing_benchmark_data:
if take_lowest == None:
hdf_store_blob = self.DDG_db.execute_select('''
SELECT PandasHDFStore FROM AnalysisDataFrame WHERE
PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX IS NULL AND BurialCutoff=%s AND
StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
IncludesDerivedMutations=%s AND DDGAnalysisType=%s''', parameters=(
prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, burial_cutoff,
stability_classication_experimental_cutoff, stability_classication_predicted_cutoff, include_derived_mutations, ddg_analysis_type))
else:
# KAB TODO: to ask Shane - why does passing None not correctly change to IS NULL?
hdf_store_blob = self.DDG_db.execute_select('''
SELECT PandasHDFStore FROM AnalysisDataFrame WHERE
PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX=%s AND BurialCutoff=%s AND
StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
IncludesDerivedMutations=%s AND DDGAnalysisType=%s''', parameters=(
prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, take_lowest, burial_cutoff,
stability_classication_experimental_cutoff, stability_classication_predicted_cutoff, include_derived_mutations, ddg_analysis_type))
if hdf_store_blob:
assert(len(hdf_store_blob) == 1)
mem_zip = StringIO.StringIO()
mem_zip.write(hdf_store_blob[0]['PandasHDFStore'])
mem_zip.seek(0)
hdf_store_blob = gzip.GzipFile(fileobj = mem_zip, mode='rb').read()
if not(use_existing_benchmark_data and hdf_store_blob):
# Create this cache if we are going to end up using it in if statements below
prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(prediction_set_id)
else:
prediction_table_rows_cache = None
# This dict is similar to dataset_cases in the benchmark capture (dataset.json)
prediction_set_case_details = None
prediction_ids = []
if not(use_existing_benchmark_data and hdf_store_blob):
print('Retrieving the associated experimental data for the user dataset.')
prediction_set_case_details = self.get_prediction_set_case_details(prediction_set_id, retrieve_references = True, include_experimental_data = experimental_data_exists, prediction_table_rows_cache = prediction_table_rows_cache)
UserDataSetExperimentIDs = prediction_set_case_details['Data'].keys()
prediction_set_case_details = prediction_set_case_details['Data']
prediction_ids = [prediction_set_case_details[udse_id]['PredictionID'] for udse_id in UserDataSetExperimentIDs]
analysis_data = {}
top_level_dataframe_attributes = {}
if not(use_existing_benchmark_data and hdf_store_blob):
if extract_data_for_case_if_missing and not silent:
print('Computing the best/top/whatever values for each prediction case, extracting data if need be.')
elif not extract_data_for_case_if_missing and not silent:
print('Computing the best/top/whatever values for each prediction case; skipping missing data without attempting to extract.')
num_predictions_in_prediction_set = len(prediction_ids)
failed_cases = set()
## get_job_description(self, prediction_id)
for UserDataSetExperimentID in UserDataSetExperimentIDs:
try:
prediction_id = prediction_set_case_details[UserDataSetExperimentID]['PredictionID']
prediction_id_data = self.get_prediction_data(prediction_id, score_method_id, ddg_analysis_type, expectn = expectn, extract_data_for_case_if_missing = extract_data_for_case_if_missing, root_directory = root_directory, dataframe_type = dataframe_type, prediction_table_rows_cache = prediction_table_rows_cache)
analysis_data[UserDataSetExperimentID] = prediction_id_data
del analysis_data[UserDataSetExperimentID]['UserDataSetExperimentID']
analysis_data[UserDataSetExperimentID]['PredictionID'] = prediction_id
except FatalException, e:
raise
except PartialDataException, e:
if not allow_failures:
raise Exception('Prediction {0} has partial data. Skipping.'.format(prediction_id))
failed_cases.add(prediction_id)
except Exception, e:
raise Exception('An error occurred during the best/top/whatever computation: {0}.\n{1}'.format(str(e), traceback.format_exc()))
failed_cases.add(prediction_id)
if debug and len(analysis_data) >= 20:
break
if failed_cases:
colortext.error('Failed to determine the best/top/whatever score for {0}/{1} predictions. Continuing with the analysis ignoring these cases.'.format(len(failed_cases), len(prediction_ids)))
working_prediction_ids = sorted(set(prediction_ids).difference(failed_cases))
top_level_dataframe_attributes = dict(
num_predictions_in_prediction_set = num_predictions_in_prediction_set,
num_predictions_in_dataframe = len(working_prediction_ids),
dataframe_type = dataframe_type,
contains_experimental_data = experimental_data_exists,
)
# Only pull PDB data for cases where we have data
restrict_to_pdbs = set([prediction_set_case_details[k]['Structure']['PDBFileID'] for k in analysis_data])
prediction_set_details = self.get_prediction_set_details(prediction_set_id)
prediction_set_series_name = prediction_set_series_name or prediction_set_details['SeriesName'] or prediction_set_details['ID']
prediction_set_description = prediction_set_description or prediction_set_details['Description']
prediction_set_color = prediction_set_color or prediction_set_details['SeriesColor']
prediction_set_alpha = prediction_set_alpha or prediction_set_details['SeriesAlpha']
score_method_details = self.get_score_method_details( score_method_id = score_method_id )
additional_join_parameters = {
'score_method' : {
'short_name' : score_method_details['MethodName'],
'long_name' : '%s - %s' % (score_method_details['MethodType'], score_method_details['Authors']),
},
'prediction_set_id' : {
'short_name' : prediction_set_id,
},
'ddg_analysis_type' : {
'short_name' : ddg_analysis_type[4:],
'long_name' : ddg_analysis_type,
},
}
# Initialize the BindingAffinityBenchmarkRun object
# Note: prediction_set_case_details, analysis_data, and top_level_dataframe_attributes will not be filled in
benchmark_run = benchmark_run_class(
prediction_set_series_name,
prediction_set_case_details,
analysis_data,
contains_experimental_data = experimental_data_exists,
additional_join_parameters = additional_join_parameters,
store_data_on_disk = False,
calculate_scalar_adjustments = False,
benchmark_run_directory = None,
use_single_reported_value = use_single_reported_value,
description = prediction_set_description,
dataset_description = prediction_set_description,
credit = prediction_set_credit,
include_derived_mutations = include_derived_mutations,
generate_plots = False,
report_analysis = report_analysis,
silent = silent,
burial_cutoff = burial_cutoff,
stability_classication_x_cutoff = stability_classication_experimental_cutoff,
stability_classication_y_cutoff = stability_classication_predicted_cutoff,
use_existing_benchmark_data = False,
recreate_graphs = False,
misc_dataframe_attributes = top_level_dataframe_attributes,
restrict_to = restrict_to,
remove_cases = remove_cases,
)
if not(use_existing_benchmark_data and hdf_store_blob):
hdf_store_blob = benchmark_run.create_dataframe(pdb_data = self.get_prediction_set_pdb_chain_details(prediction_set_id, restrict_to_pdbs = restrict_to_pdbs))
d = dict(
PredictionSet = prediction_set_id,
DataFrameType = dataframe_type,
ContainsExperimentalData = experimental_data_exists,
ScoreMethodID = score_method_id,
UseSingleReportedValue = use_single_reported_value,
TopX = take_lowest,
BurialCutoff = burial_cutoff,
StabilityClassicationExperimentalCutoff = stability_classication_experimental_cutoff,
StabilityClassicationPredictedCutoff = stability_classication_predicted_cutoff,
IncludesDerivedMutations = include_derived_mutations,
DDGAnalysisType = ddg_analysis_type,
SeriesName = prediction_set_series_name,
SeriesColor = prediction_set_color,
SeriesAlpha = prediction_set_alpha,
Description = prediction_set_description,
Credit = prediction_set_credit,
DDGAnalysisTypeDescription = benchmark_run.ddg_analysis_type_description,
PandasHDFStore = hdf_store_blob,
)
self.DDG_db.execute('''DELETE FROM AnalysisDataFrame WHERE PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX=%s AND
BurialCutoff=%s AND StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
IncludesDerivedMutations=%s AND DDGAnalysisType=%s''',
parameters = (prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, take_lowest,
burial_cutoff, stability_classication_experimental_cutoff, stability_classication_predicted_cutoff,
include_derived_mutations, ddg_analysis_type
))
self.DDG_db.insertDictIfNew('AnalysisDataFrame', d, ['PredictionSet', 'DataFrameType', 'ContainsExperimentalData', 'ScoreMethodID', 'UseSingleReportedValue', 'TopX', 'BurialCutoff',
'StabilityClassicationExperimentalCutoff', 'StabilityClassicationPredictedCutoff',
'IncludesDerivedMutations', 'DDGAnalysisType'], locked = False)
else:
benchmark_run.read_dataframe_from_content(hdf_store_blob)
return benchmark_run
# if use_existing_benchmark_data and dataframe exists: return dataframe
# else retrieve all of the Score records from the database
# if a record does not exist:
# if root_directory then call extract_data_for_case to create an analysis dataframe and store it in the database
# store the number of complete Score records as a column in the dataframe (to indicate whether analysis is being performed on a full set of data)
#
# For Shane: this extracts the dataset_description and dataset_cases data that DDGBenchmarkManager currently takes care of in the capture.
# The analysis_data variable of DDGBenchmarkManager should be compiled via query calls to the Prediction*StructureScore table.
def map_prediction_ids(self, first_prediction_set_id, second_prediction_set_id):
'''
Returns pairs of prediction IDs corresponding to the same underlying UserDataSet.
Useful when input for a prediction run is based on the saved output files of another run.
'''
first_prediction_set_case_details = self.get_prediction_set_case_details(
first_prediction_set_id,
retrieve_references = False,
include_experimental_data = False,
prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(first_prediction_set_id),
)
second_prediction_set_case_details = self.get_prediction_set_case_details(
second_prediction_set_id,
retrieve_references = False,
include_experimental_data = False,
prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(second_prediction_set_id),
)
first_UserDataSetExperimentIDs = set( first_prediction_set_case_details['Data'].keys() )
second_UserDataSetExperimentIDs = set( second_prediction_set_case_details['Data'].keys() )
assert( first_UserDataSetExperimentIDs == second_UserDataSetExperimentIDs )
return_list = []
for UserDataSetExperimentID in first_UserDataSetExperimentIDs:
return_list.append( (
first_prediction_set_case_details['Data'][UserDataSetExperimentID]['PredictionID'],
second_prediction_set_case_details['Data'][UserDataSetExperimentID]['PredictionID'],
) )
return sorted( return_list )
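# A minimal usage sketch (the prediction set IDs and the ppi_api handle below are hypothetical, purely for illustration):
#   id_pairs = ppi_api.map_prediction_ids('ddg_run_A', 'ddg_run_B')
#   for first_prediction_id, second_prediction_id in id_pairs:
#       pass  # e.g. reuse the output structures of the first run as input for the matching case in the second run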
@analysis_api
def analyze(self, prediction_set_ids,
prediction_set_series_names = {}, prediction_set_descriptions = {}, prediction_set_credits = {}, prediction_set_colors = {}, prediction_set_alphas = {},
use_published_data = False,
use_existing_benchmark_data = True, recreate_graphs = False,
include_derived_mutations = False,
expectn = 50,
use_single_reported_value = False,
take_lowest = 3,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
output_directory = None,
generate_plots = True,
report_analysis = True,
silent = False,
root_directory = None,
restrict_to = set(),
remove_cases = set(),
):
'''Runs the analyses for the specified PredictionSets and cross-analyzes the sets against each other if appropriate.
* Analysis setup arguments *
prediction_set_ids is a list of PredictionSet IDs. Each PredictionSet will be analyzed separately and appropriate
pairs will be cross-analyzed.
prediction_set_series_names, prediction_set_descriptions, and prediction_set_credits are mappings from PredictionSet IDs
to series names (in plots), descriptions, and credits respectively. These details are stored in PredictionSet so
they are optional arguments. If passed, these mappings will override the PredictionSet values in the database
which allows the user to customize the analysis reports. Likewise, prediction_set_colors and prediction_set_alphas
are mappings to series colors and transparency values for use in the plots.
use_published_data. todo: implement later. This should include any published data e.g. the Kellogg et al. data for protein stability.
use_existing_benchmark_data and recreate_graphs are data creation arguments i.e. "should we use existing data or create it from scratch?"
include_derived_mutations is used to filter out dataset cases with derived mutations.
expectn declares how many predictions we expect to see per dataset case. If the actual number is less than expectn
then a warning will be included in the analysis.
* Dataframe arguments *
use_single_reported_value is specific to ddg_monomer. If this is True then the DDG value reported by the application is used and take_lowest is ignored. This is inadvisable - take_lowest = 3 is a better default.
take_lowest AKA Top_X. Specifies how many of the best-scoring groups of structures to consider when calculating the predicted DDG value.
burial_cutoff defines what should be considered buried (DSSPExposure field). Values around 1.0 are fully exposed, values of 0.0 are fully buried. For technical reasons, the DSSP value can exceed 1.0 but usually not by much.
stability_classication_experimental_cutoff AKA x_cutoff. This defines the neutral mutation range for experimental values in kcal/mol i.e. values between -1.0 and 1.0 kcal/mol are considered neutral by default.
stability_classication_predicted_cutoff AKA y_cutoff. This defines the neutral mutation range for predicted values in energy units.
* Reporting arguments *
output_directory : The directory in which to save plots and reports.
generate_plots : if plots are not needed, setting this to False can shorten the analysis time.
report_analysis : Whether or not to print analysis to stdout.
silent = False : Whether or not anything should be printed to stdout (True is useful for webserver interaction).
'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
# colors, alpha, and default series name and descriptions are taken from PredictionSet records
# The order (if p1 before p2 then p1 will be on the X-axis in comparative plots) in comparative analysis plots is determined by the order in PredictionSets
assert(take_lowest > 0 and (int(take_lowest) == take_lowest))
assert(0 <= burial_cutoff <= 2.0)
assert(stability_classication_experimental_cutoff > 0)
assert(stability_classication_predicted_cutoff > 0)
# assert PredictionSet for PredictionSet in PredictionSets is in the database
# calls get_analysis_dataframe(options) over all PredictionSets
# if output_directory is set, save files
# think about how to handle this in-memory. Maybe return a dict like:
#"run_analyis" -> benchmark_name -> {analysis_type -> object}
#"comparative_analysis" -> (benchmark_name_1, benchmark_name_2) -> {analysis_type -> object}
# comparative analysis
# only compare dataframes with the exact same points
# allow cutoffs, take_lowest to differ but report if they do so
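# A minimal usage sketch for a concrete subclass (analyze is abstract here; the api handle, PredictionSet IDs and
# colors below are hypothetical):
#   ppi_api.analyze(['run_A', 'run_B'],
#                   prediction_set_colors = {'run_A' : '#1f77b4', 'run_B' : '#ff7f0e'},
#                   take_lowest = 3, burial_cutoff = 0.25,
#                   output_directory = '/tmp/ddg_analysis', generate_plots = True)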
@analysis_api
def determine_best_pair(self, prediction_id, score_method_id = 1):
'''This returns the best wildtype/mutant pair for a prediction given a scoring method. NOTE: Consider generalising this to the n best pairs.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@analysis_api
def create_abacus_graph_for_a_single_structure(self, PredictionSet, scoring_method, scoring_type, graph_title = None, PredictionIDs = None, graph_filename = None, cached_results = None, num_datapoints = 0):
'''This function creates an abacus graph for one PDB file. It is useful when scanning all mutations at all positions
on small proteins e.g. ubiquitin to show which mutations at which positions are likely to improve the stability or
binding affinity.
The num_datapoints variable is mainly for debugging - I was tuning the resolution/DPI to fit the number of datapoints.'''
raise Exception('This should work or nearly work. Test it again when we have real data. Does it assume single point mutations?')
results = cached_results
if not results:
results = self.get_flattened_prediction_results(PredictionSet)
pdb_ids = set()
for r in results:
pdb_ids.add(r['PDBFileID'])
if len(pdb_ids) != 1:
raise Exception('This function is only meant to be called when the PredictionSet or the set of results contains records for a single structure. The set of results contains %d structures.' % len(pdb_ids))
sortable_results = {}
for r in results:
if (not PredictionIDs) or (r['PredictionID'] in PredictionIDs):
sortable_results[(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['ExperimentID'])] = r
count = 0
set_of_mutations = set()
for k, r in sorted(sortable_results.iteritems()):
#if r['FlattenedMutations'].find('A E141L') != -1 and r['FlattenedMutations'].find('A S142A') != -1 and r['FlattenedMutations'].find('A L78Y') != -1:
# print('%f, %s' % (k[0], r['FlattenedMutations']))
#if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
# if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
# print('%f, %s' % (k[0], r['FlattenedMutations']))
if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
#print('%f, %s' % (k[0], r['FlattenedMutations']))
count += 1
#A E141L, A S142A
mutations = [m for m in map(string.strip, r['FlattenedMutations'].split(',')) if m]
for m in mutations:
set_of_mutations.add((int(m.split()[1][1:-1]), m))
#if r['FlattenedMutations'].find('A L78Y') == -1:
# print('%f, %s' % (k[0], r['FlattenedMutations']))
# #count += 1
pruned_data = []
for k, r in sorted(sortable_results.iteritems()):
line = []
#print(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['FlattenedMutations'])
for m in sorted(set_of_mutations):
if r['FlattenedMutations'].find(m[1]) != -1:
line.append(1)
else:
line.append(0)
pruned_data.append((json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], line))
labels = [m[1].split()[1] for m in sorted(set_of_mutations)]
graph_title = graph_title or r'$\Delta\Delta$G predictions for %s (%s.%s)' % (PredictionSet, scoring_method.replace(',0A', '.0$\AA$').replace('_', ' '), scoring_type)
pruned_data = pruned_data[0:num_datapoints or len(pruned_data)]
colortext.message('Creating graph with %d datapoints...' % len(pruned_data))
number_of_non_zero_datapoints = 0
for p in pruned_data:
if 1 in p[1]:
number_of_non_zero_datapoints += 1
if number_of_non_zero_datapoints > 1:
break
if number_of_non_zero_datapoints < 2:
raise Exception('The dataset must contain at least two non-zero points.')
if graph_filename:
return self.write_abacus_graph(graph_filename, graph_title, labels, pruned_data, scoring_method, scoring_type)
else:
return self.create_abacus_graph(graph_title, labels, pruned_data, scoring_method, scoring_type)
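# Usage sketch (the PredictionSet name, scoring method and scoring type below are hypothetical and must match
# keys stored in the Scores JSON for the predictions):
#   self.create_abacus_graph_for_a_single_structure('UbiquitinScan', 'kellogg', 'total', graph_filename = 'ubq_scan.png')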
################################################################################################
## Application layer
## These functions combine the database and prediction data with useful klab functionality
################################################################################################
#== PyMOL API ===========================================================
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
'''Returns (in memory) a PyMOL session for a pair of structures.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@app_pymol
def write_pymol_session(self, prediction_id, task_number, output_filepath, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
'''Writes the PyMOL session for a pair of structures to disk.'''
PSE_file_contents = self.create_pymol_session_in_memory(prediction_id, task_number, pymol_executable = pymol_executable)
write_file(output_filepath, PSE_file_contents, 'wb')
@general_data_entry
def associate_pdb_file_with_project(self, pdb_file_id, project_id, notes = None):
tsession = self.get_session(new_session = True)
record = None
try:
record = get_or_create_in_transaction(tsession, dbmodel.ProjectPDBFile, dict(
PDBFileID = pdb_file_id,
ProjectID = project_id,
Notes = notes,
))
tsession.commit()
tsession.close()
except Exception, e:
tsession.rollback()
tsession.close()
raise
return record
@general_data_entry
def add_dataset(self, user_id, long_id, short_id, description, has_stability_ddg_records, has_binding_affinity_ddg_records, has_binding_affinity_de_records, ddg_convention, dataset_creation_start_date = None, dataset_creation_end_date = None, publication_ids = [], existing_session = None):
'''Adds a DataSet record. This is typically called before add_user_dataset_case which adds the user dataset
experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).
:param user_id: User ID for the user adding this dataset to the database.
:param long_id: This should be a descriptive name e.g. "SSM_Psd95-CRIPT_Rama_10.1038/nature11500" which describes the type of dataset (SSM on the Psd95-CRIPT complex) and includes the DOI of the associated publication.
:param short_id: A short ID which will be used to refer to the dataset by humans e.g. "Psd95-CRIPT".
:param description: A description of the dataset.
:param has_stability_ddg_records: Does the dataset contain DDG data for monomeric stability assays?
:param has_binding_affinity_ddg_records: Does the dataset contain DDG data for binding affinity assays?
:param has_binding_affinity_de_records: Does the dataset contain DeltaE data for binding affinity assays?
:param ddg_convention: Either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).
:param dataset_creation_start_date: The date when the dataset was first created. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the first revision.
:param dataset_creation_end_date: The date when the dataset was last modified or finalized. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the latest revision.
:param publication_ids: A list of Publication.ID field values from the associated publications.
:return: The SQLAlchemy DataSet object.
'''
tsession = existing_session or self.get_session(new_session = True)
try:
user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
except:
raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
if not (ddg_convention == 'Rosetta' or ddg_convention == 'ProTherm'):
raise Exception('The DDG convention should be specified as either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).')
if (len(long_id) > 128) or (len(short_id) > 32):
raise Exception('The long ID is limited to 128 characters and the short ID is limited to 32 characters.')
dataset_dict = {}
try:
dataset_dict = dict(
ID = long_id,
ShortID = short_id,
UserID = user_id,
Description = description,
DatasetType = self._get_prediction_dataset_type(),
ContainsStabilityDDG = has_stability_ddg_records,
ContainsBindingAffinityDDG = has_binding_affinity_ddg_records,
ContainsBindingAffinityDE = has_binding_affinity_de_records,
CreationDateStart = dataset_creation_start_date,
CreationDateEnd = dataset_creation_end_date,
DDGConvention = ddg_convention,
)
data_set = get_or_create_in_transaction(tsession, dbmodel.DataSet, dataset_dict, variable_columns = ['Description', 'CreationDateStart', 'CreationDateEnd'])
data_set_id = data_set.ID
for publication_id in publication_ids:
dataset_reference = get_or_create_in_transaction(tsession, dbmodel.DataSetReference, dict(
DataSetID = data_set_id,
Publication = publication_id,
))
if existing_session == None:
tsession.commit()
tsession.close()
return data_set
except Exception, e:
colortext.error('An exception occurred while adding the dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(dataset_dict), str(e), traceback.format_exc()))
if existing_session == None:
tsession.rollback()
tsession.close()
raise
@general_data_entry
def add_user_dataset(self, user_id, text_id, description, analyze_ddg, analyze_de, existing_session = None):
'''Adds a UserDataSet record. This is typically called before add_user_dataset_case which adds the user dataset
experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).'''
dt = datetime.datetime.now()
tsession = existing_session or self.get_session(new_session = True)
try:
user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
except:
raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
user_dataset_dict = {}
try:
user_dataset_dict = dict(
TextID = text_id,
UserID = user_id,
Description = description,
DatasetType = self._get_prediction_dataset_type(),
AnalyzeDDG = analyze_ddg,
AnalyzeDE = analyze_de,
FirstCreated = dt,
LastModified = dt,
)
user_data_set = get_or_create_in_transaction(tsession, dbmodel.UserDataSet, user_dataset_dict, missing_columns = ['ID'], variable_columns = ['Description', 'FirstCreated', 'LastModified'])
if existing_session == None:
tsession.commit()
tsession.close()
return user_data_set
except Exception, e:
colortext.error('An exception occurred while adding the user dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(user_dataset_dict), str(e), traceback.format_exc()))
if existing_session == None:
tsession.rollback()
tsession.close()
raise
@general_data_entry
def add_ddg_user_dataset(self, user_id, text_id, description, existing_session = None):
'''Convenience wrapper for add_user_dataset for DDG-only user datasets.'''
return self.add_user_dataset(user_id, text_id, description, True, False, existing_session = existing_session)
@general_data_entry
def add_de_user_dataset(self, user_id, text_id, description, existing_session = None):
'''Convenience wrapper for add_user_dataset for DeltaE-only user datasets.'''
return self.add_user_dataset(user_id, text_id, description, False, True, existing_session = existing_session)
################################################################################################
## Private API layer
## These are helper functions used internally by the class but which are not intended for export
################################################################################################
###########################################################################################
## Subclass layer
##
## These functions need to be implemented by subclasses
###########################################################################################
def _get_sqa_prediction_table(self): return None
def _get_sqa_prediction_structure_scores_table(self): return None
def _get_prediction_table(self): return None
def _get_prediction_structure_scores_table(self): return None
def _get_prediction_id_field(self): return self._get_prediction_table() + 'ID'
def _get_prediction_type(self): return None
def _get_prediction_dataset_type(self): return None
def _get_prediction_type_description(self): return None
def _get_user_dataset_experiment_table(self): return None
def _get_user_dataset_experiment_tag_table(self): return None
def _get_allowed_score_types(self): return None
###########################################################################################
## Assertion layer
##
## These functions check pre- and post-conditions
###########################################################################################
def _check_prediction(self, prediction_id, prediction_set):
'''Sanity check: Asserts that a Prediction belongs in the expected PredictionSet.'''
prediction_table = self._get_prediction_table()
if not self.DDG_db.execute_select('SELECT * FROM {0} WHERE ID=%s AND PredictionSet=%s'.format(prediction_table), parameters=(prediction_id, prediction_set)):
raise Exception('{0} record #{1} does not belong to PredictionSet {2}.'.format(prediction_table, prediction_id, prediction_set))
def _check_scores_for_main_fields(self, scores, prediction_id):
'''Sanity check: Asserts that the identifying fields for the scores make sense for this interface.'''
prediction_id_field = self._get_prediction_id_field()
score_method_details = self.get_score_method_details()
allowed_score_types = self._get_allowed_score_types()
int_type = type(1)
for score in scores:
assert(prediction_id_field in score and score[prediction_id_field] == prediction_id)
assert('ScoreMethodID' in score and score['ScoreMethodID'] in score_method_details)
assert('ScoreType' in score and score['ScoreType'] in allowed_score_types)
assert('StructureID' in score and type(score['StructureID']) == int_type)
def _check_score_fields(self, scores):
'''Sanity check: Asserts that the fields for the scores are represented in the database table.'''
fieldnames = set([f for f in self.DDG_db.FieldNames.__dict__[self._get_prediction_structure_scores_table()].__dict__.keys() if not(f.startswith('_'))])
for score in scores:
score_keys = score.keys()
if sorted(fieldnames.intersection(score_keys)) != sorted(score_keys):
print score_keys
print fieldnames
raise Exception('These score table fieldnames were not recognized: {0}.'.format(', '.join(sorted(set(score_keys).difference(fieldnames)))))
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation/management API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via the
# trickle-down proteomics paradigm.
# PredictionSet interface
def _assert_prediction_set_is_correct_type(self, PredictionSetID):
'''Asserts that the PredictionSet exists and contains predictions of the type handled by this interface.'''
assert(self._get_prediction_type() and self._get_prediction_type_description())
if (self.get_prediction_set_details(PredictionSetID) or {}).get(self._get_prediction_type()) != 1:
raise Exception('This PredictionSet either does not exist or else contains no %s predictions.' % self._get_prediction_type_description())
def _set_prediction_set_status(self, PredictionSetID, status):
'''Sets the Status of a PredictionSet.'''
tsession = self.get_session()
assert(status == 'halted' or status == 'active')
assert(self.get_prediction_set_details(PredictionSetID))
tsession.query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == PredictionSetID).update({
dbmodel.PredictionSet.Status : status,
})
tsession.commit()
# Prediction setup interface
def _add_prediction_file(self, tsession, prediction_id, file_content, filename, filetype, filerole, stage, rm_trailing_line_whitespace = False, forced_mime_type = None, file_content_id = None):
'''This function adds file content to the database and then creates a record associating that content with a prediction.
This call must be made within an existing session (tsession). This is crucial for many of the database functions
as they rely on transactions rolling back on failure.'''
prediction_table = self._get_prediction_table()
# Add the file contents to the database
if filetype == 'PDB':
forced_mime_type = forced_mime_type or 'chemical/x-pdb'
if file_content_id == None:
assert(file_content != None)
file_content_id = self.importer._add_file_content(file_content, tsession = tsession, rm_trailing_line_whitespace = rm_trailing_line_whitespace, forced_mime_type = forced_mime_type)
# Link the file contents to the prediction
prediction_file_record = dict(
FileContentID = file_content_id,
Filename = filename,
Filetype = filetype,
FileRole = filerole,
Stage = stage,
)
prediction_id_field, db_table = None, None
if prediction_table == 'Prediction':
prediction_id_field = 'PredictionID'
db_table = dbmodel.PredictionFile
elif prediction_table == 'PredictionPPI':
prediction_id_field = 'PredictionPPIID'
db_table = dbmodel.PredictionPPIFile
else:
raise Exception('Invalid table "%s" passed.' % prediction_table)
prediction_file_record[prediction_id_field] = prediction_id
# Create the database record
# Note: We have already searched the file cache and database for uniqueness so we do NOT call get_or_create_in_transaction
# here. This turns out to be a huge time saver since get_or_create_in_transaction will, in this case,
# look up the FileContent.Content field which is an expensive operation.
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE {1}=:{1} AND FileContentID=:FileContentID AND Filename=:Filename AND Filetype=:Filetype AND FileRole=:FileRole AND Stage=:Stage'.format(prediction_table + 'File', prediction_id_field), prediction_file_record)]
if existing_records:
assert(len(existing_records) == 1)
else:
prediction_file_record = db_table(**prediction_file_record)
tsession.add(prediction_file_record)
tsession.flush()
return file_content_id
def _strip_pdb(self, pdb_file_id, chains):
raise Exception('assert that chains exist in PDBChain table. reads PDB content from the database. call PDB class functions to strip to chains.')
def _add_residue_map_json_to_prediction(self, tsession, prediction_id, residue_mapping_json, map_type):
assert(isinstance(residue_mapping_json, str))
assert(isinstance(json.loads(residue_mapping_json), dict))
if map_type == 'Rosetta residue->PDB residue map':
filename = 'rosetta2pdb.resmap.json'
elif map_type == 'PDB residue->Rosetta residue map':
filename = 'pdb2rosetta.resmap.json'
else:
raise colortext.Exception('Unexpected map type "{0}".'.format(map_type))
return self._add_prediction_file(tsession, prediction_id, residue_mapping_json, filename, 'RosettaPDBMapping', map_type, 'Input', rm_trailing_line_whitespace = True, forced_mime_type = "application/json")
def _add_stripped_pdb_to_prediction(self, prediction_id):
# todo: this is not being called (and should be) - see _add_job in kddg.api.ppi.py
raise Exception('reimplement')
pdb_file_id, chains = self.get_pdb_chains_for_prediction(prediction_id)
pdb_content = self._strip_pdb(pdb_file_id, chains)
filename = '%s_%s' % (pdb_file_id, ''.join(sorted(chains)))
return self._add_prediction_file(tsession, prediction_id, pdb_content, filename, 'PDB', 'StrippedPDB', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
def _add_resfile_to_prediction(self, tsession, prediction_id, mutations, resfile_name):
rf = Resfile.from_mutageneses(mutations)
return self._add_prediction_file(tsession, prediction_id, str(rf), resfile_name, 'Resfile', 'Resfile', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/plain')
def _add_mutfile_to_prediction(self, tsession, prediction_id, rosetta_mutations, mutfile_name):
mf = Mutfile.from_mutagenesis(rosetta_mutations)
return self._add_prediction_file(tsession, prediction_id, str(mf), mutfile_name, 'Mutfile', 'Mutfile', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/plain')
def _add_ligand_params_files_to_prediction(self, tsession, prediction_id, pdb_file_id):
for params_file_record in tsession.query(dbmodel.PDBLigandFile).filter(dbmodel.PDBLigandFile.PDBFileID == pdb_file_id):
ligand_code = params_file_record.PDBLigandCode
self._add_prediction_file(tsession, prediction_id, None, '{0}.params'.format(ligand_code), 'Params', '{0} params file'.format(ligand_code), 'Input', rm_trailing_line_whitespace = False, forced_mime_type = 'text/plain', file_content_id = params_file_record.ParamsFileContentID)
return None
def _create_resfile_from_pdb_mutations(self, stripped_pdb, pdb_mutations):
'''This function takes a PDB object to be used in a DDG job (i.e. usually stripped to certain chains but with the
original PDB numbering) and a list of mutations using the original PDB numbering. Resfiles use PDB numbering
so no mapping needs to be done.'''
if not pdb_mutations:
raise Exception("There needs to be at least one mutation.")
try:
resfile = []
for mutation in pdb_mutations:
# Check that the expected wildtype exists in the PDB
stripped_pdb.assert_wildtype_matches(mutation)
chain, resid, mt = mutation.Chain, mutation.ResidueID.strip(), mutation.MutantAA
resfile.append("%(resid)s %(chain)s PIKAA %(mt)s" % vars())
assert(resfile)
return '\n'.join(["NATAA", "start"] + resfile)
except:
raise Exception("An error occurred creating a resfile for the ddG job.")
def _create_mutfile_from_pdb_mutations(self, stripped_pdb, pdb_mutations):
'''This function takes a PDB object to be used in a DDG job (i.e. usually stripped to certain chains but with the
original PDB numbering) and a list of mutations using the original PDB numbering. Since mutfiles use Rosetta
numbering, we need to map the residue IDs from PDB numbering to Rosetta numbering.'''
if not pdb_mutations:
raise Exception("There needs to be at least one mutation.")
try:
# Map the mutations from PDB numbering to Rosetta numbering
rosetta_mutations = stripped_pdb.map_pdb_residues_to_rosetta_residues(pdb_mutations)
assert(len(rosetta_mutations) == len(pdb_mutations))
mutfile = []
for x in range(len(pdb_mutations)):
pdb_mutation = pdb_mutations[x]
rosetta_mutation = rosetta_mutations[x]
# Check that the expected wildtype exists in the PDB
stripped_pdb.assert_wildtype_matches(pdb_mutation)
wt, resid, mt = rosetta_mutation.WildTypeAA, rosetta_mutation.ResidueID, rosetta_mutation.MutantAA
mutfile.append("%(wt)s %(resid)s %(mt)s" % vars())
assert(mutfile)
return '\n'.join(["total %d" % len(rosetta_mutations), "%d" % len(rosetta_mutations)] + mutfile)
except:
raise Exception("An error occurred creating a mutfile for the ddG job.")
| 54.53694
| 334
| 0.654705
|
1a8ee93cca4a25b2878175beaa99be533826ecf6
| 4,560
|
py
|
Python
|
tensorflow/contrib/session_bundle/gc_test.py
|
zhaojunz/tensorflow
|
d1415bdc03fcdb090752ab0c91ee529dc09eb4ee
|
[
"Apache-2.0"
] | 65
|
2016-09-26T01:30:40.000Z
|
2021-08-11T17:00:41.000Z
|
tensorflow/contrib/session_bundle/gc_test.py
|
amineferchichi/tensorflow
|
4ac9c09d5ca57a03b8daa5fb9e295947b1619854
|
[
"Apache-2.0"
] | 5
|
2017-05-22T08:07:52.000Z
|
2019-02-25T12:09:16.000Z
|
tensorflow/contrib/session_bundle/gc_test.py
|
amineferchichi/tensorflow
|
4ac9c09d5ca57a03b8daa5fb9e295947b1619854
|
[
"Apache-2.0"
] | 11
|
2017-09-10T16:22:21.000Z
|
2021-08-09T09:24:50.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.gc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def tearDownModule():
gfile.DeleteRecursively(test.get_temp_dir())
class GcTest(test_util.TensorFlowTestCase):
def testLargestExportVersions(self):
paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)])
def testLargestExportVersionsDoesNotDeleteZeroFolder(self):
paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)]
newest = gc.largest_export_versions(2)
n = newest(paths)
self.assertEquals(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)])
def testModExportVersion(self):
paths = [
gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)
]
mod = gc.mod_export_version(2)
self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)])
mod = gc.mod_export_version(3)
self.assertEquals(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)])
def testOneOfEveryNExportVersions(self):
paths = [
gc.Path("/foo", 0), gc.Path("/foo", 1), gc.Path("/foo", 3),
gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 7),
gc.Path("/foo", 8), gc.Path("/foo", 33)
]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(
one_of(paths), [
gc.Path("/foo", 3), gc.Path("/foo", 6), gc.Path("/foo", 8),
gc.Path("/foo", 33)
])
def testOneOfEveryNExportVersionsZero(self):
# Zero is a special case since it gets rolled into the first interval.
# Test that here.
paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)]
one_of = gc.one_of_every_n_export_versions(3)
self.assertEquals(one_of(paths), [gc.Path("/foo", 0), gc.Path("/foo", 5)])
def testUnion(self):
paths = []
for i in xrange(10):
paths.append(gc.Path("/foo", i))
f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3))
self.assertEquals(
f(paths), [
gc.Path("/foo", 0), gc.Path("/foo", 3), gc.Path("/foo", 6),
gc.Path("/foo", 7), gc.Path("/foo", 8), gc.Path("/foo", 9)
])
def testNegation(self):
paths = [
gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
gc.Path("/foo", 9)
]
mod = gc.negation(gc.mod_export_version(2))
self.assertEquals(mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)])
mod = gc.negation(gc.mod_export_version(3))
self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)])
def testPathsWithParse(self):
base_dir = os.path.join(test.get_temp_dir(), "paths_parse")
self.assertFalse(gfile.Exists(base_dir))
for p in xrange(3):
gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
# add a base_directory to ignore
gfile.MakeDirs(os.path.join(base_dir, "ignore"))
# create a simple parser that pulls the export_version from the directory.
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
self.assertEquals(
gc.get_paths(
base_dir, parser=parser), [
gc.Path(os.path.join(base_dir, "0"), 0),
gc.Path(os.path.join(base_dir, "1"), 1),
gc.Path(os.path.join(base_dir, "2"), 2)
])
if __name__ == "__main__":
test.main()
| 35.905512
| 80
| 0.621711
|
c02e434376a9898d91faf8c9a1291da1aa54e28e
| 27,155
|
py
|
Python
|
test/functional/bip68-112-113-p2p.py
|
randboy/BAYEMCOIN
|
d0f19e1e38a5bebeb9a7a230e1f0054134720250
|
[
"MIT"
] | null | null | null |
test/functional/bip68-112-113-p2p.py
|
randboy/BAYEMCOIN
|
d0f19e1e38a5bebeb9a7a230e1f0054134720250
|
[
"MIT"
] | null | null | null |
test/functional/bip68-112-113-p2p.py
|
randboy/BAYEMCOIN
|
d0f19e1e38a5bebeb9a7a230e1f0054134720250
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Ravencoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
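# Worked example of the indexing scheme above (not used directly by the tests):
#   relative_locktimes[1][0][1][0] == base_relative_locktime | seq_disable_flag | seq_type_flag
#                                  == 10 | (1 << 31) | (1 << 22) == 0x8040000A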
def all_rlt_txs(txarray):
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']]
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
| 50.662313
| 177
| 0.650967
|
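For context on the nSequence values exercised by the functional test above, here is a small, self-contained sketch of the BIP 68 encoding; the constants follow the specification and the function name is illustrative only, not taken from the test framework.
# Sketch of the BIP 68 nSequence encoding (constants per the BIP, names illustrative).
SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31  # set -> nSequence carries no relative lock
SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22     # set -> lock is time-based, in 512 s units
SEQUENCE_LOCKTIME_MASK = 0x0000FFFF       # low 16 bits hold the lock value

def encode_relative_locktime(value, time_based):
    """Return an nSequence for a relative lock of `value` blocks,
    or `value` * 512 seconds when time_based is True."""
    n_sequence = value & SEQUENCE_LOCKTIME_MASK
    if time_based:
        n_sequence |= SEQUENCE_LOCKTIME_TYPE_FLAG
    return n_sequence

print(hex(encode_relative_locktime(10, time_based=True)))   # 0x40000a, ~85 minutes
print(hex(encode_relative_locktime(10, time_based=False)))  # 0xa, ten blocks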
23f8f15b515b990ca51df19972e1523a5ddad07e
| 47
|
py
|
Python
|
test/commonASTtests/py-test-files/forLoop.py
|
Submitty/AnalysisTools
|
7cf3c6cba8eedf1f56110f316c2b9ab016546b1b
|
[
"BSD-3-Clause"
] | 6
|
2017-07-17T13:56:20.000Z
|
2020-12-07T17:43:33.000Z
|
test/commonASTtests/py-test-files/forLoop.py
|
Submitty/AnalysisTools
|
7cf3c6cba8eedf1f56110f316c2b9ab016546b1b
|
[
"BSD-3-Clause"
] | 13
|
2016-06-23T22:03:20.000Z
|
2018-04-01T04:28:42.000Z
|
test/commonASTtests/py-test-files/forLoop.py
|
Submitty/AnalysisTools
|
7cf3c6cba8eedf1f56110f316c2b9ab016546b1b
|
[
"BSD-3-Clause"
] | 5
|
2016-12-03T17:01:17.000Z
|
2018-07-29T19:55:45.000Z
|
for i in range(0,10):
p = 10
p+= 1
print p
| 7.833333
| 21
| 0.531915
|
afd3cbd0af2bfdf40096b70490fb5af3ac678778
| 240
|
py
|
Python
|
week1/assignment6/6-exercise1.py
|
brian-gpu/assignments
|
77dc8254f256fb329bd7508cfd2cde5c8384e836
|
[
"MIT"
] | null | null | null |
week1/assignment6/6-exercise1.py
|
brian-gpu/assignments
|
77dc8254f256fb329bd7508cfd2cde5c8384e836
|
[
"MIT"
] | null | null | null |
week1/assignment6/6-exercise1.py
|
brian-gpu/assignments
|
77dc8254f256fb329bd7508cfd2cde5c8384e836
|
[
"MIT"
] | null | null | null |
def crowd_test(crowd):
if len(crowd) > 3:
print('The room is crowded.')
print("Exercise 1")
# Crowd has 4 people
crowd = ['John','Dave','Sam','Sue']
crowd_test(crowd)
# Crowd has 3 people
crowd.remove('Dave')
crowd_test(crowd)
| 20
| 37
| 0.658333
|
c14049594c923715b9bcdf1d614e719aaa871e61
| 5,596
|
py
|
Python
|
fuzzers/ECP5/130-dqsbuf/fuzzer.py
|
Keno/prjtrellis
|
3311e6d814e0001c8785d6d77a4c93e327875b6d
|
[
"ISC"
] | 256
|
2018-03-05T00:28:46.000Z
|
2022-03-04T22:33:29.000Z
|
fuzzers/ECP5/130-dqsbuf/fuzzer.py
|
Keno/prjtrellis
|
3311e6d814e0001c8785d6d77a4c93e327875b6d
|
[
"ISC"
] | 70
|
2018-03-12T21:55:02.000Z
|
2020-06-22T12:06:08.000Z
|
fuzzers/ECP5/130-dqsbuf/fuzzer.py
|
Keno/prjtrellis
|
3311e6d814e0001c8785d6d77a4c93e327875b6d
|
[
"ISC"
] | 68
|
2018-03-12T21:05:01.000Z
|
2021-03-14T21:08:33.000Z
|
from fuzzconfig import FuzzConfig
import nonrouting
import pytrellis
import fuzzloops
import interconnect
jobs = [(FuzzConfig(job="LDQS17", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["MIB_R17C0:PICL0_DQS2", "MIB_R15C0:PICL1_DQS0", "MIB_R16C0:PICL2_DQS1",
"MIB_R18C0:PICL1_DQS3"]),
"LDQS17", "R17C0"),
(FuzzConfig(job="RDQS17", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["MIB_R17C90:PICR0_DQS2", "MIB_R15C90:PICR1_DQS0", "MIB_R16C90:PICR2_DQS1",
"MIB_R18C90:PICR1_DQS3"]),
"RDQS17", "R17C90"),
]
def todecstr(x):
res = 0
for i in range(len(x)):
if x[i]:
res |= 1 << i
return str(res)
def main():
pytrellis.load_database("../../../database")
def per_job(job):
def get_substs(mode="DQSBUFM", program={}, ddrdel="DDRDEL", read="NO", rdloadn="NO", wrloadn="NO", pause="NO"):
if mode == "NONE":
comment = "//"
else:
comment = ""
program = ",".join(["{}={}".format(k, v) for k, v in program.items()])
ties = []
if ddrdel != "DDRDEL":
ties.append("DDRDEL={}".format(ddrdel))
if read != "YES":
ties.append("READ0=0")
ties.append("READ1=0")
if rdloadn != "YES":
ties.append("RDLOADN=0")
if wrloadn != "YES":
ties.append("WRLOADN=0")
if pause != "YES":
ties.append("PAUSE=0")
if len(ties) > 0:
program += ":{}".format(",".join(ties))
return dict(site=loc, comment=comment, program=program)
cfg, loc, rc = job
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "dqsbuf.ncl"
nonrouting.fuzz_enum_setting(cfg, "DQS.MODE", ["NONE", "DQSBUFM"],
lambda x: get_substs(mode=x, program=dict(GSR="ENABLED")), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "DQS.GSR".format(loc), ["ENABLED", "DISABLED"],
lambda x: get_substs(program=dict(GSR=x)), empty_bitfile)
nonrouting.fuzz_enum_setting(cfg, "DQS.DQS_LI_DEL_ADJ".format(loc), ["PLUS", "MINUS"],
lambda x: get_substs(program=dict(DQS_LI_DEL_ADJ=x, DQS_LI_DEL_VAL=(1 if x == "PLUS" else 255))), empty_bitfile)
nonrouting.fuzz_enum_setting(cfg, "DQS.DQS_LO_DEL_ADJ".format(loc), ["PLUS", "MINUS"],
lambda x: get_substs(program=dict(DQS_LO_DEL_ADJ=x, DQS_LO_DEL_VAL=(1 if x == "PLUS" else 255))), empty_bitfile)
nonrouting.fuzz_word_setting(cfg, "DQS.DQS_LI_DEL_VAL".format(loc), 8,
lambda x: get_substs(program=dict(DQS_LI_DEL_VAL=todecstr(x))), empty_bitfile)
nonrouting.fuzz_word_setting(cfg, "DQS.DQS_LO_DEL_VAL".format(loc), 8,
lambda x: get_substs(program=dict(DQS_LO_DEL_VAL=todecstr(x))), empty_bitfile)
nonrouting.fuzz_enum_setting(cfg, "DQS.DDRDEL".format(loc), ["DDRDEL", "0"],
lambda x: get_substs(ddrdel=x), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "DQS.READ_USED".format(loc), ["NO", "YES"],
lambda x: get_substs(read=x), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "DQS.RDLOADN_USED".format(loc), ["NO", "YES"],
lambda x: get_substs(rdloadn=x), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "DQS.WRLOADN_USED".format(loc), ["NO", "YES"],
lambda x: get_substs(wrloadn=x), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "DQS.PAUSE_USED".format(loc), ["NO", "YES"],
lambda x: get_substs(pause=x), empty_bitfile, False)
nets = [
"{}_DQSECLK".format(rc),
"{}_JRDDIRECTION_DQS".format(rc),
"{}_JRDMOVE_DQS".format(rc),
"{}_JRDLOADN_DQS".format(rc),
"{}_JWRDIRECTION_DQS".format(rc),
"{}_JWRMOVE_DQS".format(rc),
"{}_JWRLOADN_DQS".format(rc),
"{}_JRST_DQS".format(rc),
"{}_JSCLK_DQS".format(rc),
"{}_JDQSI_DQS".format(rc),
"{}_JREAD0_DQS".format(rc),
"{}_JREAD1_DQS".format(rc),
"{}_JRDCFLAG_DQS".format(rc),
"{}_JWRCFLAG_DQS".format(rc),
"{}_JBURSTDET_DQS".format(rc),
"{}_JDATAVALID_DQS".format(rc),
"{}_JPAUSE_DQS".format(rc),
"{}_DDRDEL_DQS".format(rc),
"{}_ECLK_DQS".format(rc),
"{}_JDQSR90_DQS".format(rc),
"{}_JDQSW270_DQS".format(rc),
"{}_JDQSW_DQS".format(rc),
"{}_DDRDEL".format(rc),
]
for i in range(8):
nets.append("{}_JDYNDELAY{}_DQS".format(rc, i))
for i in range(3):
nets.append("{}_RDPNTR{}_DQS".format(rc, i))
nets.append("{}_WRPNTR{}_DQS".format(rc, i))
nets.append("{}_JREADCLKSEL{}_DQS".format(rc, i))
for i in range(3):
nets.append("{}_JREAD{}_DQS".format(rc, i))
cfg.ncl = "dqsbuf_routing.ncl"
interconnect.fuzz_interconnect_with_netnames(cfg, nets, bidir=True)
fuzzloops.parallel_foreach(jobs, per_job)
if __name__ == "__main__":
main()
| 44.768
| 149
| 0.535561
|
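Most of the fuzzer above depends on prjtrellis helpers (fuzz_enum_setting, fuzz_word_setting, interconnect) that are not shown here; the one self-contained piece is todecstr(), which packs a list of booleans LSB-first into a decimal string. The snippet below repeats the function so it can be exercised on its own.
# The bit packing performed by todecstr() above, repeated so it runs standalone.
def todecstr(x):
    res = 0
    for i in range(len(x)):
        if x[i]:
            res |= 1 << i
    return str(res)

assert todecstr([True, False, False]) == "1"   # only bit 0 set
assert todecstr([False, False, True]) == "4"   # only bit 2 set
assert todecstr([True] * 8) == "255"           # all eight DQS_*_DEL_VAL bits set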
4d50105042cdc49da97edf8bf59e1d992e140e39
| 784
|
py
|
Python
|
tests/test_matrix_operations_basic.py
|
sea-bass/python-testing-ci
|
2ec3b4a61ef4bbef8d5712e4912c515a707c98d3
|
[
"MIT"
] | 1
|
2021-06-12T09:50:29.000Z
|
2021-06-12T09:50:29.000Z
|
tests/test_matrix_operations_basic.py
|
sea-bass/python-testing-ci
|
2ec3b4a61ef4bbef8d5712e4912c515a707c98d3
|
[
"MIT"
] | null | null | null |
tests/test_matrix_operations_basic.py
|
sea-bass/python-testing-ci
|
2ec3b4a61ef4bbef8d5712e4912c515a707c98d3
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
# Test Case: Will fail with exact comparison due to numerical error
a = np.array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
b = np.array([[8, 7, 6],
[5, 4, 3],
[2, 1, 0]])
expected = np.array([[8, 8, 8],
[8, 8, 8],
[8, 8, 8]])
# Define test functions
def test_numpy_version():
"""
Checks for the correct NumPy version as per specification
"""
np_ver = np.__version__
assert(np_ver == "1.17.0")
print("Correct NumPy version found: " + np_ver)
def test_addition():
"""
Tests the addition of 2 matrices by exact comparison
"""
actual = a + b
assert((expected == actual).all())
print("Matrices are exactly equal")
| 25.290323
| 67
| 0.531888
|
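The test module above relies on exact element-wise equality, which works for the integer matrices it defines; for floating-point data a tolerance-based check is normally used instead. A minimal sketch with standard NumPy APIs (not part of the file above):
# Tolerance-based counterpart to the exact (expected == actual).all() check above.
import numpy as np

a = np.array([[0.1, 0.2], [0.2, 0.1]])
b = np.array([[0.2, 0.1], [0.1, 0.2]])
expected = np.array([[0.3, 0.3], [0.3, 0.3]])

def test_addition_approx():
    """Passes even though 0.1 + 0.2 != 0.3 exactly in binary floating point."""
    actual = a + b
    assert np.allclose(expected, actual, rtol=1e-9, atol=1e-12)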
8d486180a8ac229e66996e2838121748d71b8d3f
| 1,113
|
py
|
Python
|
pre_commit_hooks/no_telegram_tokens.py
|
Vonvikken/pre-commit-hooks
|
24c0476162674da11085470673043ff9422887dc
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/no_telegram_tokens.py
|
Vonvikken/pre-commit-hooks
|
24c0476162674da11085470673043ff9422887dc
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/no_telegram_tokens.py
|
Vonvikken/pre-commit-hooks
|
24c0476162674da11085470673043ff9422887dc
|
[
"MIT"
] | 1
|
2020-12-14T16:20:21.000Z
|
2020-12-14T16:20:21.000Z
|
import re
import argparse
from typing import Sequence, Optional
TOKEN_REGEX = re.compile(r"\d{9,}:[-\w]{34,}")
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument("filenames", nargs="*", help="Filenames to check")
parser.add_argument(
"--excluded-files",
dest="excl",
nargs="*",
help="Files to exclude from checking",
)
args = parser.parse_args(argv)
excluded = []
if args.excl is not None:
excluded += args.excl
file_list = [file for file in args.filenames if file not in excluded]
found = []
for filename in file_list:
with open(filename, "r+", encoding="UTF-8") as f:
lines = f.read()
if re.search(TOKEN_REGEX, lines):
found.append(filename)
ret = len(found)
if ret > 0:
found_list = "\n".join(found)
print(
f"""Potential Telegram bot tokens found in the following file{'s' if ret > 1 else ''}:
{found_list}"""
)
return ret
if __name__ == "__main__":
exit(main())
| 25.883721
| 98
| 0.587601
|
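TOKEN_REGEX above matches a bot id of nine or more digits, a colon, and at least 34 token characters. A quick, self-contained demonstration against a fabricated string of the right shape:
# Demonstration of the token pattern above against a clearly fabricated value.
import re

TOKEN_REGEX = re.compile(r"\d{9,}:[-\w]{34,}")

fake = "token = 123456789:" + "A" * 34   # fabricated, matches only the shape
clean = "nothing sensitive here"

assert TOKEN_REGEX.search(fake) is not None
assert TOKEN_REGEX.search(clean) is None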
a9fb43b6c4c0b94ada1b7780df297f0f596f9b22
| 2,615
|
py
|
Python
|
src/ansiblelint/rules/meta_no_info.py
|
cognifloyd/ansible-lint
|
f9ad8ea39705850b7753e4cb13c3cc7c82f176f8
|
[
"MIT"
] | 1,192
|
2015-01-03T20:50:49.000Z
|
2018-10-29T19:13:06.000Z
|
src/ansiblelint/rules/meta_no_info.py
|
cognifloyd/ansible-lint
|
f9ad8ea39705850b7753e4cb13c3cc7c82f176f8
|
[
"MIT"
] | 326
|
2015-01-02T00:48:35.000Z
|
2018-10-29T11:07:46.000Z
|
src/ansiblelint/rules/meta_no_info.py
|
cognifloyd/ansible-lint
|
f9ad8ea39705850b7753e4cb13c3cc7c82f176f8
|
[
"MIT"
] | 209
|
2015-01-12T14:26:09.000Z
|
2018-10-18T12:10:49.000Z
|
"""Implementation of meta-no-info rule."""
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
from typing import TYPE_CHECKING, Generator, List
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from typing import Any, Tuple
from ansiblelint.constants import odict
META_STR_INFO = ("author", "description")
META_INFO = tuple(
list(META_STR_INFO)
+ [
"license",
"min_ansible_version",
"platforms",
]
)
def _platform_info_errors_itr(
platforms: "List[odict[str, str]]",
) -> Generator[str, None, None]:
if not isinstance(platforms, list):
yield "Platforms should be a list of dictionaries"
return
for platform in platforms:
if not isinstance(platform, dict):
yield "Platforms should be a list of dictionaries"
elif "name" not in platform:
yield "Platform should contain name"
def _galaxy_info_errors_itr(
galaxy_info: "odict[str, Any]",
info_list: "Tuple[str, ...]" = META_INFO,
str_info_list: "Tuple[str, ...]" = META_STR_INFO,
) -> Generator[str, None, None]:
for info in info_list:
g_info = galaxy_info.get(info, False)
if g_info:
if info in str_info_list and not isinstance(g_info, str):
yield f"{info} should be a string"
elif info == "platforms":
for err in _platform_info_errors_itr(g_info):
yield err
else:
yield f"Role info should contain {info}"
class MetaMainHasInfoRule(AnsibleLintRule):
"""meta/main.yml should contain relevant info."""
id = "meta-no-info"
str_info = META_STR_INFO
info = META_INFO
description = f"meta/main.yml should contain: {', '.join(info)}"
severity = "HIGH"
tags = ["metadata"]
version_added = "v4.0.0"
def matchplay(self, file: Lintable, data: "odict[str, Any]") -> List[MatchError]:
if file.kind != "meta":
return []
# since Ansible 2.10 we can add a meta/requirements.yml but
# we only want to match on meta/main.yml
if file.path.name != "main.yml":
return []
galaxy_info = data.get("galaxy_info", False)
if galaxy_info:
return [
self.create_matcherror(message=err, filename=file)
for err in _galaxy_info_errors_itr(galaxy_info)
]
return [self.create_matcherror(message="No 'galaxy_info' found", filename=file)]
| 30.764706
| 88
| 0.632887
|
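The helper generators above only inspect plain mappings, so their behaviour can be illustrated with ordinary dicts; the sketch below is illustrative only (the real rule receives ansible-lint's ordered dicts and reports results through MatchError objects), and the field values are made up.
# Illustrative inputs for the checks above, using plain dicts in place of
# ansible-lint's odict (the names and values are made up).
good_info = {
    "author": "someone",
    "description": "example role",
    "license": "MIT",
    "min_ansible_version": "2.9",
    "platforms": [{"name": "Debian", "versions": ["bullseye"]}],
}
bad_info = {
    "author": 123,          # not a string -> "author should be a string"
    "platforms": "Debian",  # not a list   -> "Platforms should be a list of dictionaries"
}
# list(_galaxy_info_errors_itr(good_info)) would be expected to return no errors,
# while list(_galaxy_info_errors_itr(bad_info)) reports the two problems noted
# above plus the missing description, license and min_ansible_version entries.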
827c4abf53ecbc8996b82beea0a092fdbd16df8c
| 2,488
|
py
|
Python
|
caption/optimizers/adamw.py
|
Unbabel/caption
|
90725dbf5bc3809e0364d20d0837c58968ceb2b1
|
[
"MIT"
] | 3
|
2021-06-14T08:23:00.000Z
|
2022-03-04T06:00:50.000Z
|
caption/optimizers/adamw.py
|
Unbabel/caption
|
90725dbf5bc3809e0364d20d0837c58968ceb2b1
|
[
"MIT"
] | null | null | null |
caption/optimizers/adamw.py
|
Unbabel/caption
|
90725dbf5bc3809e0364d20d0837c58968ceb2b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from test_tube import HyperOptArgumentParser
from transformers import AdamW as HuggingFaceAdamW
from .optim_args import OptimArgs
class AdamW(HuggingFaceAdamW, OptimArgs):
"""
Wrapper for the huggingface AdamW optimizer.
https://huggingface.co/transformers/v2.1.1/main_classes/optimizer_schedules.html#adamw
:param params: Model parameters
:param lr: learning rate.
:param betas: Adam's beta parameters (b1, b2).
:param eps: Adam's epsilon.
:param weight_decay: Weight decay.
:param correct_bias: Can be set to False to avoid correcting bias in Adam.
"""
def __init__(
self,
params,
lr: float = 1e-3,
betas: list = [0.9, 0.999],
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
) -> None:
super(AdamW, self).__init__(
params=params,
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
correct_bias=correct_bias,
)
@classmethod
def from_hparams(cls, params, hparams):
"""
Initializes the optimizer from the parameters in the HyperOptArgumentParser
"""
return AdamW(
params,
hparams.learning_rate,
(hparams.b1, hparams.b2),
hparams.eps,
hparams.weight_decay,
hparams.correct_bias,
)
@staticmethod
def add_optim_specific_args(
parser: HyperOptArgumentParser,
) -> HyperOptArgumentParser:
"""
Function that parses optimizer-specific arguments and adds
them to the Namespace
:param parser:
"""
parser = super(AdamW, AdamW).add_optim_specific_args(parser)
parser.add_argument(
"--b1", default=0.9, type=float, help="Adams beta parameters (b1, b2)."
)
parser.add_argument(
"--b2", default=0.999, type=float, help="Adams beta parameters (b1, b2)."
)
parser.add_argument("--eps", default=1e-6, type=float, help="Adams epsilon.")
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay."
)
parser.add_argument(
"--correct_bias",
default=False,
help="If this flag is on the correct_bias AdamW parameter is set to True.",
action="store_true",
)
return parser
| 30.716049
| 94
| 0.590434
|
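A hedged usage sketch for the wrapper above: the import path follows the repository layout shown in the surrounding metadata, and the linear model is an arbitrary stand-in.
# Hedged usage sketch; model and hyper-parameter values are placeholders.
import torch
from caption.optimizers.adamw import AdamW   # module path as shown above

model = torch.nn.Linear(10, 2)                         # stand-in model
optimizer = AdamW(model.parameters(), lr=3e-5, weight_decay=0.01)

loss = model(torch.randn(4, 10)).sum()                 # dummy forward pass
loss.backward()
optimizer.step()
optimizer.zero_grad()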
a316bc543164db3ea1f947a610e805ad4ea09cd6
| 3,402
|
py
|
Python
|
dataset/cifar10.py
|
zpc-666/Paddle-Stochastic-Depth-ResNet110
|
bb8b5b90052feef39fafd2a790f08b80b45fbe41
|
[
"Apache-2.0"
] | null | null | null |
dataset/cifar10.py
|
zpc-666/Paddle-Stochastic-Depth-ResNet110
|
bb8b5b90052feef39fafd2a790f08b80b45fbe41
|
[
"Apache-2.0"
] | null | null | null |
dataset/cifar10.py
|
zpc-666/Paddle-Stochastic-Depth-ResNet110
|
bb8b5b90052feef39fafd2a790f08b80b45fbe41
|
[
"Apache-2.0"
] | 1
|
2021-08-07T14:56:44.000Z
|
2021-08-07T14:56:44.000Z
|
# coding: utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.vision.transforms as transforms
from paddle.io import Dataset
class MyDataset(Dataset):
def __init__(self, datasets, data_transforms=None):
self.datasets = datasets
self.data_transforms = data_transforms
def __getitem__(self, idx):
img, label = self.datasets[idx]
if self.data_transforms is not None:
img = self.data_transforms(img)
return img, label
def __len__(self):
return len(self.datasets)
# has_val_dataset controls whether to use the dataset split from the paper; enabled by default
def load_data(root, train_batch_size, test_batch_size, train_size=45000, val_size=5000, has_val_dataset=True):
print('Loading data...')
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_set = paddle.vision.datasets.Cifar10(data_file=root, mode='test', download=True, transform=test_transform, backend='cv2')
if has_val_dataset:
# Paper's split: 45000 training and 5000 validation samples, chosen at random
train_set = paddle.vision.datasets.Cifar10(data_file=root, mode='train', download=True, transform=None, backend='pil')
train_set, val_set = paddle.io.random_split(train_set, [train_size, val_size])
train_set = MyDataset(train_set, data_transforms=train_transform)
val_set = MyDataset(val_set, data_transforms=test_transform)
else:
# Not following the paper: the conventional 50000-sample training set, with the 10000-sample test set reused as validation
train_set = paddle.vision.datasets.Cifar10(data_file=root, mode='train', download=True, transform=train_transform, backend='pil')
val_set = test_set
# Without places=paddle.CPUPlace() an unexplained segmentation fault occurs
# With num_workers > 0 shared memory eventually runs out, so keep it at 0 even though it is a bit slower
train_loader = paddle.io.DataLoader(train_set, batch_size=train_batch_size,
shuffle=True, num_workers=0, places=paddle.CPUPlace())
val_loader = paddle.io.DataLoader(val_set, batch_size=test_batch_size,
shuffle=False, num_workers=0, places=paddle.CPUPlace())
test_loader = paddle.io.DataLoader(test_set, batch_size=test_batch_size,
shuffle=False, num_workers=0, places=paddle.CPUPlace())
print('Finish loading! train data length:{}, val data length:{}, test data length:{}'.format(len(train_set), len(val_set), len(test_set)))
return train_loader, val_loader, test_loader
| 45.36
| 142
| 0.6796
|
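A hedged example call for load_data() above; the archive path and batch sizes are placeholders.
# Example call for load_data() above; path and batch sizes are placeholders.
from dataset.cifar10 import load_data   # module path as shown above

train_loader, val_loader, test_loader = load_data(
    root="data/cifar-10-python.tar.gz",   # placeholder location of the CIFAR-10 archive
    train_batch_size=128,
    test_batch_size=100,
    has_val_dataset=True,                 # 45000/5000 split as in the paper
)
for images, labels in train_loader:
    print(images.shape, labels.shape)     # inspect one mini-batch, then stop
    break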
c833f74331104fe5e610de5e43b041e7ef81fd11
| 250
|
py
|
Python
|
issues/20160514 leica disto r12/read_points.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | 1
|
2021-06-05T09:15:15.000Z
|
2021-06-05T09:15:15.000Z
|
issues/20160514 leica disto r12/read_points.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
issues/20160514 leica disto r12/read_points.py
|
jpsantos-mf/ezdxf
|
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
|
[
"MIT"
] | null | null | null |
import ezdxf
dwg = ezdxf.readfile("Leica_Disto_S910.dxf")
msp = dwg.modelspace()
for num, point in enumerate(msp.query('POINT')):
print("#{num}: {location}".format(num=num, location=point.dxf.location))
dwg.saveas("Leica_Disto_S910_ezdxf.dxf")
| 27.777778
| 76
| 0.732
|
4f0a9fb0aa2b3a0de935efbc3cedeebcbda6efea
| 1,170
|
py
|
Python
|
test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | 1
|
2019-12-14T23:26:49.000Z
|
2019-12-14T23:26:49.000Z
|
test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | null | null | null |
test_libs/pyspec/eth2spec/test/helpers/phase1/attestations.py
|
osirusgroup/eth2.0-specs
|
88e954a9c73dff63436350b9cd530c8c9f44a94b
|
[
"CC0-1.0"
] | 1
|
2020-07-05T22:12:40.000Z
|
2020-07-05T22:12:40.000Z
|
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils.bls import (
bls_aggregate_signatures,
bls_sign,
)
def sign_shard_attestation(spec, beacon_state, shard_state, block, participants):
signatures = []
message_hash = spec.ShardAttestationData(
slot=block.slot,
parent_root=block.parent_root,
).hash_tree_root()
block_epoch = spec.compute_epoch_of_shard_slot(block.slot)
for validator_index in participants:
privkey = privkeys[validator_index]
signatures.append(
get_attestation_signature(
spec,
beacon_state,
shard_state,
message_hash,
block_epoch,
privkey,
)
)
return bls_aggregate_signatures(signatures)
def get_attestation_signature(spec, beacon_state, shard_state, message_hash, block_epoch, privkey):
return bls_sign(
message_hash=message_hash,
privkey=privkey,
domain=spec.get_domain(
state=beacon_state,
domain_type=spec.DOMAIN_SHARD_ATTESTER,
message_epoch=block_epoch,
)
)
| 28.536585
| 99
| 0.64359
|
e5a410bc9c5d67a676b32563ce78fe8ee94651c7
| 1,221
|
py
|
Python
|
src/ensembl/production/dbcopy/migrations/0004_auto_20210129_1245.py
|
luca-drf/ensembl-prodinf-dbcopy
|
d6ee2e402fd4eba1f85a9455c007492080732941
|
[
"Apache-2.0"
] | null | null | null |
src/ensembl/production/dbcopy/migrations/0004_auto_20210129_1245.py
|
luca-drf/ensembl-prodinf-dbcopy
|
d6ee2e402fd4eba1f85a9455c007492080732941
|
[
"Apache-2.0"
] | 2
|
2021-09-08T18:09:15.000Z
|
2022-01-20T16:43:22.000Z
|
src/ensembl/production/dbcopy/migrations/0004_auto_20210129_1245.py
|
luca-drf/ensembl-prodinf-dbcopy
|
d6ee2e402fd4eba1f85a9455c007492080732941
|
[
"Apache-2.0"
] | 1
|
2021-06-08T13:10:44.000Z
|
2021-06-08T13:10:44.000Z
|
# Generated by Django 2.2.13 on 2021-01-29 12:45
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ensembl_dbcopy', '0003_add_dry_run_fix_dbs_2_exclude'),
]
operations = [
migrations.AlterModelOptions(
name='group',
options={'verbose_name': 'Host Group'},
),
migrations.AddField(
model_name='host',
name='active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='group',
name='group_name',
field=models.CharField(max_length=80, verbose_name='User Group'),
),
migrations.AlterField(
model_name='group',
name='host_id',
field=models.ForeignKey(db_column='auto_id', on_delete=django.db.models.deletion.CASCADE,
related_name='groups', to='ensembl_dbcopy.Host'),
),
migrations.AlterField(
model_name='host',
name='mysqld_file_owner',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| 31.307692
| 101
| 0.578215
|
bd25447838917465204ebea12a46e7fe68becd69
| 1,287
|
py
|
Python
|
discord/ext/commands/_types.py
|
Codered999/discord.py
|
d80d4145b182d56243f96bec5949926c73fb3da0
|
[
"MIT"
] | 11
|
2020-09-06T17:35:43.000Z
|
2022-02-18T22:13:24.000Z
|
discord/ext/commands/_types.py
|
Codered999/discord.py
|
d80d4145b182d56243f96bec5949926c73fb3da0
|
[
"MIT"
] | 6
|
2021-03-19T08:03:53.000Z
|
2021-12-13T20:26:36.000Z
|
discord/ext/commands/_types.py
|
Codered999/discord.py
|
d80d4145b182d56243f96bec5949926c73fb3da0
|
[
"MIT"
] | 5
|
2020-04-22T08:02:39.000Z
|
2021-06-30T06:30:31.000Z
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
# This is merely a tag type to avoid circular import issues.
# Yes, this is a terrible solution but ultimately it is the only solution.
class _BaseCommand:
__slots__ = ()
| 41.516129
| 75
| 0.78244
|
316d8294f57c700688b033dd5a39338a99c157ba
| 2,342
|
py
|
Python
|
esp/views.py
|
amillar2/light-django
|
bea10041bef2fc3d833634a2a0a44e6af8733a21
|
[
"MIT"
] | null | null | null |
esp/views.py
|
amillar2/light-django
|
bea10041bef2fc3d833634a2a0a44e6af8733a21
|
[
"MIT"
] | null | null | null |
esp/views.py
|
amillar2/light-django
|
bea10041bef2fc3d833634a2a0a44e6af8733a21
|
[
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.apps import apps
from .models import PWM, Device
import json
class IndexView(generic.ListView):
model = PWM
template_name = 'esp/index.html'
def get_queryset(self):
return PWM.objects.all()
class PWMControl(generic.edit.UpdateView):
model = PWM
fields =['setting', 'on']
template_name = 'esp/control.html'
def submit(request):
if request.method == 'POST':
pwm_id = request.POST.get('pwm_id')
pwm = PWM.objects.filter(pk=pwm_id)
if pwm:
pwm = pwm[0]
if 'on' in request.POST.keys():
if(request.POST.get('on')=='true'):
on = True
elif(request.POST.get('on')=='false'):
on = False
else:
on = pwm.on
if 'setting' in request.POST.keys():
setting = int(request.POST.get('setting'))
else:
setting = pwm.setting
pwm.set(setting*255/100,on)
response_data = {'result':'success'}
else:
response_data = {'result':'no such pwm'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see": "this isn't happening"}), content_type="application/json")
def toggle(request):
if request.method == 'POST':
pwm_id = request.POST.get('pwm_id')
pwm = PWM.objects.filter(pk=pwm_id)
if pwm:
pwm[0].toggle()
response_data = {'result':'success'}
else:
response_data = {'result':'no such pwm'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see": "this isn't happening"}), content_type="application/json")
def update(request):
if request.method == 'GET':
pwm_id = request.GET.get('pwm_id')
response_data = {}
pwm = PWM.objects.filter(pk=pwm_id)
if pwm:
light = pwm[0]
response_data = {'setting':light.setting*100/255, 'on':light.on, 'nodim':light.nodim }
print(light.on)
else:
response_data = {'result':'no such pwm'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
return HttpResponse(json.dumps({"nothing to see": "this isn't happening"}), content_type="application/json")
| 28.560976
| 116
| 0.679334
|
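Because the views above only read request parameters, they can be smoke-tested with Django's RequestFactory inside a configured project; the URL strings below are arbitrary, and a PWM row with pk=1 is assumed to exist.
# Smoke test for the views above with Django's RequestFactory.  Must run inside a
# configured Django project; a PWM object with pk=1 is assumed to exist.
from django.test import RequestFactory
from esp.views import submit, update   # module path as shown above

factory = RequestFactory()

request = factory.post("/esp/submit/", {"pwm_id": "1", "on": "true", "setting": "50"})
response = submit(request)
print(response.status_code, response.content)   # {"result": "success"} when pwm 1 exists

request = factory.get("/esp/update/", {"pwm_id": "1"})
response = update(request)
print(response.status_code, response.content)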
202747eb9b89de67707dbc3b4bf79138318b6039
| 928
|
py
|
Python
|
accounts/migrations/0003_auto_20180202_1701.py
|
insung151/Pwannar
|
9f0be39e763dfc709a4b43a2498a95a19bfa435f
|
[
"MIT"
] | null | null | null |
accounts/migrations/0003_auto_20180202_1701.py
|
insung151/Pwannar
|
9f0be39e763dfc709a4b43a2498a95a19bfa435f
|
[
"MIT"
] | 4
|
2018-02-14T11:58:51.000Z
|
2018-02-14T14:03:16.000Z
|
accounts/migrations/0003_auto_20180202_1701.py
|
insung151/Pwannar
|
9f0be39e763dfc709a4b43a2498a95a19bfa435f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-02-02 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20180202_1700'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='bio',
field=models.TextField(blank=True, max_length=500, null=True),
),
migrations.AlterField(
model_name='profile',
name='history',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='profile',
name='location',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='profile',
name='school',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
| 27.294118
| 74
| 0.567888
|
716507698928928162705748ff1576f982bdea68
| 1,998
|
py
|
Python
|
besspin/cwesEvaluation/resourceManagement/customCweScores/test_690.py
|
mikkowus/BESSPIN-Tool-Suite
|
e87e9abb1156a8627aacc3272f1925b034129146
|
[
"Apache-2.0"
] | null | null | null |
besspin/cwesEvaluation/resourceManagement/customCweScores/test_690.py
|
mikkowus/BESSPIN-Tool-Suite
|
e87e9abb1156a8627aacc3272f1925b034129146
|
[
"Apache-2.0"
] | null | null | null |
besspin/cwesEvaluation/resourceManagement/customCweScores/test_690.py
|
mikkowus/BESSPIN-Tool-Suite
|
e87e9abb1156a8627aacc3272f1925b034129146
|
[
"Apache-2.0"
] | null | null | null |
from besspin.cwesEvaluation.resourceManagement.customCweScores.helpers import *
def test_690 (logLines):
testNum = 690
osImage = getOsImage(logLines,testNum=testNum)
if (osImage == 'debian' or osImage == 'FreeBSD'):
nParts = 2
partsLines = regPartitionTest (logLines,nParts,testNum=testNum)
partsScores = {}
if (doesKeywordExist(partsLines[1],"<INVALID>")):
partsScores[1] = SCORES.CALL_ERR
elif (doesKeywordExist(partsLines[1],'<REGULAR_PASSED>')):
partsScores[1] = SCORES.NONE
else:
partsScores[1] = SCORES.FAIL
if (doesKeywordExist(partsLines[2],"<INVALID>")):
partsScores[2] = SCORES.CALL_ERR
elif (doesKeywordExist(partsLines[2],'<DEREFERENCE_VIOLATION_PASSED_SUCCESSFULLY>')):
partsScores[2] = SCORES.HIGH
elif (doesKeywordExist(partsLines[2],'Segmentation fault')):
partsScores[2] = SCORES.HIGH
else:
partsScores[2] = SCORES.FAIL
elif (osImage == 'FreeRTOS'):
nParts = 2
partsLines = regPartitionTestFreeRTOS(logLines,nParts,testNum=testNum)
partsScores = {}
if (doesKeywordExist(partsLines[1],"<REGULAR_PASSED>")):
partsScores[1] = SCORES.NONE
else:
partsScores[1] = SCORES.FAIL
if (doesKeywordExist(partsLines[2],"<DEREFERENCE_VIOLATION_PASSED_SUCCESSFULLY>") or doesKeywordExist(partsLines[2],"<GDB-SIGINT>")):
partsScores[2] = SCORES.HIGH
elif (doesKeywordExist(partsLines[2],"<TIMEOUT>")): #timeout with no trapping
partsScores[2] = SCORES.HIGH
else:
partsScores[2] = SCORES.FAIL
else:
print (f"Error: parsing test_{testNum}.log is not implemented for <{osImage}>.")
return overallScore ([],testNum)
listScores = [adjustToCustomScore(partsLines[iPart],partsScores[iPart]) for iPart in range(1,nParts+1)]
return overallScore (listScores ,testNum)
| 39.96
| 141
| 0.641642
|
d83c04cd6becad8b6a86f9a015e25a0fe85a44e3
| 957
|
py
|
Python
|
examples/volumetric/tetralize_surface.py
|
mikami520/vedo
|
1a3abcf3f1e495287e8934d9b5bb07b511ab8be5
|
[
"MIT"
] | 1
|
2022-03-22T21:49:29.000Z
|
2022-03-22T21:49:29.000Z
|
examples/volumetric/tetralize_surface.py
|
mikami520/vedo
|
1a3abcf3f1e495287e8934d9b5bb07b511ab8be5
|
[
"MIT"
] | null | null | null |
examples/volumetric/tetralize_surface.py
|
mikami520/vedo
|
1a3abcf3f1e495287e8934d9b5bb07b511ab8be5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tetralize a closed surface mesh
Click on the mesh and press ↓ or x to toggle a piece"""
from vedo import dataurl, Sphere, settings, Mesh, TessellatedBox, show
settings.useDepthPeeling = True
surf = Sphere(quads=True, res=15)
# surf = TessellatedBox()
# surf = Mesh(dataurl+'290_closed.vtk')
# surf = Mesh(dataurl+'bunny.obj', c='g3').fillHoles().cap().smooth()
tmesh = surf.tetralize(side=0.015, debug=True)
#tmesh.write('mytetmesh.vtk') # save to disk!
# Assign an id to each tetrahedron to visualize regions
seeds = surf.clone().subsample(0.3)
cids = []
for p in tmesh.cellCenters():
cid = seeds.closestPoint(p, returnPointId=True)
cids.append(cid)
tmesh.celldata["fragments"] = cids
pieces = []
for i in range(seeds.NPoints()):
tc = tmesh.clone().threshold("fragments", above=i-0.1, below=i+0.1)
mc = tc.tomesh(fill=True, shrink=0.95).color(i)
pieces.append(mc)
show(__doc__, pieces, axes=1)
| 29.90625
| 70
| 0.704284
|
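A small hedged follow-up: reloading the tetrahedral mesh that the commented-out tmesh.write('mytetmesh.vtk') line above would produce, assuming that line is uncommented and this vedo version exposes TetMesh.
# Reload the tetrahedral mesh saved by the (commented-out) tmesh.write(...) line above.
from vedo import TetMesh, show

tm = TetMesh("mytetmesh.vtk")                   # file produced by tmesh.write(...)
show(tm.tomesh(fill=True, shrink=0.9), axes=1)  # same tomesh() options as used above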
78178c364353085b7d9056f9aca5d55d0fb43f57
| 19,478
|
py
|
Python
|
tests/tensorflow/keras/test_keras_mirrored.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | 1
|
2020-08-14T16:10:04.000Z
|
2020-08-14T16:10:04.000Z
|
tests/tensorflow/keras/test_keras_mirrored.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/keras/test_keras_mirrored.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
# Future
from __future__ import absolute_import, division, print_function, unicode_literals
# Standard Library
import os
# Third Party
import pytest
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.python.client import device_lib
from tests.tensorflow.utils import create_trial_fast_refresh
# First Party
import smdebug.tensorflow as smd
from smdebug.core.access_layer import has_training_ended
from smdebug.core.collection import CollectionKeys
from smdebug.core.modes import ModeKeys
from smdebug.core.reduction_config import ALLOWED_NORMS, ALLOWED_REDUCTIONS
from smdebug.exceptions import TensorUnavailable, TensorUnavailableForStep
from smdebug.tensorflow import ReductionConfig, SaveConfig
from smdebug.tensorflow.keras import KerasHook
tfds.disable_progress_bar()
class FetchTensorCallback(tf.keras.callbacks.Callback):
def __init__(self, tensors):
self.tensors = tensors
self.fetches_added = False
def _callback_fn(self, tensor_val):
assert tensor_val is not None
def on_train_batch_begin(self, batch, logs):
try:
from tensorflow.python.keras.distribute.distributed_training_utils import (
get_distributed_model,
)
from tensorflow.python.keras.utils.mode_keys import ModeKeys as KerasModeKeys
for t in self.tensors:
x = get_distributed_model(self.model, KerasModeKeys.TRAIN)._distributed_function
x.fetches.append(t)
x.fetch_callbacks[t] = self._callback_fn
self.fetches_added = True
except ImportError:
pass
def on_train_batch_end(self, batch, logs):
if self.fetches_added:
# these imports only run if they already succeeded in on_train_batch_begin
from tensorflow.python.keras.distribute.distributed_training_utils import (
get_distributed_model,
)
from tensorflow.python.keras.utils.mode_keys import ModeKeys as KerasModeKeys
for t in self.tensors:
x = get_distributed_model(self.model, KerasModeKeys.TRAIN)._distributed_function
x.fetches.remove(t)
del x.fetch_callbacks[t]
self.fetches_added = False
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return len([x.name for x in local_device_protos if x.device_type == "GPU"])
def train_model(
trial_dir,
save_all=False,
hook=None,
include_collections=None,
reduction_config=None,
save_config=None,
use_keras_optimizer=True,
eager=False,
create_relu_collection=False,
strategy=None,
steps=None,
add_callbacks=None,
zcc=False,
include_workers="all",
):
print(tf.__version__)
tf.keras.backend.clear_session()
datasets, info = tfds.load(name="mnist", with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets["train"], datasets["test"]
if strategy is None:
strategy = tf.distribute.MirroredStrategy()
# You can also do info.splits.total_num_examples to get the total
# number of examples in the dataset.
BUFFER_SIZE = 10000
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE)
if hook is None and not zcc:
if save_config is None:
save_config = SaveConfig(save_interval=3)
hook = KerasHook(
out_dir=trial_dir,
save_config=save_config,
reduction_config=reduction_config,
include_collections=include_collections,
save_all=save_all,
include_workers=include_workers,
)
if not save_all and include_collections is not None:
for cname in hook.include_collections:
if cname not in include_collections:
hook.get_collection(cname).save_config = SaveConfig(end_step=0)
if use_keras_optimizer:
opt = tf.keras.optimizers.Adam()
else:
opt = tf.train.AdamOptimizer(0.1)
if not zcc:
opt = hook.wrap_optimizer(opt)
with strategy.scope():
relu_layer = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
relu_layer,
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=opt,
run_eagerly=eager,
metrics=["accuracy"],
)
if create_relu_collection:
hook.get_collection("relu").add_keras_layer(relu_layer, inputs=True, outputs=True)
hooks = []
if add_callbacks:
if "tensorboard" in add_callbacks:
hooks.append(
# write_grads = True causes crash saying handle must be created in scope
# error like this https://stackoverflow.com/questions/56836895/custom-training-loop-using-tensorflow-gpu-1-14-and-tf-distribute-mirroredstrateg
# this crash is even if callback is off
tf.keras.callbacks.TensorBoard(
log_dir="/tmp/logs", histogram_freq=4, write_images=True
)
)
if "fetch_tensor" in add_callbacks:
hooks.append(FetchTensorCallback(model.weights))
if not zcc:
hooks.append(hook)
if steps is None:
steps = ["train"]
for step in steps:
if step == "train":
model.fit(train_dataset, epochs=1, steps_per_epoch=10, callbacks=hooks, verbose=0)
elif step == "eval":
model.evaluate(eval_dataset, steps=10, callbacks=hooks, verbose=0)
elif step == "predict":
model.predict(train_dataset, steps=4, callbacks=hooks, verbose=0)
smd.get_hook()._cleanup()
return strategy
@pytest.mark.skip(
"needs to be run individually as it complains that eager "
"needs to be set at startup, but pytest "
"does not allow controlling order of tests"
)
def test_tf_keras_eager(out_dir):
tf.enable_eager_execution()
train_model(out_dir, eager=True, steps=["train"])
tf.disable_eager_execution()
@pytest.mark.skip(
"needs to be run individually as it complains that eager "
"needs to be set at startup, but pytest "
"does not allow controlling order of tests"
)
def test_tf_keras_eager_env(out_dir):
tf.enable_eager_execution()
train_model(out_dir, eager=False, steps=["train"])
tf.disable_eager_execution()
def exhaustive_check(trial_dir, zcc=False, include_workers="one"):
include_collections = [
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
CollectionKeys.OUTPUTS,
CollectionKeys.METRICS,
CollectionKeys.OPTIMIZER_VARIABLES,
]
strategy = train_model(
trial_dir,
include_collections=include_collections,
steps=["train", "eval", "predict", "train"],
include_workers=include_workers,
zcc=zcc,
)
tr = create_trial_fast_refresh(trial_dir)
print(tr.tensor_names())
if include_workers == "all":
assert len(tr.workers()) == strategy.num_replicas_in_sync
assert len(tr.tensor_names()) == (6 + 6 + 1 + 3 + strategy.num_replicas_in_sync * 3 + 5)
else:
assert len(tr.workers()) == 1
assert len(tr.tensor_names()) == (6 + 6 + 1 + 3 + 1 * 3 + 5)
# 6 weights, 6 gradients, 1 loss, 3 metrics, 24 outputs (8 for each mode), 5 optimizer variables
assert len(tr.modes()) == 3
assert len(tr.steps()) == 14
assert len(tr.steps(ModeKeys.TRAIN)) == 8 # 0, 3, 6, 9, 12, 15, 18, 19(end of epoch)
assert len(tr.steps(ModeKeys.EVAL)) == 4
assert len(tr.steps(ModeKeys.PREDICT)) == 2 # ran 4 steps above
assert len(tr.tensor_names(collection=CollectionKeys.BIASES)) == 3
wtnames = tr.tensor_names(collection=CollectionKeys.WEIGHTS)
assert len(wtnames) == 3
for wtname in wtnames:
assert len(tr.tensor(wtname).steps()) == 13, wtname
assert len(tr.tensor(wtname).steps(ModeKeys.TRAIN)) == 7
for s in tr.tensor(wtname).steps(ModeKeys.TRAIN):
assert tr.tensor(wtname).value(s, mode=ModeKeys.TRAIN) is not None
for worker in tr.workers():
assert tr.tensor(wtname).value(s, mode=ModeKeys.TRAIN, worker=worker) is not None
assert len(tr.tensor(wtname).steps(ModeKeys.EVAL)) == 4
for s in tr.tensor(wtname).steps(ModeKeys.EVAL):
assert tr.tensor(wtname).value(s, mode=ModeKeys.EVAL) is not None
for worker in tr.workers():
assert tr.tensor(wtname).value(s, mode=ModeKeys.EVAL, worker=worker) is not None
assert len(tr.tensor(wtname).steps(ModeKeys.PREDICT)) == 2
gradnames = tr.tensor_names(collection=CollectionKeys.GRADIENTS)
assert len(gradnames) == 6
for gradname in gradnames:
assert len(tr.tensor(gradname).steps(ModeKeys.TRAIN)) == 7
for s in tr.tensor(gradname).steps(ModeKeys.TRAIN):
assert tr.tensor(gradname).value(s, mode=ModeKeys.TRAIN) is not None
assert len(tr.tensor(gradname).steps(ModeKeys.EVAL)) == 0
assert len(tr.tensor(gradname).steps(ModeKeys.PREDICT)) == 0
optvarnames = tr.tensor_names(collection=CollectionKeys.OPTIMIZER_VARIABLES)
assert len(optvarnames) == 5
for optvarname in optvarnames:
assert len(tr.tensor(optvarname).steps(ModeKeys.TRAIN)) == 7
for s in tr.tensor(optvarname).steps(ModeKeys.TRAIN):
assert tr.tensor(optvarname).value(s, mode=ModeKeys.TRAIN) is not None
assert len(tr.tensor(optvarname).steps(ModeKeys.EVAL)) == 0
assert len(tr.tensor(optvarname).steps(ModeKeys.PREDICT)) == 0
assert len(tr.tensor_names(collection=CollectionKeys.LOSSES)) == 1
loss_name = tr.tensor_names(collection=CollectionKeys.LOSSES)[0]
# loss is not in predict mode (so less 2)
# add one for end of epoch
assert len(tr.tensor(loss_name).steps(ModeKeys.TRAIN)) == 8
assert len(tr.tensor(loss_name).steps(ModeKeys.EVAL)) == 4
assert len(tr.tensor(loss_name).steps(ModeKeys.PREDICT)) == 0
assert len(tr.tensor(loss_name).steps()) == 12
metricnames = tr.tensor_names(collection=CollectionKeys.METRICS)
assert len(metricnames) == 3
@pytest.mark.slow
def test_tf_keras(out_dir, zcc=False, include_workers="all"):
exhaustive_check(out_dir, zcc=zcc, include_workers=include_workers)
@pytest.mark.slow
def test_tf_keras_non_keras_opt(out_dir):
include_collections = [
CollectionKeys.GRADIENTS,
CollectionKeys.OPTIMIZER_VARIABLES,
CollectionKeys.METRICS,
]
train_model(
out_dir,
include_collections=include_collections,
use_keras_optimizer=False,
steps=["train", "eval"],
)
tr = create_trial_fast_refresh(out_dir)
assert len(tr.modes()) == 2
assert len(tr.steps(ModeKeys.TRAIN)) == 4 # 0, 3, 6, 9
assert len(tr.tensor_names(collection=CollectionKeys.GRADIENTS)) == 6
gradient_name = tr.tensor_names(collection=CollectionKeys.GRADIENTS)[0]
assert len(tr.tensor(gradient_name).steps(ModeKeys.TRAIN)) == 4
assert len(tr.tensor(gradient_name).steps(ModeKeys.EVAL)) == 0
# not supported for non keras optimizer with keras
assert len(tr.tensor_names(collection=CollectionKeys.OPTIMIZER_VARIABLES)) == 0
@pytest.mark.slow
def test_save_all(out_dir):
strategy = train_model(
out_dir,
include_collections=None,
save_all=True,
save_config=SaveConfig(save_steps=[5]),
steps=["train"],
)
tr = create_trial_fast_refresh(out_dir)
print(tr.tensor_names())
assert (
len(tr.tensor_names())
== 6 + 6 + 5 + 3 + 1 + 3 * strategy.num_replicas_in_sync + 2 * strategy.num_replicas_in_sync
)
# weights, grads, optimizer_variables, metrics, losses, outputs
assert len(tr.steps()) == 3
@pytest.mark.slow
def test_save_one_worker(out_dir):
strategy = train_model(
out_dir,
include_collections=None,
save_all=True,
save_config=SaveConfig(save_steps=[5]),
steps=["train"],
include_workers="one",
)
tr = create_trial_fast_refresh(out_dir)
assert len(tr.workers()) == 1
assert len(tr.steps())
assert len(tr.tensor_names(collection="weights"))
assert len(tr.tensor_names(collection="weights"))
assert len(tr.tensor(tr.tensor_names(collection="weights")[0]).workers(0)) == 1
assert len(tr.tensor_names(collection="biases"))
assert len(tr.tensor(tr.tensor_names(collection="biases")[0]).workers(0)) == 1
assert len(tr.tensor_names(collection="gradients"))
@pytest.mark.slow
def test_save_all_workers(out_dir, zcc=False):
# Skip if no GPUS
if get_available_gpus() == 0:
return
strategy = train_model(
out_dir,
include_collections=None,
save_all=True,
save_config=SaveConfig(save_steps=[5]),
steps=["train"],
include_workers="all",
)
tr = create_trial_fast_refresh(out_dir)
assert len(tr.workers()) == get_available_gpus()
assert len(tr.tensor_names(collection="weights"))
assert (
len(tr.tensor(tr.tensor_names(collection="weights")[0]).workers(0))
== strategy.num_replicas_in_sync
)
assert "conv2d/weights/conv2d/kernel:0" in tr.tensor_names(collection="weights")
assert (
len(tr.tensor("conv2d/weights/conv2d/kernel:0").workers(0)) == strategy.num_replicas_in_sync
)
assert len(tr.tensor_names(collection="biases"))
assert "conv2d/weights/conv2d/bias:0" in tr.tensor_names(collection="biases")
assert (
len(tr.tensor(tr.tensor_names(collection="biases")[0]).workers(0))
== strategy.num_replicas_in_sync
)
assert len(tr.tensor_names(collection="gradients"))
@pytest.mark.slow
def test_base_reductions(out_dir):
train_model(
out_dir,
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.METRICS,
CollectionKeys.LOSSES,
],
reduction_config=ReductionConfig(norms=ALLOWED_NORMS, reductions=ALLOWED_REDUCTIONS),
steps=["train"],
)
tr = create_trial_fast_refresh(out_dir)
weight_name = tr.tensor_names(collection=CollectionKeys.WEIGHTS)[0]
try:
tr.tensor(weight_name).value(0)
assert False
except TensorUnavailableForStep:
assert tr.tensor(weight_name).reduction_values(0)
loss_name = tr.tensor_names(collection=CollectionKeys.LOSSES)[0]
assert tr.tensor(loss_name).value(0) is not None
metric_name = tr.tensor_names(collection=CollectionKeys.METRICS)[0]
assert tr.tensor(metric_name).value(0) is not None
@pytest.mark.slow
def test_collection_reductions(out_dir):
tf.reset_default_graph()
tf.keras.backend.clear_session()
hook = KerasHook(
out_dir=out_dir,
save_config=SaveConfig(save_interval=3),
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
],
)
hook.get_collection(CollectionKeys.GRADIENTS).reduction_config = ReductionConfig(norms=["l1"])
train_model(out_dir, hook=hook, steps=["train"])
tr = create_trial_fast_refresh(out_dir)
weight_name = tr.tensor_names(collection=CollectionKeys.WEIGHTS)[0]
grad_name = tr.tensor_names(collection=CollectionKeys.GRADIENTS)[0]
try:
tr.tensor(weight_name).value(0)
tr.tensor(grad_name).value(0)
assert False
except TensorUnavailableForStep:
try:
assert tr.tensor(weight_name).reduction_value(0, "l1") is not None
except ValueError:
# some tensors reduction can't be computed
pass
except TensorUnavailable:
# sometimes we might not have tensor saved if it was only being
# saved as reduction and the reduction computation failed
pass
@pytest.mark.slow
def test_training_end(out_dir):
train_model(out_dir, include_collections=[CollectionKeys.OUTPUTS], steps=["train"])
assert has_training_ended(out_dir) is True
@pytest.mark.slow
def test_collection_add(out_dir):
strategy = train_model(
out_dir,
include_collections=["relu"],
save_config=SaveConfig(save_interval=9),
create_relu_collection=True,
steps=["train"],
)
tr = create_trial_fast_refresh(out_dir)
relu_coll_tensor_names = tr.tensor_names(collection="relu")
assert len(relu_coll_tensor_names) == strategy.num_replicas_in_sync * 2
assert tr.tensor(relu_coll_tensor_names[0]).value(0) is not None
assert tr.tensor(relu_coll_tensor_names[1]).value(0) is not None
@pytest.mark.slow
def test_include_regex(out_dir):
hook = KerasHook(
out_dir=out_dir,
save_config=SaveConfig(save_interval=9),
include_collections=["custom_coll"],
include_workers="all",
)
hook.get_collection("custom_coll").include("dense")
strategy = train_model(out_dir, hook=hook, steps=["train"])
tr = create_trial_fast_refresh(out_dir)
tnames = tr.tensor_names(collection="custom_coll")
assert len(tnames) == 4 + 3 * strategy.num_replicas_in_sync
for tname in tnames:
assert tr.tensor(tname).value(0) is not None
@pytest.mark.slow
def test_clash_with_tb_callback(out_dir):
train_model(
out_dir,
save_config=SaveConfig(save_interval=9),
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
CollectionKeys.METRICS,
],
steps=["train"],
add_callbacks=["tensorboard"],
)
tr = create_trial_fast_refresh(out_dir)
assert len(tr.tensor_names()) == 16
@pytest.mark.slow
def test_clash_with_custom_callback(out_dir):
strategy = train_model(
out_dir,
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.OUTPUTS,
CollectionKeys.GRADIENTS,
],
save_config=SaveConfig(save_interval=9),
steps=["train"],
add_callbacks=["fetch_tensor"],
)
tr = create_trial_fast_refresh(out_dir)
assert len(tr.tensor_names()) == 6 + 6 + strategy.num_replicas_in_sync * 1 + 3
def test_one_device(out_dir):
strategy = train_model(
out_dir,
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.OUTPUTS,
CollectionKeys.GRADIENTS,
],
save_config=SaveConfig(save_interval=9),
strategy=tf.distribute.OneDeviceStrategy(device="/cpu:0"),
steps=["train"],
)
assert os.path.isdir(os.path.join(out_dir, "events")) is False
| 34.720143
| 159
| 0.668241
|
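Outside this test harness, the same KerasHook is attached like any other Keras callback; below is a minimal hedged sketch with toy data, no tf.distribute strategy, and a placeholder output directory.
# Minimal sketch: toy data, no distribution strategy, placeholder out_dir.
import numpy as np
import tensorflow as tf
from smdebug.core.collection import CollectionKeys
from smdebug.tensorflow import SaveConfig
from smdebug.tensorflow.keras import KerasHook

hook = KerasHook(
    out_dir="/tmp/smdebug_demo",                # placeholder output directory
    save_config=SaveConfig(save_interval=10),
    include_collections=[CollectionKeys.WEIGHTS, CollectionKeys.LOSSES],
)

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=hook.wrap_optimizer(tf.keras.optimizers.Adam()), loss="mse")

x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")
model.fit(x, y, epochs=1, batch_size=8, callbacks=[hook], verbose=0)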
b7d9ba4fc2c8f730e8618e7a3862d8ad64519f00
| 635
|
py
|
Python
|
tutorial/cytoscape/reference_chapter.py
|
blozano824/dash-docs
|
f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d
|
[
"MIT"
] | 1
|
2019-03-04T03:17:19.000Z
|
2019-03-04T03:17:19.000Z
|
tutorial/cytoscape/reference_chapter.py
|
blozano824/dash-docs
|
f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d
|
[
"MIT"
] | null | null | null |
tutorial/cytoscape/reference_chapter.py
|
blozano824/dash-docs
|
f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d
|
[
"MIT"
] | null | null | null |
import re
import dash_html_components as html
import dash_core_components as dcc
import dash_cytoscape as cyto
def component_doc(component):
trimmed_docs = re.sub(
r'- setProps.*\n',
'',
re.sub(
r'Available events: .*',
'',
component.__doc__.split('Keyword arguments:')[-1]
)
)
return html.Div([
html.H1('Cytoscape Reference'),
html.H3('Keyword Arguments'),
html.Div(className='cytoscape-reference', children=[
dcc.Markdown(trimmed_docs)
])
])
layout = html.Div([
component_doc(cyto.Cytoscape)
])
| 20.483871
| 61
| 0.584252
|
7e595524ba5ca64bee821eb014437973582d4d72
| 1,233
|
py
|
Python
|
setup.py
|
danyfdz92/stack
|
68656d56d1f983c718b3fb8e174fc0270b872334
|
[
"MIT"
] | 37
|
2015-03-03T15:48:29.000Z
|
2021-06-13T21:17:47.000Z
|
setup.py
|
danyfdz92/stack
|
68656d56d1f983c718b3fb8e174fc0270b872334
|
[
"MIT"
] | 32
|
2017-01-30T18:29:01.000Z
|
2018-02-16T22:16:00.000Z
|
setup.py
|
casmlab/stack
|
86bca591868a5c7dd9fef18e4b34113bd22df2c6
|
[
"MIT"
] | 20
|
2015-02-14T05:02:07.000Z
|
2021-07-19T17:41:15.000Z
|
#!env/bin/python
import sys
import os
import json
from werkzeug import generate_password_hash
from app.controller import Controller
from app.models import DB
basedir = os.getcwd()
db = DB()
def main():
print '\n'
print 'STACK'
print '----------'
print '\n'
print 'Welcome to the STACK setup tool. Follow the instructions below to\nsetup your first project account and initialize the configuration\nfiles for your STACK toolkit.'
print '\n'
project_name = raw_input('Enter a project account name: ')
password = raw_input('Enter a project account password: ')
description = raw_input('Enter a project account description: ')
hashed_password = generate_password_hash(password)
resp = db.create(project_name=project_name, password=password, hashed_password=hashed_password,
description=description)
if resp['status']:
print '\n'
print 'SUCCESS! You can now login to your account %s from the\n STACK front-end. Happy researching.' % project_name
else:
print '\n'
print 'Oops. Something went wrong. Please try again and make sure\n the account name you entered does not already exist.'
if __name__ == "__main__":
main()
| 30.073171
| 175
| 0.696675
|
0857af3d5baa4906d0f7964f30801e245dc8cf52
| 4,702
|
py
|
Python
|
djangoenv/lib/python3.8/site-packages/svgwrite/gradients.py
|
zawad2221/courier_delivery
|
f96c41a60943946f9f7e0837d017215aa8cd02dd
|
[
"MIT"
] | null | null | null |
djangoenv/lib/python3.8/site-packages/svgwrite/gradients.py
|
zawad2221/courier_delivery
|
f96c41a60943946f9f7e0837d017215aa8cd02dd
|
[
"MIT"
] | null | null | null |
djangoenv/lib/python3.8/site-packages/svgwrite/gradients.py
|
zawad2221/courier_delivery
|
f96c41a60943946f9f7e0837d017215aa8cd02dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: gradients module
# Created: 26.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
"""
Gradients consist of continuously smooth color transitions along a vector
from one color to another, possibly followed by additional transitions along
the same vector to other colors. SVG provides for two types of gradients:
linear gradients and radial gradients.
"""
from svgwrite.base import BaseElement
from svgwrite.mixins import Transform, XLink
from svgwrite.utils import is_string
class _GradientStop(BaseElement):
elementname = 'stop'
def __init__(self, offset=None, color=None, opacity=None, **extra):
super(_GradientStop, self).__init__(**extra)
if offset is not None:
self['offset'] = offset
if color is not None:
self['stop-color'] = color
if opacity is not None:
self['stop-opacity'] = opacity
class _AbstractGradient(BaseElement, Transform, XLink):
transformname = 'gradientTransform'
def __init__(self, inherit=None, **extra):
super(_AbstractGradient, self).__init__(**extra)
if inherit is not None:
if is_string(inherit):
self.set_href(inherit)
else:
self.set_href(inherit.get_iri())
def get_paint_server(self, default='none'):
""" Returns the <FuncIRI> of the gradient. """
return "%s %s" % (self.get_funciri(), default)
def add_stop_color(self, offset=None, color=None, opacity=None):
""" Adds a stop-color to the gradient.
:param offset: is either a <number> (usually ranging from 0 to 1) or
a `<percentage>` (usually ranging from 0% to 100%) which indicates where
the gradient stop is placed. Represents a location along the gradient
vector. For radial gradients, it represents a percentage distance from
(fx,fy) to the edge of the outermost/largest circle.
:param color: indicates what color to use at that gradient stop
:param opacity: defines the opacity of a given gradient stop
"""
self.add(_GradientStop(offset, color, opacity, factory=self))
return self
def add_colors(self, colors, sweep=(0., 1.), opacity=None):
""" Add stop-colors from colors with linear offset distributuion
from sweep[0] to sweep[1].
i.e. colors=['white', 'red', 'blue']
'white': offset = 0.0
'red': offset = 0.5
'blue': offset = 1.0
"""
delta = (sweep[1] - sweep[0]) / (len(colors) - 1)
offset = sweep[0]
for color in colors:
self.add_stop_color(round(offset, 3), color, opacity)
offset += delta
return self
def get_xml(self):
if hasattr(self, 'href'):
self.update_id()
return super(_AbstractGradient, self).get_xml()
class LinearGradient(_AbstractGradient):
""" Linear gradients are defined by a SVG <linearGradient> element.
"""
elementname = 'linearGradient'
def __init__(self, start=None, end=None, inherit=None, **extra):
"""
:param 2-tuple start: start point of the gradient (**x1**, **y1**)
:param 2-tuple end: end point of the gradient (**x2**, **y2**)
:param inherit: gradient inherits properties from `inherit` see: **xlink:href**
"""
super(LinearGradient, self).__init__(inherit=inherit, **extra)
if start is not None:
self['x1'] = start[0]
self['y1'] = start[1]
if end is not None:
self['x2'] = end[0]
self['y2'] = end[1]
class RadialGradient(_AbstractGradient):
""" Radial gradients are defined by a SVG <radialGradient> element.
"""
elementname = 'radialGradient'
def __init__(self, center=None, r=None, focal=None, inherit=None, **extra):
"""
:param 2-tuple center: center point for the gradient (**cx**, **cy**)
:param r: radius for the gradient
:param 2-tuple focal: focal point for the radial gradient (**fx**, **fy**)
:param inherit: gradient inherits properties from `inherit` see: **xlink:href**
"""
super(RadialGradient, self).__init__(inherit=inherit, **extra)
if center is not None:
self['cx'] = center[0]
self['cy'] = center[1]
if r is not None:
self['r'] = r
if focal is not None:
self['fx'] = focal[0]
self['fy'] = focal[1]
| 36.449612
| 88
| 0.597618
|
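A short usage sketch for the gradient classes above, following the usual svgwrite pattern of registering the gradient under defs and referencing it through get_paint_server(); the output filename is a placeholder.
# Build a white-to-blue gradient with the classes above and use it to fill a rect.
import svgwrite

dwg = svgwrite.Drawing("gradient_demo.svg", size=("200px", "100px"))  # placeholder name

grad = dwg.linearGradient((0, 0), (1, 0))        # horizontal gradient vector
grad.add_colors(["white", "red", "blue"])        # offsets 0.0, 0.5, 1.0 as documented above
dwg.defs.add(grad)

dwg.add(dwg.rect(insert=(0, 0), size=("200px", "100px"),
                 fill=grad.get_paint_server(default="white")))
dwg.save()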
586e84c74c3cc5d96bc4e8eb37d46e72f87bc04e
| 7,259
|
py
|
Python
|
aiida/restapi/api.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2016-09-12T10:51:00.000Z
|
2016-09-12T10:51:00.000Z
|
aiida/restapi/api.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 17
|
2020-03-11T17:04:05.000Z
|
2020-05-01T09:34:45.000Z
|
aiida/restapi/api.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Implementation of RESTful API for AiiDA based on flask and flask_restful.
Author: Snehal P. Waychal and Fernando Gargiulo @ Theos, EPFL
"""
from flask import Flask, jsonify
from flask_restful import Api
from werkzeug.exceptions import HTTPException
class App(Flask):
"""
Basic Flask App customized for this REST Api purposes
"""
def __init__(self, *args, **kwargs):
# Decide whether or not to catch the internal server exceptions (default is True)
catch_internal_server = kwargs.pop('catch_internal_server', True)
# Basic initialization
super().__init__(*args, **kwargs)
# Error handler
from aiida.restapi.common.exceptions import RestInputValidationError, \
RestValidationError, RestFeatureNotAvailable
if catch_internal_server:
@self.errorhandler(Exception)
def error_handler(error):
# pylint: disable=unused-variable
"""Error handler to return customized error messages from rest api"""
if isinstance(error, RestValidationError):
response = jsonify({'message': str(error)})
response.status_code = 400
elif isinstance(error, RestInputValidationError):
response = jsonify({'message': str(error)})
response.status_code = 404
elif isinstance(error, RestFeatureNotAvailable):
response = jsonify({'message': str(error)})
response.status_code = 501
elif isinstance(error, HTTPException) and error.code == 404:
from aiida.restapi.common.utils import list_routes
response = jsonify({
'message': 'The requested URL is not found on the server.',
'available_endpoints': list_routes()
})
response.status_code = 404
# Generic server-side error (not to make the api crash if an
# unhandled exception is raised. Caution is never enough!!)
else:
response = jsonify({'message': str(error)})
response.status_code = 500
return response
else:
pass
class AiidaApi(Api):
"""
AiiDA customized version of the flask_restful Api class
"""
def __init__(self, app=None, **kwargs):
"""
        This constructor is customized so that the resources, together with the
        parameters required to initialize the resource classes, can be added
        directly here.
:param kwargs: parameters to be passed to the resources for
configuration and PREFIX
"""
from aiida.restapi.common.config import CLI_DEFAULTS
from aiida.restapi.resources import (
ProcessNode, CalcJobNode, Computer, User, Group, Node, ServerInfo, QueryBuilder
)
self.app = app
super().__init__(app=app, prefix=kwargs['PREFIX'], catch_all_404s=True)
posting = kwargs.pop('posting', CLI_DEFAULTS['POSTING'])
self.add_resource(
ServerInfo,
'/',
'/server/',
'/server/endpoints/',
endpoint='server',
strict_slashes=False,
resource_class_kwargs=kwargs
)
if posting:
self.add_resource(
QueryBuilder,
'/querybuilder/',
endpoint='querybuilder',
strict_slashes=False,
resource_class_kwargs=kwargs,
)
## Add resources and endpoints to the api
self.add_resource(
Computer,
# supported urls
'/computers/',
'/computers/page/',
'/computers/page/<int:page>/',
'/computers/<id>/',
'/computers/projectable_properties/',
endpoint='computers',
strict_slashes=False,
resource_class_kwargs=kwargs
)
self.add_resource(
Node,
'/nodes/',
'/nodes/projectable_properties/',
'/nodes/statistics/',
'/nodes/full_types/',
'/nodes/full_types_count/',
'/nodes/download_formats/',
'/nodes/page/',
'/nodes/page/<int:page>/',
'/nodes/<id>/',
'/nodes/<id>/links/incoming/',
'/nodes/<id>/links/incoming/page/',
'/nodes/<id>/links/incoming/page/<int:page>/',
'/nodes/<id>/links/outgoing/',
'/nodes/<id>/links/outgoing/page/',
'/nodes/<id>/links/outgoing/page/<int:page>/',
'/nodes/<id>/links/tree/',
'/nodes/<id>/contents/attributes/',
'/nodes/<id>/contents/extras/',
'/nodes/<id>/contents/derived_properties/',
'/nodes/<id>/contents/comments/',
'/nodes/<id>/repo/list/',
'/nodes/<id>/repo/contents/',
'/nodes/<id>/download/',
endpoint='nodes',
strict_slashes=False,
resource_class_kwargs=kwargs
)
self.add_resource(
ProcessNode,
'/processes/projectable_properties/',
'/processes/<id>/report/',
endpoint='processes',
strict_slashes=False,
resource_class_kwargs=kwargs
)
self.add_resource(
CalcJobNode,
'/calcjobs/<id>/input_files/',
'/calcjobs/<id>/output_files/',
endpoint='calcjobs',
strict_slashes=False,
resource_class_kwargs=kwargs
)
self.add_resource(
User,
'/users/',
'/users/projectable_properties/',
'/users/page/',
'/users/page/<int:page>/',
'/users/<id>/',
endpoint='users',
strict_slashes=False,
resource_class_kwargs=kwargs
)
self.add_resource(
Group,
'/groups/',
'/groups/projectable_properties/',
'/groups/page/',
'/groups/page/<int:page>/',
'/groups/<id>/',
endpoint='groups',
strict_slashes=False,
resource_class_kwargs=kwargs
)
def handle_error(self, e):
"""
        This method handles the 404 "URL not found" exception and returns a custom message.
:param e: raised exception
:return: list of available endpoints
"""
raise e
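# Illustrative wiring sketch (not part of the original module); `resource_kwargs`
# stands in for whatever configuration the resource classes expect:
#
#     app = App(__name__, catch_internal_server=True)
#     api = AiidaApi(app, PREFIX='/api/v4', **resource_kwargs)
#     app.run(host='127.0.0.1', port=5000)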
| 33.606481
| 91
| 0.525003
|
ce3f0eed29237b9041730ce13bf6359a6741e650
| 4,461
|
py
|
Python
|
tests/test_section.py
|
damianfs/canvasapi
|
10ef96d268a0535c888d8fdd8169da31d9a66e3f
|
[
"MIT"
] | null | null | null |
tests/test_section.py
|
damianfs/canvasapi
|
10ef96d268a0535c888d8fdd8169da31d9a66e3f
|
[
"MIT"
] | null | null | null |
tests/test_section.py
|
damianfs/canvasapi
|
10ef96d268a0535c888d8fdd8169da31d9a66e3f
|
[
"MIT"
] | null | null | null |
import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.assignment import AssignmentOverride
from canvasapi.enrollment import Enrollment
from canvasapi.progress import Progress
from canvasapi.section import Section
from canvasapi.submission import GroupedSubmission, Submission
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestSection(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({"section": ["get_by_id"]}, m)
self.section = self.canvas.get_section(1)
# __str__()
def test__str__(self, m):
string = str(self.section)
self.assertIsInstance(string, str)
# get_assignment_override
def test_get_assignment_override(self, m):
register_uris({"assignment": ["override_section_alias"]}, m)
override = self.section.get_assignment_override(1)
self.assertIsInstance(override, AssignmentOverride)
self.assertEqual(override.course_section_id, self.section.id)
# get_enrollments()
def test_get_enrollments(self, m):
register_uris({"section": ["list_enrollments", "list_enrollments_2"]}, m)
enrollments = self.section.get_enrollments()
enrollment_list = [enrollment for enrollment in enrollments]
self.assertEqual(len(enrollment_list), 4)
self.assertIsInstance(enrollment_list[0], Enrollment)
def test_cross_list_section(self, m):
register_uris({"course": ["get_by_id_2"], "section": ["crosslist_section"]}, m)
section_by_id = self.section.cross_list_section(2)
self.assertIsInstance(section_by_id, Section)
course_obj = self.canvas.get_course(2)
section_by_obj = self.section.cross_list_section(course_obj)
self.assertIsInstance(section_by_obj, Section)
def test_decross_list_section(self, m):
register_uris({"section": ["decross_section"]}, m)
section = self.section.decross_list_section()
self.assertIsInstance(section, Section)
def test_edit(self, m):
register_uris({"section": ["edit"]}, m)
edit = self.section.edit()
self.assertIsInstance(edit, Section)
def test_delete(self, m):
register_uris({"section": ["delete"]}, m)
deleted_section = self.section.delete()
self.assertIsInstance(deleted_section, Section)
    # get_multiple_submissions()
def test_get_multiple_submissions(self, m):
register_uris({"section": ["list_multiple_submissions"]}, m)
submissions = self.section.get_multiple_submissions()
submission_list = [submission for submission in submissions]
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
def test_get_multiple_submissions_grouped_true(self, m):
register_uris({"section": ["list_multiple_submissions_grouped"]}, m)
submissions = self.section.get_multiple_submissions(grouped=True)
submission_list = [submission for submission in submissions]
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], GroupedSubmission)
def test_get_multiple_submissions_grouped_false(self, m):
register_uris({"section": ["list_multiple_submissions"]}, m)
submissions = self.section.get_multiple_submissions(grouped=False)
submission_list = [submission for submission in submissions]
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
def test_get_multiple_submissions_grouped_invalid(self, m):
with self.assertRaises(ValueError) as cm:
self.section.get_multiple_submissions(grouped="blargh")
self.assertIn("Parameter `grouped` must", cm.exception.args[0])
def test_submissions_bulk_update(self, m):
register_uris({"section": ["update_submissions"]}, m)
register_uris({"progress": ["course_progress"]}, m)
progress = self.section.submissions_bulk_update(
grade_data={"1": {"1": {"posted_grade": 97}, "2": {"posted_grade": 98}}}
)
self.assertIsInstance(progress, Progress)
self.assertTrue(progress.context_type == "Course")
progress = progress.query()
self.assertTrue(progress.context_type == "Course")
| 35.975806
| 87
| 0.703878
|
7ff19adf317246ad74838e4d3b05c2bdc4e5ce9b
| 766
|
py
|
Python
|
Source/Scripts/prepare.py
|
kimmania/SqlServer2017GraphDatabase
|
cd3b7eb5c3a445d72aff83dd5037c5b129ba4cc7
|
[
"MIT"
] | 6
|
2018-05-03T03:16:30.000Z
|
2020-05-04T22:02:24.000Z
|
Source/Scripts/prepare.py
|
kimmania/SqlServer2017GraphDatabase
|
cd3b7eb5c3a445d72aff83dd5037c5b129ba4cc7
|
[
"MIT"
] | null | null | null |
Source/Scripts/prepare.py
|
kimmania/SqlServer2017GraphDatabase
|
cd3b7eb5c3a445d72aff83dd5037c5b129ba4cc7
|
[
"MIT"
] | 6
|
2018-06-07T13:31:18.000Z
|
2021-10-09T07:37:32.000Z
|
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Column, Integer, String
import csv
metadata = MetaData()
airport = Table('Airport', metadata,
                Column('id', Integer, primary_key=True),
                Column('code', String),
                Column('city', Integer),
                Column('state', String),
                Column('country', String))
carrier = Table('Carrier', metadata)
with open('AIRPORT_ID.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    # csv.DictReader yields one dict per row; enumerate() supplies the index
    for idx, row in enumerate(reader):
# Get Raw Data:
code = row['Code']
description = row['Description']
#
# Build the Insert Statement:
insert_statement = "INSERT INTO AIRPORT(ID, CODE, CITY, STATE, COUNTRY) VALUES ()".format(
| 26.413793
| 98
| 0.634465
|
2647aee711e457fe4418a222ad098d6c71b17711
| 4,383
|
py
|
Python
|
auth/settings.py
|
Splice-Technologies/splice-auth
|
41e28857409402069a15b4a28f0852fe67697780
|
[
"MIT"
] | null | null | null |
auth/settings.py
|
Splice-Technologies/splice-auth
|
41e28857409402069a15b4a28f0852fe67697780
|
[
"MIT"
] | null | null | null |
auth/settings.py
|
Splice-Technologies/splice-auth
|
41e28857409402069a15b4a28f0852fe67697780
|
[
"MIT"
] | null | null | null |
"""
Django settings for auth project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import datetime
from auth import conf
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yfmd$2gfdci$i$+d710d9+kp^%-f5-82wac6-1)1_zrc_x&1ss'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# Django Apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third Party Apps
'rest_framework',
'silk',
'drf_yasg',
# Project Apps
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
"silk.middleware.SilkyMiddleware",
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'auth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries': {
'staticfiles': 'django.templatetags.static',
}
},
},
]
WSGI_APPLICATION = 'auth.wsgi.application'
AUTH_USER_MODEL = 'users.User'
# Django Rest Framework definitions
# https://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
}
ACCESS_TOKEN_LIFETIME = datetime.timedelta(hours=1)
REFRESH_TOKEN_LIFETIME = datetime.timedelta(hours=2)
# Django Mail Server definitions
# https://docs.djangoproject.com/en/3.1/topics/email/
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = conf.mail['host']
EMAIL_PORT = conf.mail['port']
EMAIL_HOST_USER = conf.mail['user']
EMAIL_HOST_PASSWORD = conf.mail['password']
EMAIL_USE_TLS = conf.mail['tls']
EMAIL_USE_SSL = conf.mail['ssl']
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': conf.database['name'],
'USER': conf.database['user'],
'PASSWORD': conf.database['password'],
'HOST': conf.database['host'],
'PORT': conf.database['port'],
}
}
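# Illustrative sketch (not part of this settings module): the `auth.conf` module
# imported above is assumed to expose plain dictionaries shaped like the lookups
# used here, e.g.:
#
#     database = {'name': 'splice', 'user': 'splice', 'password': 'secret',
#                 'host': 'localhost', 'port': '5432'}
#     mail = {'host': 'smtp.example.com', 'port': 587, 'user': 'noreply',
#             'password': 'secret', 'tls': True, 'ssl': False}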
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'auth', 'static')
| 26.403614
| 91
| 0.69473
|
a215dd80b320a16ef18248a71097173060805e05
| 1,290
|
py
|
Python
|
appwatch/urls.py
|
ramza007/WatchApp
|
222ce6069ecb8dc3ba68350e521f5470d45667c1
|
[
"Apache-2.0"
] | null | null | null |
appwatch/urls.py
|
ramza007/WatchApp
|
222ce6069ecb8dc3ba68350e521f5470d45667c1
|
[
"Apache-2.0"
] | null | null | null |
appwatch/urls.py
|
ramza007/WatchApp
|
222ce6069ecb8dc3ba68350e521f5470d45667c1
|
[
"Apache-2.0"
] | null | null | null |
"""
App routes
"""
from django.conf.urls import url, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
#-----------------------------App Routes------------------------#
urlpatterns=[
url(r'^$',views.index,name = 'index'),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^update/profile/', views.create_profile, name='createProfile'),
url(r'^new/hood/$',views.create_hood, name='newHood'),
url(r'^all/hoods/',views.view_neighborhoods, name='allHoods'),
url(r'^neighborhood/(\d+)',views.hood_details, name='pickHood'),
url(r'^follow/(\d+)', views.follow, name='follow'),
url(r'^unfollow/(\d+)', views.unfollow, name='unfollow'),
url(r'^profile/', views.profile, name='profile'),
url(r'^new/business/',views.create_business, name='newBusiness'),
url(r'^business/(\d+)',views.business_details, name='business'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^comment/(\d+)', views.new_comment, name='Comment'),
# url(r'^manage/(\d+)',views.manage_image, name='manageImage'),
# url(r'^home',views.home,name='hoodNews'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 33.076923
| 81
| 0.648062
|
d1bb93e4ca908aff51903f5224c2156746e45908
| 562
|
py
|
Python
|
talos/utils/best_model.py
|
zazula/talos
|
4a2a2c1c16310a2158692808cb0a6cfe4e4be326
|
[
"MIT"
] | 1,536
|
2018-05-15T19:36:16.000Z
|
2022-03-31T18:14:11.000Z
|
talos/utils/best_model.py
|
zazula/talos
|
4a2a2c1c16310a2158692808cb0a6cfe4e4be326
|
[
"MIT"
] | 522
|
2018-05-12T18:35:06.000Z
|
2022-03-20T06:23:52.000Z
|
talos/utils/best_model.py
|
zazula/talos
|
4a2a2c1c16310a2158692808cb0a6cfe4e4be326
|
[
"MIT"
] | 304
|
2018-05-16T00:59:44.000Z
|
2022-03-17T05:23:21.000Z
|
from tensorflow.keras.models import model_from_json
def best_model(self, metric, asc):
'''Picks the best model based on a given metric and
returns the index number for the model.
NOTE: for loss 'asc' should be True'''
best = self.data.sort_values(metric, ascending=asc).iloc[0].name
return best
def activate_model(self, model_id):
'''Loads the model from the json that is stored in the Scan object'''
model = model_from_json(self.saved_models[model_id])
model.set_weights(self.saved_weights[model_id])
return model
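# Illustrative usage sketch (not part of the original module), assuming `scan`
# is a completed talos Scan object (both helpers take it as their first argument):
#
#     best_id = best_model(scan, metric='val_acc', asc=False)
#     model = activate_model(scan, best_id)
#     predictions = model.predict(x_test)   # x_test: assumed test inputs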
| 23.416667
| 73
| 0.720641
|
afd04a8eaa253f977762d8062cc4d5094792f406
| 24,254
|
py
|
Python
|
openmc/cell.py
|
MauriDeb/Desarrollo_OpenMC
|
871f81af1d0bec58c0cc9873c192b321ab84414f
|
[
"MIT"
] | null | null | null |
openmc/cell.py
|
MauriDeb/Desarrollo_OpenMC
|
871f81af1d0bec58c0cc9873c192b321ab84414f
|
[
"MIT"
] | null | null | null |
openmc/cell.py
|
MauriDeb/Desarrollo_OpenMC
|
871f81af1d0bec58c0cc9873c192b321ab84414f
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from collections.abc import Iterable
from copy import deepcopy
from math import cos, sin, pi
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import sys
import warnings
import numpy as np
import openmc
import openmc.checkvalue as cv
from openmc.surface import Halfspace
from openmc.region import Region, Intersection, Complement
from openmc._xml import get_text
from .mixin import IDManagerMixin
class Cell(IDManagerMixin):
"""A region of space defined as the intersection of half-space created by
quadric surfaces.
Parameters
----------
cell_id : int, optional
Unique identifier for the cell. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the cell. If not specified, the name is the empty string.
fill : openmc.Material or openmc.Universe or openmc.Lattice or None or iterable of openmc.Material, optional
Indicates what the region of space is filled with
region : openmc.Region, optional
Region of space that is assigned to the cell.
Attributes
----------
id : int
Unique identifier for the cell
name : str
Name of the cell
fill : openmc.Material or openmc.Universe or openmc.Lattice or None or iterable of openmc.Material
Indicates what the region of space is filled with. If None, the cell is
treated as a void. An iterable of materials is used to fill repeated
instances of a cell with different materials.
fill_type : {'material', 'universe', 'lattice', 'distribmat', 'void'}
Indicates what the cell is filled with.
region : openmc.Region or None
Region of space that is assigned to the cell.
rotation : Iterable of float
If the cell is filled with a universe, this array specifies the angles
in degrees about the x, y, and z axes that the filled universe should be
rotated. The rotation applied is an intrinsic rotation with specified
Tait-Bryan angles. That is to say, if the angles are :math:`(\phi,
\theta, \psi)`, then the rotation matrix applied is :math:`R_z(\psi)
R_y(\theta) R_x(\phi)` or
.. math::
\left [ \begin{array}{ccc} \cos\theta \cos\psi & -\cos\phi \sin\psi
+ \sin\phi \sin\theta \cos\psi & \sin\phi \sin\psi + \cos\phi
\sin\theta \cos\psi \\ \cos\theta \sin\psi & \cos\phi \cos\psi +
\sin\phi \sin\theta \sin\psi & -\sin\phi \cos\psi + \cos\phi
\sin\theta \sin\psi \\ -\sin\theta & \sin\phi \cos\theta & \cos\phi
\cos\theta \end{array} \right ]
rotation_matrix : numpy.ndarray
The rotation matrix defined by the angles specified in the
:attr:`Cell.rotation` property.
temperature : float or iterable of float
Temperature of the cell in Kelvin. Multiple temperatures can be given
to give each distributed cell instance a unique temperature.
translation : Iterable of float
If the cell is filled with a universe, this array specifies a vector
that is used to translate (shift) the universe.
paths : list of str
The paths traversed through the CSG tree to reach each cell
instance. This property is initialized by calling the
:meth:`Geometry.determine_paths` method.
num_instances : int
The number of instances of this cell throughout the geometry.
volume : float
Volume of the cell in cm^3. This can either be set manually or
calculated in a stochastic volume calculation and added via the
:meth:`Cell.add_volume_information` method.
"""
next_id = 1
used_ids = set()
    ##! What is done here is, first, to set I = 1 by default and, second, to allow the importance
    ##! to be passed as e.g. pepito = openmc.Cell(bla, bla, importance=I), i.e. a way to set I.
def __init__(self, cell_id=None, name='', fill=None, region=None, importance= 1.0, lower_weight= None, const_upp_weight= None, const_surv= None):
# Initialize Cell class attributes
self.id = cell_id
self.name = name
self.fill = fill
self.region = region
self._rotation = None
self._rotation_matrix = None
self._temperature = None
self._importance = importance
self._lower_weight = lower_weight
self._const_upp_weight = const_upp_weight
self._const_surv = const_surv
self._translation = None
self._paths = None
self._num_instances = None
self._volume = None
self._atoms = None
def __contains__(self, point):
if self.region is None:
return True
else:
return point in self.region
def __repr__(self):
string = 'Cell\n'
string += '{: <16}=\t{}\n'.format('\tID', self.id)
string += '{: <16}=\t{}\n'.format('\tName', self.name)
if self.fill_type == 'material':
string += '{: <16}=\tMaterial {}\n'.format('\tFill', self.fill.id)
elif self.fill_type == 'void':
string += '{: <16}=\tNone\n'.format('\tFill')
elif self.fill_type == 'distribmat':
string += '{: <16}=\t{}\n'.format('\tFill', list(map(
lambda m: m if m is None else m.id, self.fill)))
else:
string += '{: <16}=\t{}\n'.format('\tFill', self.fill.id)
string += '{: <16}=\t{}\n'.format('\tRegion', self.region)
string += '{: <16}=\t{}\n'.format('\tRotation', self.rotation)
if self.fill_type == 'material':
string += '\t{0: <15}=\t{1}\n'.format('Temperature',
self.temperature)
if self.fill_type == 'material':
string += '\t{0: <15}=\t{1}\n'.format('Importance',
self.importance)
if self.fill_type == 'material':
string += '\t{0: <15}=\t{1}\n'.format('lower_weight', self.lower_weight)
string += '\t{0: <15}=\t{1}\n'.format('const_upp_weight', self.const_upp_weight)
string += '\t{0: <15}=\t{1}\n'.format('const_surv', self.const_surv)
string += '{: <16}=\t{}\n'.format('\tTranslation', self.translation)
return string
@property
def name(self):
return self._name
@property
def fill(self):
return self._fill
@property
def fill_type(self):
if isinstance(self.fill, openmc.Material):
return 'material'
elif isinstance(self.fill, openmc.Universe):
return 'universe'
elif isinstance(self.fill, openmc.Lattice):
return 'lattice'
elif isinstance(self.fill, Iterable):
return 'distribmat'
else:
return 'void'
@property
def region(self):
return self._region
@property
def rotation(self):
return self._rotation
@property
def rotation_matrix(self):
return self._rotation_matrix
@property
def temperature(self):
return self._temperature
    ##! This is the getter for the importance, i.e. pepito.importance
    ##! returns the importance of the cell.
@property
def importance(self):
return self._importance
@property
def lower_weight(self):
return self._lower_weight
@property
def const_upp_weight(self):
return self._const_upp_weight
@property
def const_surv(self):
return self._const_surv
@property
def translation(self):
return self._translation
@property
def volume(self):
return self._volume
@property
def paths(self):
if self._paths is None:
raise ValueError('Cell instance paths have not been determined. '
'Call the Geometry.determine_paths() method.')
return self._paths
@property
def bounding_box(self):
if self.region is not None:
return self.region.bounding_box
else:
return (np.array([-np.inf, -np.inf, -np.inf]),
np.array([np.inf, np.inf, np.inf]))
@property
def num_instances(self):
if self._num_instances is None:
raise ValueError(
'Number of cell instances have not been determined. Call the '
'Geometry.determine_paths() method.')
return self._num_instances
@name.setter
def name(self, name):
if name is not None:
cv.check_type('cell name', name, str)
self._name = name
else:
self._name = ''
@fill.setter
def fill(self, fill):
if fill is not None:
if isinstance(fill, Iterable):
for i, f in enumerate(fill):
if f is not None:
cv.check_type('cell.fill[i]', f, openmc.Material)
elif not isinstance(fill, (openmc.Material, openmc.Lattice,
openmc.Universe)):
msg = 'Unable to set Cell ID="{0}" to use a non-Material or ' \
'Universe fill "{1}"'.format(self._id, fill)
raise ValueError(msg)
self._fill = fill
@rotation.setter
def rotation(self, rotation):
cv.check_type('cell rotation', rotation, Iterable, Real)
cv.check_length('cell rotation', rotation, 3)
self._rotation = np.asarray(rotation)
# Save rotation matrix -- the reason we do this instead of having it be
# automatically calculated when the rotation_matrix property is accessed
# is so that plotting on a rotated geometry can be done faster.
phi, theta, psi = self.rotation*(-pi/180.)
c3, s3 = cos(phi), sin(phi)
c2, s2 = cos(theta), sin(theta)
c1, s1 = cos(psi), sin(psi)
self._rotation_matrix = np.array([
[c1*c2, c1*s2*s3 - c3*s1, s1*s3 + c1*c3*s2],
[c2*s1, c1*c3 + s1*s2*s3, c3*s1*s2 - c1*s3],
[-s2, c2*s3, c2*c3]])
@translation.setter
def translation(self, translation):
cv.check_type('cell translation', translation, Iterable, Real)
cv.check_length('cell translation', translation, 3)
self._translation = np.asarray(translation)
@temperature.setter
def temperature(self, temperature):
# Make sure temperatures are positive
cv.check_type('cell temperature', temperature, (Iterable, Real))
if isinstance(temperature, Iterable):
cv.check_type('cell temperature', temperature, Iterable, Real)
for T in temperature:
cv.check_greater_than('cell temperature', T, 0.0, True)
else:
cv.check_greater_than('cell temperature', temperature, 0.0, True)
# If this cell is filled with a universe or lattice, propagate
# temperatures to all cells contained. Otherwise, simply assign it.
if self.fill_type in ('universe', 'lattice'):
for c in self.get_all_cells().values():
if c.fill_type == 'material':
c._temperature = temperature
else:
self._temperature = temperature
    ##! Here, as can be seen, the setter is created, first checking that I is positive.
    ##! With this the importance of a cell can be set, or an existing one changed,
    ##! by doing pepito.importance = I.
@importance.setter
def importance(self, importance):
# Make sure importances are positive
cv.check_type('cell importance', importance, Real)
cv.check_greater_than('cell importance', importance, 0.0, True)
self._importance = importance
@lower_weight.setter
def lower_weight(self, lower_weight):
# Make sure lower_weights are positive
cv.check_type('cell lower_weight', lower_weight, Real)
cv.check_greater_than('cell lower_weight', lower_weight, 0.0, True)
self._lower_weight = lower_weight
@const_upp_weight.setter
def const_upp_weight(self, const_upp_weight):
        # Make sure const_upp_weight is positive
cv.check_type('cell const_upp_weight', const_upp_weight, Real)
cv.check_greater_than('cell const_upp_weight', const_upp_weight, 0.0, True)
self._const_upp_weight = const_upp_weight
@const_surv.setter
def const_surv(self, const_surv):
        # Make sure const_surv is positive
cv.check_type('cell const_surv', const_surv, Real)
cv.check_greater_than('cell const_surv', const_surv, 0.0, True)
self._const_surv = const_surv
@region.setter
def region(self, region):
if region is not None:
cv.check_type('cell region', region, Region)
self._region = region
@volume.setter
def volume(self, volume):
if volume is not None:
cv.check_type('cell volume', volume, Real)
self._volume = volume
def add_volume_information(self, volume_calc):
"""Add volume information to a cell.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'cell':
if self.id in volume_calc.volumes:
self._volume = volume_calc.volumes[self.id].n
self._atoms = volume_calc.atoms[self.id]
else:
raise ValueError('No volume information found for this cell.')
else:
raise ValueError('No volume information found for this cell.')
def get_nuclides(self):
"""Returns all nuclides in the cell
Returns
-------
nuclides : list of str
List of nuclide names
"""
return self.fill.get_nuclides() if self.fill_type != 'void' else []
def get_nuclide_densities(self):
"""Return all nuclides contained in the cell and their densities
Returns
-------
nuclides : collections.OrderedDict
Dictionary whose keys are nuclide names and values are 2-tuples of
(nuclide, density)
"""
nuclides = OrderedDict()
if self.fill_type == 'material':
nuclides.update(self.fill.get_nuclide_densities())
elif self.fill_type == 'void':
pass
else:
if self._atoms is not None:
volume = self.volume
for name, atoms in self._atoms.items():
nuclide = openmc.Nuclide(name)
density = 1.0e-24 * atoms.n/volume # density in atoms/b-cm
nuclides[name] = (nuclide, density)
else:
raise RuntimeError(
'Volume information is needed to calculate microscopic cross '
'sections for cell {}. This can be done by running a '
'stochastic volume calculation via the '
'openmc.VolumeCalculation object'.format(self.id))
return nuclides
def get_all_cells(self):
"""Return all cells that are contained within this one if it is filled with a
universe or lattice
Returns
-------
        cells : collections.OrderedDict
Dictionary whose keys are cell IDs and values are :class:`Cell`
instances
"""
cells = OrderedDict()
if self.fill_type in ('universe', 'lattice'):
cells.update(self.fill.get_all_cells())
return cells
def get_all_materials(self):
"""Return all materials that are contained within the cell
Returns
-------
materials : collections.OrderedDict
Dictionary whose keys are material IDs and values are
:class:`Material` instances
"""
materials = OrderedDict()
if self.fill_type == 'material':
materials[self.fill.id] = self.fill
elif self.fill_type == 'distribmat':
for m in self.fill:
if m is not None:
materials[m.id] = m
else:
# Append all Cells in each Cell in the Universe to the dictionary
cells = self.get_all_cells()
for cell in cells.values():
materials.update(cell.get_all_materials())
return materials
def get_all_universes(self):
"""Return all universes that are contained within this one if any of
its cells are filled with a universe or lattice.
Returns
-------
universes : collections.OrderedDict
Dictionary whose keys are universe IDs and values are
:class:`Universe` instances
"""
universes = OrderedDict()
if self.fill_type == 'universe':
universes[self.fill.id] = self.fill
universes.update(self.fill.get_all_universes())
elif self.fill_type == 'lattice':
universes.update(self.fill.get_all_universes())
return universes
def clone(self, memo=None):
"""Create a copy of this cell with a new unique ID, and clones
the cell's region and fill.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Cell
The clone of this cell
"""
if memo is None:
memo = {}
        # If no memoized clone exists, instantiate one
if self not in memo:
# Temporarily remove paths
paths = self._paths
self._paths = None
clone = deepcopy(self)
clone.id = None
clone._num_instances = None
# Restore paths on original instance
self._paths = paths
if self.region is not None:
clone.region = self.region.clone(memo)
if self.fill is not None:
if self.fill_type == 'distribmat':
clone.fill = [fill.clone(memo) if fill is not None else None
for fill in self.fill]
else:
clone.fill = self.fill.clone(memo)
# Memoize the clone
memo[self] = clone
return memo[self]
def create_xml_subelement(self, xml_element):
element = ET.Element("cell")
element.set("id", str(self.id))
if len(self._name) > 0:
element.set("name", str(self.name))
if self.fill_type == 'void':
element.set("material", "void")
elif self.fill_type == 'material':
element.set("material", str(self.fill.id))
elif self.fill_type == 'distribmat':
element.set("material", ' '.join(['void' if m is None else str(m.id)
for m in self.fill]))
elif self.fill_type in ('universe', 'lattice'):
element.set("fill", str(self.fill.id))
self.fill.create_xml_subelement(xml_element)
if self.region is not None:
# Set the region attribute with the region specification
region = str(self.region)
if region.startswith('('):
region = region[1:-1]
if len(region) > 0:
element.set("region", region)
# Only surfaces that appear in a region are added to the geometry
# file, so the appropriate check is performed here. First we create
# a function which is called recursively to navigate through the CSG
# tree. When it reaches a leaf (a Halfspace), it creates a <surface>
# element for the corresponding surface if none has been created
# thus far.
def create_surface_elements(node, element):
if isinstance(node, Halfspace):
path = "./surface[@id='{}']".format(node.surface.id)
if xml_element.find(path) is None:
xml_element.append(node.surface.to_xml_element())
elif isinstance(node, Complement):
create_surface_elements(node.node, element)
else:
for subnode in node:
create_surface_elements(subnode, element)
# Call the recursive function from the top node
create_surface_elements(self.region, xml_element)
if self.temperature is not None:
if isinstance(self.temperature, Iterable):
element.set("temperature", ' '.join(
str(t) for t in self.temperature))
else:
element.set("temperature", str(self.temperature))
if self.importance is not None:
element.set("importance", str(self.importance))
if self.lower_weight is not None:
element.set("lower_weight", str(self.lower_weight))
if self.const_upp_weight is not None:
element.set("const_upp_weight", str(self.const_upp_weight))
if self.const_surv is not None:
element.set("const_surv", str(self.const_surv))
if self.translation is not None:
element.set("translation", ' '.join(map(str, self.translation)))
if self.rotation is not None:
element.set("rotation", ' '.join(map(str, self.rotation)))
return element
@classmethod
def from_xml_element(cls, elem, surfaces, materials, get_universe):
"""Generate cell from XML element
Parameters
----------
elem : xml.etree.ElementTree.Element
`<cell>` element
surfaces : dict
Dictionary mapping surface IDs to :class:`openmc.Surface` instances
materials : dict
Dictionary mapping material IDs to :class:`openmc.Material`
            instances (defined in :meth:`openmc.Geometry.from_xml`)
get_universe : function
Function returning universe (defined in
:meth:`openmc.Geometry.from_xml`)
Returns
-------
Cell
Cell instance
"""
cell_id = int(get_text(elem, 'id'))
name = get_text(elem, 'name')
c = cls(cell_id, name)
# Assign material/distributed materials or fill
mat_text = get_text(elem, 'material')
if mat_text is not None:
mat_ids = mat_text.split()
if len(mat_ids) > 1:
c.fill = [materials[i] for i in mat_ids]
else:
c.fill = materials[mat_ids[0]]
else:
fill_id = int(get_text(elem, 'fill'))
c.fill = get_universe(fill_id)
# Assign region
region = get_text(elem, 'region')
if region is not None:
c.region = Region.from_expression(region, surfaces)
# Check for other attributes
t = get_text(elem, 'temperature')
if t is not None:
if ' ' in t:
c.temperature = [float(t_i) for t_i in t.split()]
else:
c.temperature = float(t)
for key in ('temperature', 'rotation', 'translation'):
value = get_text(elem, key)
if value is not None:
setattr(c, key, [float(x) for x in value.split()])
imp = get_text(elem, 'importance')
if imp is not None:
c.importance = float(imp)
low_weight = get_text(elem, 'lower_weight')
if low_weight is not None:
c.lower_weight = float(low_weight)
const_upp_w = get_text(elem, 'const_upp_weight')
if const_upp_w is not None:
c.const_upp_weight = float(const_upp_w)
const_s = get_text(elem, 'const_surv')
if const_s is not None:
c.const_surv = float(const_s)
# Add this cell to appropriate universe
univ_id = int(get_text(elem, 'universe', 0))
get_universe(univ_id).add_cell(c)
return c
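# Illustrative sketch (not part of the original module), assuming `fuel` is an
# openmc.Material and `pin_region` an openmc.Region defined elsewhere:
#
#     cell = Cell(name='fuel pin', fill=fuel, region=pin_region, importance=2.0)
#     cell.temperature = 600.0                      # Kelvin
#     geometry_elem = ET.Element('geometry')
#     cell.create_xml_subelement(geometry_elem)     # appends needed <surface> elements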
| 35.407299
| 149
| 0.587408
|
a2b601840dbb2dfdfcd22a4c428e3c01643f8bf5
| 1,757
|
py
|
Python
|
savu/test/jenkins/plugin_tests/filter_tests/ccpi_denoising_gpu_test.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | null | null | null |
savu/test/jenkins/plugin_tests/filter_tests/ccpi_denoising_gpu_test.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | null | null | null |
savu/test/jenkins/plugin_tests/filter_tests/ccpi_denoising_gpu_test.py
|
elainehoml/Savu
|
e4772704606f71d6803d832084e10faa585e7358
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: plugins_test
:platform: Unix
:synopsis: unittest test classes for ccpi denoising plugins (GPU)
.. moduleauthor:: Daniil Kazantsev <scientificsoftware@diamond.ac.uk>
"""
import unittest
import savu.test.test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
import savu.test.base_checkpoint_test
import tempfile
import os
class CcpiDenoisingGpuTest(unittest.TestCase):
def setUp(self):
self.data_file = 'tomo_standard.nxs'
self.experiment = 'tomo'
def test_ccpidenoising2d_gpu(self):
process_list = 'filters/ccpi/ccpi_denoising_gpu_test.nxs'
options = tu.initialise_options(self.data_file, self.experiment, process_list)
run_protected_plugin_runner(options)
tu.cleanup(options)
def test_ccpidenoising3d_gpu(self):
process_list = 'filters/ccpi/ccpi_denoising_gpu3d_test.nxs'
options = tu.initialise_options(self.data_file, self.experiment, process_list)
run_protected_plugin_runner(options)
tu.cleanup(options)
if __name__ == "__main__":
unittest.main()
| 32.537037
| 86
| 0.742743
|
cee790bbdba0685feaf3409dcea6afa92696763c
| 4,241
|
py
|
Python
|
ExamplesElementaryOperations/ExamplesOpNoState.py
|
zatricion/Streams
|
d2f688e230b4cb325d5f76886a7499d132591bd4
|
[
"MIT"
] | null | null | null |
ExamplesElementaryOperations/ExamplesOpNoState.py
|
zatricion/Streams
|
d2f688e230b4cb325d5f76886a7499d132591bd4
|
[
"MIT"
] | null | null | null |
ExamplesElementaryOperations/ExamplesOpNoState.py
|
zatricion/Streams
|
d2f688e230b4cb325d5f76886a7499d132591bd4
|
[
"MIT"
] | null | null | null |
"""This module contains examples of the op() function
where:
op(f,x) returns a stream where x is a stream, and f
is an operator on lists, i.e., f is a function from
a list to a list. These lists are of lists of arbitrary
objects other than streams and agents.
Function f must be stateless, i.e., for any lists u, v:
f(u.extend(v)) = f(u).extend(f(v))
(Stateful functions are given in OpStateful.py with
examples in ExamplesOpWithState.py.)
Let f be a stateless operator on lists and let x be a stream.
If at some point, the value of stream x is a list u then at
that point, the value of stream op(f,x) is the list f(u).
If at a later point, the value of stream x is the list:
u.extend(v) then, at that point the value of stream op(f,x)
is f(u).extend(f(v)).
As a specific example, consider the following f():
def f(lst): return [w * w for w in lst]
If at some point in time, the value of x is [3, 7],
then at that point the value of op(f,x) is f([3, 7])
or [9, 49]. If at a later point, the value of x is
[3, 7, 0, 11, 5] then the value of op(f,x) at that point
is f([3, 7, 0, 11, 5]) or [9, 49, 0, 121, 25].
"""
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from Agent import *
from ListOperators import *
from PrintFunctions import print_streams_recent
def example_1():
print "example_1"
print "op(f, x): f is a function from a list to a list"
print "x is a stream \n"
# FUNCTIONS FROM LIST TO LIST
# This example uses the following list operators:
# functions from a list to a list.
# f, g, h, r
# Example A: function using list comprehension
def f(lst): return [w*w for w in lst]
# Example B: function using filter
threshold = 6
def predicate(w):
return w > threshold
def g(lst):
return filter(predicate, lst)
# Example C: function using map
# Raise each element of the list to the n-th power.
n = 3
def power(w):
return w**n
def h(lst):
return map(power, lst)
# Example D: function using another list comprehension
# Discard any element of x that is not a
# multiple of a parameter n, and divide the
# elements that are multiples of n by n.
n = 3
def r(lst):
result = []
for w in lst:
if w%n == 0: result.append(w/n)
return result
# EXAMPLES OF OPERATIONS ON STREAMS
# The input stream for these examples
x = Stream('x')
print 'x is the input stream.'
print 'a is a stream consisting of the squares of the input'
print 'b is the stream consisting of values that exceed 6'
print 'c is the stream consisting of the third powers of the input'
print 'd is the stream consisting of values that are multiples of 3 divided by 3'
print 'newa is the same as a. It is defined in a more succinct fashion.'
print 'newb has squares that exceed 6.'
print ''
# The output streams a, b, c, d obtained by
# applying the list operators f, g, h, r to
# stream x.
a = op(f, x)
b = op(g, x)
c = op(h, x)
d = op(r, x)
# You can also define a function only on streams.
# You can do this using functools in Python or
# by simple encapsulation as shown below.
def F(x): return op(f,x)
def G(x): return op(g,x)
newa = F(x)
newb = G(F(x))
# The advantage is that F is a function only
# of streams. So, function composition looks cleaner
# as in G(F(x))
# Name the output streams to label the output
# so that reading the output is easier.
a.set_name('a')
newa.set_name('newa')
b.set_name('b')
newb.set_name('newb')
c.set_name('c')
d.set_name('d')
# At this point x is the empty stream:
# its value is []
x.extend([3, 7])
# Now the value of x is [3, 7]
print "FIRST STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
print ""
x.extend([0, 11, 15])
# Now the value of x is [3, 7, 0, 11, 15]
print "SECOND STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
def main():
example_1()
if __name__ == '__main__':
main()
| 29.451389
| 85
| 0.632634
|
2c95d93eb33809c644922fea38b2a1e599819c52
| 26,424
|
py
|
Python
|
scanpy/plotting/tools/scatterplots.py
|
Puriney/scanpy
|
83afc9d5ac9043eacb1deecf14445a7a043be447
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/plotting/tools/scatterplots.py
|
Puriney/scanpy
|
83afc9d5ac9043eacb1deecf14445a7a043be447
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/plotting/tools/scatterplots.py
|
Puriney/scanpy
|
83afc9d5ac9043eacb1deecf14445a7a043be447
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T10:08:45.000Z
|
2019-12-09T10:08:45.000Z
|
from matplotlib import pyplot as pl
from pandas.api.types import is_categorical_dtype
import numpy as np
from matplotlib import rcParams
from matplotlib.colors import is_color_like
from .. import utils
from ...utils import sanitize_anndata, doc_params
from ... import settings
from ..docs import doc_adata_color_etc, doc_edges_arrows, doc_scatter_bulk, doc_show_save_ax
from ... import logging as logg
@doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def umap(adata, **kwargs):
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
return plot_scatter(adata, basis='umap', **kwargs)
@doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def tsne(adata, **kwargs):
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
return plot_scatter(adata, basis='tsne', **kwargs)
@doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def phate(adata, **kwargs):
"""\
Scatter plot in PHATE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a list of `matplotlib.Axis` objects. Every second element
corresponds to the 'right margin' drawing area for color bars and legends.
Examples
--------
>>> import scanpy.api as sc
>>> import phate
>>> data, branches = phate.tree.gen_dla(n_dim=100,
n_branch=20,
branch_length=100)
>>> data.shape
(2000, 100)
>>> adata = sc.AnnData(data)
>>> adata.obs['branches'] = branches
>>> sc.tl.phate(adata, k=5, a=20, t=150)
>>> adata.obsm['X_phate'].shape
(2000, 2)
>>> sc.pl.phate(adata,
color='branches',
color_map='tab20')
"""
return plot_scatter(adata, basis='phate', **kwargs)
@doc_params(adata_color_etc=doc_adata_color_etc, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def diffmap(adata, **kwargs):
"""\
Scatter plot in Diffusion Map basis.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
return plot_scatter(adata, basis='diffmap', **kwargs)
@doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def draw_graph(adata, layout=None, **kwargs):
"""\
Scatter plot in graph-drawing basis.
Parameters
----------
{adata_color_etc}
layout : {{'fa', 'fr', 'drl', ...}}, optional (default: last computed)
One of the `draw_graph` layouts, see
:func:`~scanpy.api.tl.draw_graph`. By default, the last computed layout
is used.
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
if layout is None:
layout = str(adata.uns['draw_graph']['params']['layout'])
basis = 'draw_graph_' + layout
if 'X_' + basis not in adata.obsm_keys():
raise ValueError('Did not find {} in adata.obs. Did you compute layout {}?'
.format('draw_graph_' + layout, layout))
return plot_scatter(adata, basis=basis, **kwargs)
@doc_params(adata_color_etc=doc_adata_color_etc, scatter_bulk=doc_scatter_bulk, show_save_ax=doc_show_save_ax)
def pca(adata, **kwargs):
"""\
Scatter plot in PCA coordinates.
    Parameters
    ----------
    {adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a `matplotlib.Axis` or a list of it.
"""
return plot_scatter(adata, basis='pca', **kwargs)
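# Illustrative usage sketch (not part of the original module), assuming an
# AnnData object `adata` with a computed PCA, a categorical obs key 'louvain'
# and a (hypothetical) gene name 'GeneA':
#
#     sc.pl.pca(adata, color=['louvain', 'GeneA'], components=['1,2', '2,3'])
#
# With two colors and two component pairs, plot_scatter below draws four panels.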
def plot_scatter(adata,
color=None,
use_raw=None,
sort_order=True,
edges=False,
edges_width=0.1,
edges_color='grey',
arrows=False,
arrows_kwds=None,
basis=None,
groups=None,
components=None,
projection='2d',
color_map=None,
palette=None,
size=None,
frameon=None,
legend_fontsize=None,
legend_fontweight='bold',
legend_loc='right margin',
ncols=4,
hspace=0.25,
wspace=None,
title=None,
show=None,
save=None,
ax=None, return_fig=None, **kwargs):
sanitize_anndata(adata)
if color_map is not None:
kwargs['cmap'] = color_map
if size is not None:
kwargs['s'] = size
if 'edgecolor' not in kwargs:
# by default turn off edge color. Otherwise, for
# very small sizes the edge will not reduce its size
# (https://github.com/theislab/scanpy/issues/293)
kwargs['edgecolor'] = 'none'
if projection == '3d':
from mpl_toolkits.mplot3d import Axes3D
args_3d = {'projection': '3d'}
else:
args_3d = {}
if use_raw is None:
# check if adata.raw is set
if adata.raw is None:
use_raw = False
else:
use_raw = True
if wspace is None:
# try to set a wspace that is not too large or too small given the
# current figure size
wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02
if adata.raw is None and use_raw is True:
raise ValueError("`use_raw` is set to True but annData object does not have raw. "
"Please check.")
# turn color into a python list
color = [color] if isinstance(color, str) or color is None else list(color)
if title is not None:
# turn title into a python list if not None
title = [title] if isinstance(title, str) else list(title)
####
    # get the points position and the components list (only if components is not None)
data_points, components_list = _get_data_points(adata, basis, projection, components)
###
# setup layout. Most of the code is for the case when multiple plots are required
    # 'color' is a list of names to be plotted. E.g. ['Gene1', 'louvain', 'Gene2'].
# component_list is a list of components [[0,1], [1,2]]
if (isinstance(color, list) and len(color) > 1) or len(components_list) > 1:
if ax is not None:
raise ValueError("When plotting multiple panels (each for a given value of 'color' "
"a given ax can not be used")
# change from None to empty list
if isinstance(color, str) or color is None:
color = [color]
if len(components_list) == 0:
components_list = [None]
multi_panel = True
# each plot needs to be its own panel
from matplotlib import gridspec
# set up the figure
num_panels = len(color) * len(components_list)
n_panels_x = min(ncols, num_panels)
n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)
# each panel will have the size of rcParams['figure.figsize']
fig = pl.figure(figsize=(n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),
n_panels_y * rcParams['figure.figsize'][1]))
left = 0.2 / n_panels_x
bottom = 0.13 / n_panels_y
gs = gridspec.GridSpec(nrows=n_panels_y,
ncols=n_panels_x,
left=left,
right=1-(n_panels_x-1)*left-0.01/n_panels_x,
bottom=bottom,
top=1-(n_panels_y-1)*bottom-0.1/n_panels_y,
hspace=hspace,
wspace=wspace)
else:
if len(components_list) == 0:
components_list = [None]
multi_panel = False
if ax is None:
fig = pl.figure()
ax = fig.add_subplot(111, **args_3d)
###
# make the plots
axs = []
import itertools
idx_components = range(len(components_list))
# use itertools.product to make a plot for each color and for each component
    # For example if color=[gene1, gene2] and components=['1,2', '2,3'].
# The plots are: [color=gene1, components=[1,2], color=gene1, components=[2,3],
# color=gene2, components = [1, 2], color=gene2, components=[2,3]]
for count, (value_to_plot, component_idx) in enumerate(itertools.product(color, idx_components)):
color_vector, categorical = _get_color_values(adata, value_to_plot,
groups=groups, palette=palette,
use_raw=use_raw)
# check if higher value points should be plot on top
if sort_order is True and value_to_plot is not None and categorical is False:
order = np.argsort(color_vector)
color_vector = color_vector[order]
_data_points = data_points[component_idx][order, :]
else:
_data_points = data_points[component_idx]
# if plotting multiple panels, get the ax from the grid spec
# else use the ax value (either user given or created previously)
if multi_panel is True:
ax = pl.subplot(gs[count], **args_3d)
axs.append(ax)
if not (settings._frameon if frameon is None else frameon):
ax.axis('off')
if title is None:
if value_to_plot is not None:
ax.set_title(value_to_plot)
else:
ax.set_title('')
else:
try:
ax.set_title(title[count])
except IndexError:
logg.warn("The title list is shorter than the number of panels. Using 'color' value instead for"
"some plots.")
ax.set_title(value_to_plot)
if 's' not in kwargs:
kwargs['s'] = 120000 / _data_points.shape[0]
# make the scatter plot
if projection == '3d':
cax= ax.scatter(_data_points[:, 0], _data_points[:, 1], _data_points[:, 2],
marker=".", c=color_vector, rasterized=settings._vector_friendly,
**kwargs)
else:
cax= ax.scatter(_data_points[:, 0], _data_points[:, 1],
marker=".", c=color_vector, rasterized=settings._vector_friendly,
**kwargs)
# remove y and x ticks
ax.set_yticks([])
ax.set_xticks([])
if projection == '3d':
ax.set_zticks([])
# set default axis_labels
name = _basis2name(basis)
if components is not None:
axis_labels = [name + str(x + 1) for x in components_list[component_idx]]
elif projection == '3d':
axis_labels = [name + str(x + 1) for x in range(3)]
else:
axis_labels = [name + str(x + 1) for x in range(2)]
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
if projection == '3d':
# shift the label closer to the axis
ax.set_zlabel(axis_labels[2], labelpad=-7)
ax.autoscale_view()
if edges:
utils.plot_edges(ax, adata, basis, edges_width, edges_color)
if arrows:
utils.plot_arrows(ax, adata, basis, arrows_kwds)
if value_to_plot is None:
            # if only dots were plotted without an associated value
            # there is no need to plot a legend or a colorbar
continue
_add_legend_or_colorbar(adata, ax, cax, categorical, value_to_plot, legend_loc,
_data_points, legend_fontweight, legend_fontsize, groups, multi_panel)
if return_fig is True:
return fig
axs = axs if multi_panel else ax
utils.savefig_or_show(basis, show=show, save=save)
if show is False:
return axs
def _get_data_points(adata, basis, projection, components):
"""
Returns the data points corresponding to the selected basis, projection and/or components.
    Because multiple components can be given (e.g. components=['1,2', '2,3']) the
    returned data are lists, containing each of the components. When only one component is plotted
    the list length is 1.
Returns
-------
`tuple` of:
data_points : `list`. Each list is a numpy array containing the data points
components : `list` The cleaned list of components. Eg. [[0,1]] or [[0,1], [1,2]]
for components = [1,2] and components=['1,2', '2,3'] respectively
"""
n_dims = 2
if projection == '3d':
# check if the data has a third dimension
if adata.obsm['X_' + basis].shape[1] == 2:
if settings._low_resolution_warning:
                logg.warn('Selected projection is "3d" but only two dimensions '
                          'are available. Only these two dimensions will be plotted')
else:
n_dims = 3
if components == 'all':
components = ['{},{}'.format(*((i, i+1) if i % 2 == 1 else (i+1, i)))
for i in range(1, adata.obsm['X_{}'.format(basis)].shape[1])]
components_list = []
offset = 0
if basis == 'diffmap': offset = 1
if components is not None:
# components have different formats, either a list with integers, a string
# or a list of strings.
if isinstance(components, str):
# eg: components='1,2'
components_list.append([int(x.strip()) - 1 + offset for x in components.split(',')])
elif isinstance(components, list):
if isinstance(components[0], int):
# components=[1,2]
components_list.append([int(x) - 1 + offset for x in components])
else:
# in this case, the components are str
                # e.g.: components=['1,2'] or components=['1,2', '2,3']
# More than one component can be given and is stored
# as a new item of components_list
for comp in components:
components_list.append([int(x.strip()) - 1 + offset for x in comp.split(',')])
else:
raise ValueError("Given components: '{}' are not valid. Please check. "
"A valid example is `components='2,3'`")
# check if the components are present in the data
try:
data_points = []
for comp in components_list:
data_points.append(adata.obsm['X_' + basis][:, comp])
except:
raise ValueError("Given components: '{}' are not valid. Please check. "
"A valid example is `components='2,3'`")
if basis == 'diffmap':
# remove the offset added in the case of diffmap, such that
# plot_scatter can print the labels correctly.
components_list = [[number-1 for number in comp] for comp in components_list]
else:
data_points = [adata.obsm['X_' + basis][:, offset:offset+n_dims]]
components_list = []
return data_points, components_list
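# Minimal usage sketch, assuming an AnnData object `adata` whose
# `adata.obsm['X_umap']` embedding has at least three columns:
#
#     data_points, comps = _get_data_points(adata, 'umap', '2d', ['1,2', '2,3'])
#     # comps == [[0, 1], [1, 2]]  (1-based component strings become 0-based index pairs)
#     # each entry of data_points has shape (adata.n_obs, 2)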
def _add_legend_or_colorbar(adata, ax, cax, categorical, value_to_plot, legend_loc,
scatter_array, legend_fontweight, legend_fontsize,
groups, multi_panel):
"""
Adds a color bar or a legend to the given ax. A legend is added when the
data is categorical and a color bar is added when a continuous value was used.
"""
# add legends or colorbars
if categorical is True:
# add legend to figure
categories = list(adata.obs[value_to_plot].cat.categories)
colors = adata.uns[value_to_plot + '_colors']
if multi_panel is True:
# Shrink current axis by 10% to fit legend and match
# size of plots that are not categorical
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])
if groups is not None:
# only label groups with the respective color
colors = [colors[categories.index(x)] for x in groups]
categories = groups
if legend_loc == 'right margin':
for idx, label in enumerate(categories):
color = colors[idx]
# use empty scatter to set labels
ax.scatter([], [], c=color, label=label)
ax.legend(
frameon=False, loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(categories) <= 14
else 2 if len(categories) <= 30 else 3),
fontsize=legend_fontsize)
if legend_loc == 'on data':
# identify centroids to put labels
all_pos = np.zeros((len(categories), 2))
for ilabel, label in enumerate(categories):
_scatter = scatter_array[adata.obs[value_to_plot] == label, :]
x_pos, y_pos = np.median(_scatter, axis=0)
ax.text(x_pos, y_pos, label,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize)
all_pos[ilabel] = [x_pos, y_pos]
# this is temporary storage for access by other tools
utils._tmp_cluster_pos = all_pos
else:
# add colorbar to figure
pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)
def _set_colors_for_categorical_obs(adata, value_to_plot, palette):
"""
Sets the adata.uns[value_to_plot + '_colors'] according to the given palette
Parameters
----------
adata : annData object
value_to_plot : name of a valid categorical observation
palette : Palette should be either a valid `matplotlib.pyplot.colormaps()` string,
a list of colors (in a format that can be understood by matplotlib,
              e.g. RGB, RGBA, hex), or a cycler object with key='color'
Returns
-------
None
"""
from matplotlib.colors import to_hex
from cycler import Cycler, cycler
categories = adata.obs[value_to_plot].cat.categories
    # check if palette is a valid matplotlib colormap
if isinstance(palette, str) and palette in pl.colormaps():
# this creates a palette from a colormap. E.g. 'Accent, Dark2, tab20'
cmap = pl.get_cmap(palette)
colors_list = [to_hex(x) for x in cmap(np.linspace(0, 1, len(categories)))]
else:
# check if palette is a list and convert it to a cycler, thus
        # it doesn't matter if the list is shorter than the categories length:
if isinstance(palette, list):
if len(palette) < len(categories):
logg.warn("Length of palette colors is smaller than the number of "
"categories (palette length: {}, categories length: {}. "
"Some categories will have the same color."
.format(len(palette), len(categories)))
# check that colors are valid
_color_list = []
for color in palette:
if not is_color_like(color):
# check if the color is a valid R color and translate it
# to a valid hex color value
if color in utils.additional_colors:
color = utils.additional_colors[color]
else:
raise ValueError("The following color value of the given palette is not valid: {}".format(color))
_color_list.append(color)
palette = cycler(color=_color_list)
if not isinstance(palette, Cycler):
raise ValueError("Please check that the value of 'palette' is a "
"valid matplotlib colormap string (eg. Set2), a "
"list of color names or a cycler with a 'color' key.")
if 'color' not in palette.keys:
raise ValueError("Please set the palette key 'color'.")
cc = palette()
colors_list = [to_hex(next(cc)['color']) for x in range(len(categories))]
adata.uns[value_to_plot + '_colors'] = colors_list
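# Minimal usage sketch, assuming `adata` carries a categorical observation 'louvain':
#
#     _set_colors_for_categorical_obs(adata, 'louvain', 'tab20')
#     # adata.uns['louvain_colors'] is now a list of hex strings, one per category,
#     # sampled evenly from the matplotlib 'tab20' colormap.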
def _set_default_colors_for_categorical_obs(adata, value_to_plot):
"""
Sets the adata.uns[value_to_plot + '_colors'] using default color palettes
Parameters
----------
adata : annData object
value_to_plot : name of a valid categorical observation
Returns
-------
None
"""
from .. import palettes
categories = adata.obs[value_to_plot].cat.categories
length = len(categories)
# check if default matplotlib palette has enough colors
if len(rcParams['axes.prop_cycle'].by_key()['color']) >= length:
cc = rcParams['axes.prop_cycle']()
palette = [next(cc)['color'] for _ in range(length)]
else:
if length <= 28:
palette = palettes.default_26
elif length <= len(palettes.default_64): # 103 colors
palette = palettes.default_64
else:
palette = ['grey' for i in range(length)]
logg.info('the obs value: "{}" has more than 103 categories. Uniform '
                  '\'grey\' color will be used for all categories.'.format(value_to_plot))
adata.uns[value_to_plot + '_colors'] = palette[:length]
def _get_color_values(adata, value_to_plot, groups=None, palette=None, use_raw=False):
"""
Returns the value or color associated to each data point.
For categorical data, the return value is list of colors taken
from the category palette or from the given `palette` value.
For non-categorical data, the values are returned
"""
###
# when plotting, the color of the dots is determined for each plot
# the data is either categorical or continuous and the data could be in
# 'obs' or in 'var'
categorical = False
if value_to_plot is None:
color_vector = 'lightgray'
# check if value to plot is in obs
elif value_to_plot in adata.obs.columns:
if is_categorical_dtype(adata.obs[value_to_plot]):
categorical = True
if palette:
# use category colors base on given palette
_set_colors_for_categorical_obs(adata, value_to_plot, palette)
else:
if value_to_plot + '_colors' not in adata.uns or \
len(adata.uns[value_to_plot + '_colors']) < len(adata.obs[value_to_plot].cat.categories):
# set a default palette in case that no colors or few colors are found
_set_default_colors_for_categorical_obs(adata, value_to_plot)
else:
# check that the colors in 'uns' are valid
_palette = []
for color in adata.uns[value_to_plot + '_colors']:
if not is_color_like(color):
# check if the color is a valid R color and translate it
# to a valid hex color value
if color in utils.additional_colors:
color = utils.additional_colors[color]
else:
logg.warn("The following color value found in adata.uns['{}'] "
" is not valid: '{}'. Default colors are used.".format(value_to_plot + '_colors', color))
_set_default_colors_for_categorical_obs(adata, value_to_plot)
_palette = None
break
_palette.append(color)
if _palette is not None:
adata.uns[value_to_plot + '_colors'] = _palette
# for categorical data, colors should be
# stored in adata.uns[value_to_plot + '_colors']
# Obtain color vector by converting every category
# into its respective color
color_vector = [adata.uns[value_to_plot + '_colors'][x] for x in adata.obs[value_to_plot].cat.codes]
if groups is not None:
if isinstance(groups, str):
groups = [groups]
color_vector = np.array(color_vector, dtype='<U15')
# set color to 'light gray' for all values
# that are not in the groups
color_vector[~adata.obs[value_to_plot].isin(groups)] = "lightgray"
else:
color_vector = adata.obs[value_to_plot]
# check if value to plot is in var
elif use_raw is False and value_to_plot in adata.var_names:
color_vector = adata[:, value_to_plot].X
elif use_raw is True and value_to_plot in adata.raw.var_names:
color_vector = adata.raw[:, value_to_plot].X
else:
raise ValueError("The passed `color` {} is not a valid observation annotation "
"or variable name. Valid observation annotation keys are: {}"
.format(value_to_plot, adata.obs.columns))
return color_vector, categorical
def _basis2name(basis):
"""
converts the 'basis' into the proper name.
"""
component_name = (
'DC' if basis == 'diffmap'
else 'tSNE' if basis == 'tsne'
else 'UMAP' if basis == 'umap'
else 'PC' if basis == 'pca'
else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
else basis)
return component_name
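# A few illustrative mappings:
#     _basis2name('pca')            # -> 'PC'
#     _basis2name('draw_graph_fa')  # -> 'FA'
#     _basis2name('phate')          # -> 'phate' (unknown bases are returned unchanged)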
| 38.575182
| 141
| 0.578981
|
5a1a30060057db22955f37a9c747d26513b5024c
| 5,195
|
py
|
Python
|
scripts/merge_sources.py
|
KesterTong/recipe-analyzer
|
bf84312a44e9c3ce97a5aa24a8887c2dee38c489
|
[
"Apache-2.0"
] | 1
|
2020-02-03T15:56:53.000Z
|
2020-02-03T15:56:53.000Z
|
scripts/merge_sources.py
|
KesterTong/recipe-analyzer
|
bf84312a44e9c3ce97a5aa24a8887c2dee38c489
|
[
"Apache-2.0"
] | 3
|
2020-03-29T20:22:35.000Z
|
2021-05-24T15:06:17.000Z
|
scripts/merge_sources.py
|
KesterTong/recipe-analyzer
|
bf84312a44e9c3ce97a5aa24a8887c2dee38c489
|
[
"Apache-2.0"
] | 2
|
2020-03-29T20:14:45.000Z
|
2021-06-03T15:05:51.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to merge data files.
This produces an output which is a list of JSON like objects
with the same format as the FoodDataCentral API.
"""
from collections import defaultdict
from datetime import datetime
def _convert_date_format(d):
"""Convert from file format to API format."""
if not d:
return None
dt = datetime.strptime(d, '%Y-%m-%d')
return '%d/%d/%d' % (dt.month, dt.day, dt.year)
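# Illustrative conversions:
#     _convert_date_format('2021-04-05')  # -> '4/5/2021'
#     _convert_date_format('')            # -> None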
_NEW_UNIT_NAMES = {
'G': 'g',
'UG': '\u00b5g',
'IU': 'IU',
'kJ': 'kJ',
'MG_ATE': 'mg_ATE',
'MG': 'mg',
'KCAL': 'kcal',
}
_UNKNOWN_UNIT = 'UNKNOWN_UNIT'
def _remove_nones(obj):
"""Return a new object ommitting keys whose value is none."""
return {key: value for key, value in obj.items() if value is not None}
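# For example:
#     _remove_nones({'fdcId': 1, 'brandOwner': None})  # -> {'fdcId': 1}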
def _convert_nutrient(nutrient):
return _remove_nones({
'id': int(nutrient.id),
'name': nutrient.name,
'unitName': _NEW_UNIT_NAMES.get(nutrient.unit_name, _UNKNOWN_UNIT),
'nutrient_nbr': nutrient.nutrient_nbr,
'rank': int(nutrient.rank) if nutrient.rank else None
})
def _convert_food_nutrient(food_nutrient, nutrients):
# In order to ensure that _NEW_UNIT_NAMES covers all nutrients that
# are used in the Branded Food data, we check here for _UNKNOWN_UNIT
assert nutrients[food_nutrient.nutrient_id]['unitName'] != _UNKNOWN_UNIT, \
food_nutrient
return {
"type": "FoodNutrient",
"id": int(food_nutrient.id),
"nutrient": nutrients[food_nutrient.nutrient_id],
"amount": float(food_nutrient.amount)
}
def _merge(branded_food, food, food_nutrients, nutrients):
assert food.data_type == 'branded_food'
assert food.fdc_id == branded_food.fdc_id
assert food.publication_date
for food_nutrient in food_nutrients:
assert food_nutrient.fdc_id == branded_food.fdc_id
return _remove_nones({
'foodClass': 'Branded',
'description': food.description,
'foodNutrients': [
_convert_food_nutrient(food_nutrient, nutrients)
for food_nutrient in food_nutrients
if food_nutrient.nutrient_id in nutrients],
'brandOwner': branded_food.brand_owner,
'gtinUpc': branded_food.gtin_upc,
'dataSource': branded_food.data_source,
'ingredients': branded_food.ingredients,
"marketCountry": branded_food.market_country,
'modifiedDate': _convert_date_format(branded_food.modified_date),
'availableDate': _convert_date_format(branded_food.available_date),
'discontinuedDate': _convert_date_format(branded_food.discontinued_date),
'servingSize': float(branded_food.serving_size) if branded_food.serving_size else None,
'servingSizeUnit': branded_food.serving_size_unit or None,
'householdServingFullText': branded_food.household_serving_fulltext or None,
'brandedFoodCategory': branded_food.branded_food_category,
'fdcId': int(branded_food.fdc_id),
'dataType': 'Branded',
'publicationDate': _convert_date_format(food.publication_date),
'foodPortions': [],
# additions
'brand_name': branded_food.brand_name,
'subbrand_name': branded_food.subbrand_name,
'not_a_significant_source_of': branded_food.not_a_significant_source_of
})
def merge_sources(raw_data):
"""Merge all the sources in raw_data.
Each field in raw_data represents a single CSV file. This function
merges these into a single list, where each element of the list is
a JSON-like object containing the same data as the output of the
FoodDataCentral API.
Args:
raw_data: A `RawData`.
Returns:
A list of JSON-like objects.
"""
print('merging raw data rows')
# Convert branded_food_data.foods to dict for merging.
foods = {food.fdc_id: food for food in raw_data.foods}
# Convert nutrients to a dict for merging.
# Skip nutrients with rank '' because it's not clear how to handle them.
nutrients = {
        # April 2021: the food_nutrients.csv nutrient_id field in fact contains the nutrient_nbr field
nutrient.nutrient_nbr: _convert_nutrient(nutrient)
for nutrient in raw_data.nutrients}
food_nutrients = defaultdict(list)
for food_nutrient in raw_data.food_nutrients:
food_nutrients[food_nutrient.fdc_id].append(food_nutrient)
result = []
for branded_food in raw_data.branded_foods:
fdc_id = branded_food.fdc_id
result.append(_merge(
branded_food, foods[fdc_id], food_nutrients[fdc_id], nutrients))
return result
| 35.827586
| 95
| 0.698749
|
42c4e0556d6f33ebe6a7d4258ce93ce689e4b9c0
| 811
|
py
|
Python
|
backend/src/util/cronjob/extensions.py
|
tonyBen/rcmg
|
fabfe378b7a85d90018e1798bb253603ac9d05f5
|
[
"MIT"
] | null | null | null |
backend/src/util/cronjob/extensions.py
|
tonyBen/rcmg
|
fabfe378b7a85d90018e1798bb253603ac9d05f5
|
[
"MIT"
] | 1
|
2021-12-03T02:37:13.000Z
|
2021-12-03T02:37:13.000Z
|
backend/src/util/cronjob/extensions.py
|
tonyBen/rcmg
|
fabfe378b7a85d90018e1798bb253603ac9d05f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
------------------------------------------------
util.cronjob.extensions
------------------------------------------------
Author: Tony Ben (email: nanjinghhu@vip.qq.com)
Create: 11/29/2021
------------------------------------------------
ChangeLog
------------------------------------------------
Author Date Version Describe
------------------------------------------------
tben 11/29/2021 v1.0.0 Init
------------------------------------------------
"""
"""Initialize any app extensions."""
from flask_apscheduler import APScheduler
from apscheduler.schedulers.background import BackgroundScheduler
scheduler = APScheduler(scheduler=BackgroundScheduler(timezone="UTC"))
# ... any other stuff.. db, caching, sessions, etc.
| 33.791667
| 71
| 0.432799
|
1e8c4b9ebcec3391c629f98436d0c34e5cac7c67
| 3,763
|
py
|
Python
|
src/apps/dbwipes/util.py
|
RogerTangos/datahub-stub
|
8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89
|
[
"MIT"
] | null | null | null |
src/apps/dbwipes/util.py
|
RogerTangos/datahub-stub
|
8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89
|
[
"MIT"
] | null | null | null |
src/apps/dbwipes/util.py
|
RogerTangos/datahub-stub
|
8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89
|
[
"MIT"
] | null | null | null |
#
# this file has no deps on Scorpion
#
import os
import re
import time
import json
import decimal
import md5
import psycopg2
import traceback
from collections import *
from datetime import datetime
from scorpionsql.errfunc import *
from scorpionsql.sql import *
# JSON Encoder
class SummaryEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, float):
if o == float('inf'):
return 1e100
elif o == float('-inf'):
return -1e100
if isinstance(o, decimal.Decimal):
return float(o)
if hasattr(o, 'isoformat'):
s = o.isoformat()
if not s.endswith("Z"):
s += 'Z'
return s
return super(SummaryEncoder, self).default(o)
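# Minimal usage sketch of the encoder:
#     json.dumps({'when': datetime(2020, 1, 2)}, cls=SummaryEncoder)
#     # -> '{"when": "2020-01-02T00:00:00Z"}'
#     json.dumps({'price': decimal.Decimal('1.5')}, cls=SummaryEncoder)
#     # -> '{"price": 1.5}'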
def where_to_sql(where_json, negate=False):
is_type = lambda s, types: any([t in s for t in types])
l = []
args = []
for clause_json in where_json:
if 'sql' in clause_json:
l.append(clause_json['sql'])
continue
ctype = clause_json['type']
col = clause_json['col']
vals = clause_json['vals']
if not vals: continue
if is_type(ctype, ['num', 'int', 'float', 'double', 'date', 'time']):
q = "%%s <= %s and %s <= %%s" % (col, col)
args.extend(vals)
else:
tmp = []
vals = list(vals)
if None in vals:
tmp.append("(%s is null)" % col)
realvals = list(filter(lambda v: v is not None, vals))
if len(realvals) == 1:
tmp.append("(%s = %%s)" % col)
args.append(realvals[0])
elif len(realvals):
tmp.append("(%s in %%s)" % col)
args.append(tuple(list(realvals)))
q = ' or '.join(tmp)
l.append(q)
q = ' and '.join(filter(bool, l))
if negate and q:
q = "not(%s)" % q
return q, args
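# Minimal sketch of the clause translation (hypothetical clause JSON):
#     q, args = where_to_sql([{'type': 'num', 'col': 'price', 'vals': [10, 20]}])
#     # q    == '%s <= price and price <= %s'
#     # args == [10, 20]
#     # with negate=True the whole expression is wrapped as 'not(...)'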
__agg2f__ = {
'avg' : AvgErrFunc,
'std' : StdErrFunc,
'stddev' : StdErrFunc,
'stddev_samp': StdErrFunc,
'stddev_pop': StdErrFunc,
'min' : MinErrFunc,
'max' : MaxErrFunc,
'sum' : SumErrFunc,
'corr' : CorrErrFunc,
'count' : CountErrFunc,
'abs' : AbsErrFunc
}
def parse_agg(s):
"""
parse an aggregation SELECT clause e.g., avg(temp) as foo
into dictionary of function name, column, and alias components
"""
print s
p = re.compile('(?P<func>\w+)\(\s*(?P<col>[\w\,\s]+)\s*\)\s*(as\s+(?P<alias>\w+))?')
d = p.match(s).groupdict()
klass = __agg2f__[d['func'].strip()]
expr = str(d['col'])
cols = [col.strip() for col in expr.split(',')]
varlist = [Var(col) for col in cols]
print klass
print cols
print varlist
func = klass(varlist)
return {
'fname': d['func'],
'func': func,
'cols': cols,
'alias': d.get('alias', '') or d['func']
}
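# Minimal sketch of what the parser extracts (relies on scorpionsql's AvgErrFunc):
#     d = parse_agg('avg(temp) as foo')
#     # d['fname'] == 'avg', d['cols'] == ['temp'], d['alias'] == 'foo'
#     # d['func'] is an AvgErrFunc built over Var('temp')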
def expr_from_nonagg(s):
"""
remove alias component of a nonaggregation SELECT clause
"""
if ' as ' in s:
return ' as '.join(s.split(' as ')[:-1])
return s
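# For example:
#     expr_from_nonagg('lower(name) as n')  # -> 'lower(name)'
#     expr_from_nonagg('name')              # -> 'name'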
def create_sql_obj(db, qjson):
x = qjson['x']
ys = qjson['ys']
#sql = qjson['query']
dbname = qjson['db']
table = qjson['table']
negate = qjson.get('negate', False)
where_json = qjson.get('where', []) or []
basewheres_json = qjson.get('basewheres', []) or []
where, args = where_to_sql(where_json, negate)
basewheres, baseargs = where_to_sql(basewheres_json, False)
where = ' and '.join(filter(bool, [where, basewheres]))
args.extend(baseargs)
select = Select()
nonagg = SelectExpr(x['alias'], [x['col']], x['expr'], x['col'])
select.append(nonagg)
for y in ys:
d = parse_agg(y['expr'])
agg = SelectAgg(y['alias'], d['func'], d['cols'], y['expr'], d['cols'][0])
select.append(agg)
parsed = Query(
db,
select,
[table],
[where],
[x['expr']],
[expr_from_nonagg(x['expr'])]
)
return parsed, args
def pick(iterable, key):
return [item[key] for item in iterable]
| 22.266272
| 86
| 0.590752
|
d42e7e97a844864f9077bb96b289a95a583ccaa0
| 2,811
|
py
|
Python
|
rptools/rpfba/Args.py
|
brsynth/rptools
|
51df7877236dc8d72e50c93b6e5a30fb65d745d0
|
[
"MIT"
] | 4
|
2021-01-14T14:52:55.000Z
|
2022-01-28T09:31:59.000Z
|
rptools/rpfba/Args.py
|
brsynth/rptools
|
51df7877236dc8d72e50c93b6e5a30fb65d745d0
|
[
"MIT"
] | 5
|
2021-01-18T09:24:48.000Z
|
2021-12-17T06:47:07.000Z
|
rptools/rpfba/Args.py
|
brsynth/rptools
|
51df7877236dc8d72e50c93b6e5a30fb65d745d0
|
[
"MIT"
] | 2
|
2021-09-14T07:02:20.000Z
|
2022-01-31T16:59:42.000Z
|
from argparse import ArgumentParser
from typing import (
List,
)
from rptools._version import __version__
from rptools.rpfba.medium import (
__MEDIUM_DEFAULT_ID,
__MEDIUM_PATH,
read_medium_ids
)
def add_arguments(
parser: ArgumentParser):
parser.add_argument(
'pathway_file',
type=str,
help='SBML file that contains an heterologous pathway'
)
parser.add_argument(
'model_file',
type=str,
help='GEM model file (SBML)'
)
parser.add_argument(
'compartment_id',
type=str,
help='model compartment id to consider (e.g. \'c\' or \'MNXC3\' or \'c|MNXC3|cytosol|cytoplasm\')'
)
parser.add_argument(
'outfile',
type=str,
help='output file'
)
parser.add_argument(
'--objective_rxn_id',
type=str,
default='rxn_target',
help='reaction ID to optimise (default: rxn_target)'
)
parser.add_argument(
'--biomass_rxn_id',
type=str,
default='biomass',
help='biomass reaction ID (default: biomass). Note: Only for \'fraction\' simulation'
)
parser.add_argument(
'--sim',
type=str,
choices=['fba', 'pfba', 'fraction'],
default='fraction',
help='type of simulation to use (default: fraction)'
)
parser.add_argument(
'--fraction_of',
type=float,
default=0.75,
        help='fraction of the optimum (default: 0.75). Note: this value is ignored if \'fba\' is used'
)
parser.add_argument(
'--merge',
action='store_true',
default=False,
help='output the full merged model instead of heterologous pathway only (default: False)'
)
parser.add_argument(
'--ignore_orphan_species',
action='store_true',
default=True,
help='ignore metabolites that are only consumed or produced (default: True)'
)
parser_medium = parser.add_argument_group('Medium', 'Medium modifications')
parser_medium.add_argument('--medium_compartment_id',
type=str,
default='MNXC2',
                               help='Model compartment id corresponding to the extracellular compartment'
)
parser_medium.add_argument(
'--medium_file',
type=str,
        help='Provide a csv file with a header as <compound_id>,<upper_bound>. \
            This file provides information about metabolites (Metanetx Id) to add or remove (giving an upper bound value)'
)
parser_medium.add_argument(
'--medium_id',
type=str,
default=__MEDIUM_DEFAULT_ID,
choices=[__MEDIUM_DEFAULT_ID] + read_medium_ids(__MEDIUM_PATH),
help='Use a base medium composition. Data can be add with the option --medium_file'
)
return parser
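# Minimal usage sketch (hypothetical file names):
#     parser = ArgumentParser(prog='rpfba')
#     parser = add_arguments(parser)
#     args = parser.parse_args(['pathway.xml', 'model.xml', 'MNXC3', 'out.xml', '--sim', 'pfba'])
#     # args.sim == 'pfba', args.fraction_of == 0.75, args.medium_id == __MEDIUM_DEFAULT_ID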
| 30.225806
| 123
| 0.621843
|
cac2e7a4647cd852424fdaf2f6aa83e81d3d7082
| 428
|
py
|
Python
|
xbs-alpha/py/src/interpreter.py
|
XephonineDeb/XPHN-Basic-Scripting-XBS-
|
dbe43c797a608831dbec390bb1449307decffc8f
|
[
"MIT"
] | null | null | null |
xbs-alpha/py/src/interpreter.py
|
XephonineDeb/XPHN-Basic-Scripting-XBS-
|
dbe43c797a608831dbec390bb1449307decffc8f
|
[
"MIT"
] | null | null | null |
xbs-alpha/py/src/interpreter.py
|
XephonineDeb/XPHN-Basic-Scripting-XBS-
|
dbe43c797a608831dbec390bb1449307decffc8f
|
[
"MIT"
] | null | null | null |
from lexer import *
from parser import *
from exec import *
if __name__ == '__main__':
lexer = BasicLexer()
parser = BasicParser()
env = {}
while True:
try:
text = input('>>>')
except EOFError:
break
if text:
tree = parser.parse(lexer.tokenize(text))
BasicExecute(tree, env)
| 12.228571
| 54
| 0.455607
|
d74a87c9787d92a74d10ee1ef08827d66bd015f1
| 18,276
|
py
|
Python
|
mylib/web_client.py
|
mo-han/mo-han-toolbox
|
1f907d42aea9574c34f10474cc7194dbace6ff02
|
[
"MIT"
] | 24
|
2019-12-08T03:56:32.000Z
|
2021-10-02T13:26:37.000Z
|
mylib/web_client.py
|
mo-han/mo-han-toolbox
|
1f907d42aea9574c34f10474cc7194dbace6ff02
|
[
"MIT"
] | 2
|
2020-04-27T14:20:01.000Z
|
2020-07-17T06:05:33.000Z
|
mylib/web_client.py
|
mo-han/mo-han-toolbox
|
1f907d42aea9574c34f10474cc7194dbace6ff02
|
[
"MIT"
] | 10
|
2019-08-06T01:11:28.000Z
|
2021-07-19T08:45:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Library for website operation"""
import json
from concurrent.futures.thread import ThreadPoolExecutor
from queue import Queue
from urllib.parse import urlparse, ParseResult
import colorama
import humanize
import lxml.html
import requests.utils
from mylib.easy.stdlibs.typing import JSONType
from mylib.ext import fstk, ostk
from .easy import *
from mylib.ext.http_headers import CURLCookieJar
from .easy.logging import ez_get_logger, LOG_FMT_MESSAGE_ONLY
from .easy.io import SubscriptableFileIO
from .ext.tricks import singleton, iter_factory_retry
from .easy.stdlibs.threading import ez_thread_factory
MAGIC_TXT_NETSCAPE_HTTP_COOKIE_FILE = '# Netscape HTTP Cookie File'
USER_AGENT_FIREFOX_WIN10 = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0'
HTMLElementTree = lxml.html.HtmlElement
def get_html_element_tree(url, **requests_kwargs) -> HTMLElementTree:
r = requests.get(url, **requests_kwargs)
if r.ok:
return lxml.html.document_fromstring(r.text)
else:
raise ConnectionError(r.status_code, r.reason)
def convert_cookies_json_to_netscape(json_data_or_filepath: JSONType or str, disable_filepath: bool = False) -> str:
from mylib.ext.fstk import read_json_file
if not disable_filepath and os.path.isfile(json_data_or_filepath):
json_data = read_json_file(json_data_or_filepath)
else:
json_data = json_data_or_filepath
cookies = ensure_json_cookies(json_data)
tab = '\t'
false_ = 'FALSE' + tab
true_ = 'TRUE' + tab
lines = [MAGIC_TXT_NETSCAPE_HTTP_COOKIE_FILE]
for c in cookies:
http_only_prefix = '#HttpOnly_' if c['httpOnly'] else ''
line = http_only_prefix + c['domain'] + tab
if c['hostOnly']:
line += false_
else:
line += true_
line += c['path'] + tab
if c['secure']:
line += true_
else:
line += false_
line += '{}\t{}\t{}'.format(c.get('expirationDate', 0), c['name'], c['value'])
lines.append(line)
return '\n'.join(lines)
def convert_cookies_file_json_to_netscape(src, dst=None) -> str:
if not os.path.isfile(src):
raise FileNotFoundError(src)
dst = dst or src + '.txt'
with fstk.ensure_open_file(dst, 'w') as f:
f.write(convert_cookies_json_to_netscape(src))
return dst
def ensure_json_cookies(json_data) -> list:
if isinstance(json_data, list):
cookies = json_data
elif isinstance(json_data, dict):
if 'cookies' in json_data:
if isinstance(json_data['cookies'], list):
cookies = json_data['cookies']
else:
raise TypeError("{}['cookies'] is not list".format(json_data))
else:
raise TypeError("dict '{}' has no 'cookies'".format(json_data))
else:
raise TypeError("'{}' is not list or dict".format(json_data))
return cookies
def cookies_dict_from_json(json_data_or_filepath: JSONType or str, disable_filepath: bool = False) -> dict:
from mylib.ext.fstk import read_json_file
if not disable_filepath and os.path.isfile(json_data_or_filepath):
json_data = read_json_file(json_data_or_filepath)
else:
json_data = json_data_or_filepath
d = {}
cookies = ensure_json_cookies(json_data)
for c in cookies:
d[c['name']] = c['value']
return d
def cookies_dict_from_netscape_file(filepath: str, ignore_discard=True, ignore_expires=True) -> dict:
cj = CURLCookieJar(filepath)
cj.load(ignore_discard=ignore_discard, ignore_expires=ignore_expires)
return requests.utils.dict_from_cookiejar(cj)
def cookies_dict_from_file(filepath: str, ignore_discard=True, ignore_expires=True) -> dict:
if not os.path.isfile(filepath):
raise FileNotFoundError(filepath)
if filepath.endswith('.json'):
d = cookies_dict_from_json(filepath)
else:
d = cookies_dict_from_netscape_file(filepath, ignore_discard=ignore_discard, ignore_expires=ignore_expires)
return d
def cookie_str_from_dict(cookies: dict) -> str:
cookies_l = ['{}={}'.format(k, v) for k, v in cookies.items()]
cookie = '; '.join(cookies_l)
return cookie
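# For example (insertion order of the dict is preserved):
#     cookie_str_from_dict({'sessionid': 'abc', 'lang': 'en'})  # -> 'sessionid=abc; lang=en'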
def headers_from_user_agent(user_agent: str = None, headers: dict = None) -> dict:
from copy import deepcopy
h = deepcopy(headers) or {}
h['User-Agent'] = user_agent or USER_AGENT_FIREFOX_WIN10
return h
def headers_from_cookies(cookies_data: dict or str, headers: dict = None) -> dict:
from copy import deepcopy
h = deepcopy(headers) or headers_from_user_agent()
if isinstance(cookies_data, dict):
cookie = cookie_str_from_dict(cookies_data)
elif isinstance(cookies_data, str):
cookie = cookies_data
else:
raise TypeError('cookies_data', (dict, str))
h['Cookie'] = cookie
return h
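# Minimal sketch: build request headers that carry a cookie string.
#     h = headers_from_cookies({'sessionid': 'abc'})
#     # h['User-Agent'] == USER_AGENT_FIREFOX_WIN10
#     # h['Cookie'] == 'sessionid=abc'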
def get_phantomjs_splinter(proxy=None, show_image=False, window_size=(1024, 1024)):
import splinter
extra_argv = ['--webdriver-loglevel=WARN']
if proxy:
extra_argv.append('--proxy={}'.format(proxy))
if not show_image:
extra_argv.append('--load-images=no')
b = splinter.Browser(
'phantomjs',
service_log_path=os.path.join(ostk.TEMPDIR, 'ghostdriver.log'),
user_agent=USER_AGENT_FIREFOX_WIN10,
service_args=extra_argv,
)
b.driver.set_window_size(*window_size)
return b
def get_firefox_splinter(headless=True, proxy: str = None, **kwargs):
import splinter
config = {'service_log_path': os.path.join(ostk.TEMPDIR, 'geckodriver.log'),
'headless': headless}
config.update(kwargs)
profile_dict = {}
if proxy:
from urllib.parse import urlparse
prefix = 'network.proxy.'
profile_dict[prefix + 'type'] = 1
proxy_parse = urlparse(proxy)
scheme = proxy_parse.scheme
netloc = proxy_parse.netloc
try:
host, port = netloc.split(':')
port = int(port)
except ValueError:
raise ValueError(proxy)
if scheme in ('http', 'https', ''):
profile_dict[prefix + 'http'] = host
profile_dict[prefix + 'http_port'] = port
profile_dict[prefix + 'https'] = host
profile_dict[prefix + 'https_port'] = port
elif scheme.startswith('socks'):
profile_dict[prefix + 'socks'] = host
profile_dict[prefix + 'socks_port'] = port
else:
raise ValueError(proxy)
browser = splinter.Browser(driver_name='firefox', profile_preferences=profile_dict, **config)
return browser
def get_zope_splinter(**kwargs):
import splinter
return splinter.Browser(driver_name='zope.testbrowser', **kwargs)
get_browser = {
'splinter.phantomjs': get_phantomjs_splinter,
}
def human_filesize(bytes_n: int, no_space=True):
s = humanize.naturalsize(bytes_n, binary=True)
if no_space:
return s.replace(' ', '')
else:
return s
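# For example:
#     human_filesize(1048576)                  # -> '1.0MiB'
#     human_filesize(1048576, no_space=False)  # -> '1.0 MiB'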
class Download:
def __init__(self, response: requests.Response, filepath: str = None,
content: bytes = None, no_content: bool = False):
content = b'' if no_content else content or response.content
if not response.ok:
raise HTTPResponseInspection(response, content)
self.id = id(response)
self.file = filepath or None
self.code = response.status_code
self.reason = response.reason
self.url = response.request.url
self.data = content
self.size = len(self.data)
content_length = int(response.headers.get('Content-Length', '-1'))
if content_length >= 0 and content_length != self.size:
raise HTTPIncomplete(content_length, self.size)
if self.code == 206:
content_range = response.headers['Content-Range']
start, end, total = [int(s) for s in re.search(r'(\d+)-(\d+)/(\d+)', content_range).groups()]
self.start = start
self.stop = end + 1
if self.stop - self.start != self.size:
raise HTTPIncomplete(self.size, self.stop - self.start)
self.total_size = total
else:
self.start = 0
self.stop = self.size
self.total_size = self.size
@property
def is_complete_data(self):
return self.size == self.total_size
@property
def is_at_end(self):
return self.stop >= self.total_size
class HTTPResponseInspection(Exception):
def __init__(self, response: requests.Response, content: bytes = None, no_content: bool = False, size: int = None):
http_ver = {10: '1.0', 11: '1.1'}
content = b'' if no_content else content or response.content
self.version = http_ver[response.raw.version]
self.code = int(response.status_code)
self.reason = str(response.reason)
self.json = None
self.size = len(content)
if no_content:
self.excerpt = None
elif self.size <= 32:
self.excerpt = content
elif self.size <= 4096:
encoding = response.encoding or response.apparent_encoding
try:
text = str(content, encoding=encoding, errors='replace')
except (LookupError, TypeError):
text = str(content, errors='replace')
h: HTMLElementTree = lxml.html.document_fromstring(text)
self.excerpt = h.body.text_content()
else:
self.excerpt = None
ct = response.headers['content-type']
if 'json' in ct or 'javascript' in ct:
try:
self.json = response.json()
except json.decoder.JSONDecodeError:
pass
if size is not None:
self.size = size
self.excerpt = '{} bytes'.format(size)
def __repr__(self):
t = 'HTTP/{} {} {}'.format(self.version, self.code, self.reason)
if self.json:
t += ', JSON={}'.format(self.json)
elif self.excerpt:
t += ', {}'.format(self.excerpt)
elif self.excerpt is not None:
t += ', {} bytes'.format(self.size)
return t
__str__ = __repr__
class HTTPIncomplete(Exception):
def __init__(self, expect_size: int, recv_size: int):
self.expect_size = expect_size
self.recv_size = recv_size
@singleton
class DownloadPool(ThreadPoolExecutor):
tmpfile_suffix = '.download'
def __init__(self, threads_n: int = 5, timeout: int = 30, name: str = None, show_status: bool = True):
self._max_workers: int = 0
self.queue = Queue()
self.timeout = timeout
self.name = name or self.__class__.__name__
self.logger = ez_get_logger('.'.join((__name__, self.name)), fmt=LOG_FMT_MESSAGE_ONLY)
self.recv_size_queue = Queue()
self.bytes_per_sec = 0
self.emergency_queue = Queue()
self.show_status_interval = 2
self.show_status_enable = show_status
ez_thread_factory(daemon=True)(self.calc_speed).start()
ez_thread_factory(daemon=True)(self.show_status).start()
super().__init__(max_workers=threads_n)
def queue_pipeline(self):
self.logger.debug('queue of {} started'.format(self))
q = self.queue
while True:
args = q.get()
if args is None:
break
url, filepath, retry, kwargs_for_requests = args
self.submit(self.download, url, filepath, retry, **kwargs_for_requests)
self.logger.debug('submit {}'.format(filepath))
self.logger.debug('queue of {} stopped'.format(self))
def show_status(self):
def color(x):
left = colorama.Fore.LIGHTGREEN_EX
right = colorama.Style.RESET_ALL
return left + str(x) + right
colorama.init()
eq = self.emergency_queue
while True:
if self.show_status_enable:
status_msg = f'| {self.name} {color(len(self._threads))}/{self._max_workers} ' \
f'| {color(self.speed):>11} |'
# status_width = len(status_msg)
# preamble = shutil.get_terminal_size()[0] - status_width - 1
# print(' ' * preamble + status_msg, end='\r', file=sys.stderr)
print(status_msg, end='\r', file=sys.stderr)
if not eq.empty():
e = eq.get()
if isinstance(e, Exception):
self.shutdown(wait=False)
raise e
sleep(self.show_status_interval)
@property
def speed(self):
return human_filesize(self.bytes_per_sec, no_space=False) + '/s'
def calc_speed(self):
tl = []
nl = []
q = self.recv_size_queue
while True:
if q.empty():
sleep(0.5)
# print('DEBUG')
continue
t, n = q.get()
tl.append(t)
nl.append(n)
try:
self.bytes_per_sec = sum(nl) // (tl[-1] - tl[0])
except ZeroDivisionError:
self.bytes_per_sec = 0
while time.time() - tl[0] > self.show_status_interval:
tl.pop(0)
nl.pop(0)
def parse_head(self, url, **kwargs_for_requests):
head = requests.head(url, **kwargs_for_requests).headers
        split = head.pop('accept-ranges', '') == 'bytes'
size = int(head.pop('content-length', '-1'))
self.logger.debug('HEAD: split={}, size={}'.format(split, size))
return {'split': split, 'size': size}
def request_data(self, url, filepath, start=0, stop=0, **kwargs_for_requests) -> Download:
# chunk_size = requests.models.CONTENT_CHUNK_SIZE
chunk_size = 4096 * 1024
kwargs = make_requests_kwargs(**kwargs_for_requests)
if stop:
kwargs['headers']['Range'] = 'bytes={}-{}'.format(start, stop - 1)
elif start > 0:
kwargs['headers']['Range'] = 'bytes={}-'.format(start)
elif start < 0:
kwargs['headers']['Range'] = 'bytes={}'.format(start)
r = requests.get(url, stream=True, timeout=self.timeout, **kwargs)
self.logger.debug(HTTPResponseInspection(r, no_content=True))
content = b''
stop = 0
for chunk in r.iter_content(chunk_size=chunk_size):
self.recv_size_queue.put((time.time(), len(chunk)))
start = stop
stop = start + len(chunk)
content += chunk
total = len(content)
fstk.write_file_chunk(filepath, start, stop, chunk, total)
self.logger.debug(HTTPResponseInspection(r, content=content))
d = Download(r, filepath, content=content)
return d
def write_file(self, dl_obj: Download):
url = dl_obj.url
file = dl_obj.file
start = dl_obj.start
stop = dl_obj.stop
size = dl_obj.size
total = dl_obj.total_size
with SubscriptableFileIO(file, 'rb+') as f:
if f.size != total:
f.truncate(total)
f[start: stop] = dl_obj.data
self.logger.debug('w {} ({}) <- {}'.format(file, human_filesize(size), url))
def download(self, url, filepath, retry, **kwargs_for_requests):
tmpfile = filepath + self.tmpfile_suffix
fstk.touch(tmpfile)
for cnt, x in iter_factory_retry(retry)(self.request_data, url, tmpfile, **kwargs_for_requests):
if isinstance(x, Exception):
self.logger.warning('! <{}> {}'.format(type(x).__name__, x))
# self.logger.warning(''.join(traceback.format_tb(x.__traceback__)))
if cnt:
self.logger.info('++ retry ({}) {} <- {}'.format(cnt, filepath, url))
else:
dl_obj = x
break
else:
return
self.write_file(dl_obj)
os.rename(tmpfile, filepath)
self.log_file_done(filepath, dl_obj.size)
def log_file_done(self, filepath, size):
self.logger.info('* {} ({})'.format(filepath, human_filesize(size)))
def file_already_exists(self, filepath):
if os.path.isfile(filepath):
self.logger.info('# {}'.format(filepath))
return True
else:
return False
def log_new_download(self, url, filepath, retry):
self.logger.info('+ {} <- {} (retry={})'.format(filepath, url, retry))
def submit_download(self, url, filepath, retry, **kwargs_for_requests):
if self.file_already_exists(filepath):
return
future = self.submit(self.download, url, filepath, retry, **kwargs_for_requests)
self.log_new_download(url, filepath, retry)
return future
def put_download_in_queue(self, url, filepath, retry, **kwargs_for_requests):
if self.file_already_exists(filepath):
return
self.queue.put((url, filepath, retry, kwargs_for_requests))
self.log_new_download(url, filepath, retry)
def put_end_of_queue(self):
self.queue.put(None)
def start_queue_loop(self):
ez_thread_factory()(self.queue_pipeline).start()
def parse_https_url(url: str, allow_fragments=True) -> ParseResult:
test_parse = urlparse(url)
if not test_parse.scheme and not test_parse.netloc:
url = 'https://' + url
return urlparse(url, allow_fragments=allow_fragments)
def parse_http_url(url: str, allow_fragments=True) -> ParseResult:
test_parse = urlparse(url)
if not test_parse.scheme and not test_parse.netloc:
url = 'http://' + url
return urlparse(url, allow_fragments=allow_fragments)
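# Both helpers simply prepend a scheme when none is given, e.g.:
#     parse_https_url('example.com/path').netloc  # -> 'example.com'
#     parse_http_url('example.com/path').scheme   # -> 'http'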
def make_requests_kwargs(params=None, cookies=None, headers=None, user_agent=None, proxies=None,
**kwargs):
user_agent = user_agent or USER_AGENT_FIREFOX_WIN10
d = dict(headers=headers_from_user_agent(user_agent=user_agent, headers=headers))
if params:
d.update(params=params)
if cookies:
d.update(cookies=cookies)
if proxies:
d.update(proxies=proxies)
d.update(**kwargs)
return d
| 36.047337
| 119
| 0.618516
|
1c111c5aa7ed38d02d9dec9533ba00d530ecbe78
| 1,200
|
py
|
Python
|
cfd_trading/core/order.py
|
vitrvm/cfd_trading
|
d0880a12c805d150df6058764c153712da177b3a
|
[
"MIT"
] | 2
|
2021-04-20T16:37:55.000Z
|
2021-08-09T10:27:43.000Z
|
cfd_trading/core/order.py
|
vitrvm/cfd_trading
|
d0880a12c805d150df6058764c153712da177b3a
|
[
"MIT"
] | null | null | null |
cfd_trading/core/order.py
|
vitrvm/cfd_trading
|
d0880a12c805d150df6058764c153712da177b3a
|
[
"MIT"
] | 1
|
2021-07-11T22:51:37.000Z
|
2021-07-11T22:51:37.000Z
|
from typing import Type
class Order(object):
class Action:
BUY = 0
SELL = 1
class Type:
MARKET = 'MARKET'
LIMIT = 'LIMIT'
STOP = 'STOP'
__id = None
__action = None
__symbol = None
__type = None
__level = None
__stop_distance = None
__stop_level = None
__limit_distance = None
__limit_level = None
@property
def action(self)->Action:
return self.__action
@property
def level(self):
return self.__level
@property
def limitDistance(self):
return self.__limit_distance
@property
def limitLevel(self):
return self.__limit_level
@property
def id(self):
        return self.__id
@property
def type_(self)->Type:
return self.__type
@property
def stopDistance(self):
return self.__stop_distance
@property
def stopLevel(self):
return self.__stop_level
@property
def symbol(self):
return self.__symbol
def router(self):
'''
router class is the interface that connects the Order class with each broker.
'''
raise NotImplementedError()
| 17.142857
| 85
| 0.5925
|
f3475c974b69d80f8701c977c45c2dc90c2848fb
| 1,933
|
py
|
Python
|
atomistic2cg-intramolecular-fitting/fit.py
|
mf2810/atomistic-tools
|
a2b806b718e4c402b7f4e4a8c9c90930720d9c6d
|
[
"MIT"
] | null | null | null |
atomistic2cg-intramolecular-fitting/fit.py
|
mf2810/atomistic-tools
|
a2b806b718e4c402b7f4e4a8c9c90930720d9c6d
|
[
"MIT"
] | null | null | null |
atomistic2cg-intramolecular-fitting/fit.py
|
mf2810/atomistic-tools
|
a2b806b718e4c402b7f4e4a8c9c90930720d9c6d
|
[
"MIT"
] | null | null | null |
import numpy as np
import pylab as plb
import matplotlib.pyplot as plt
import scipy
from scipy.optimize import curve_fit
from scipy import asarray as ar,exp
import sys
print('welcome')
T = 298
bonds = False
k = np.loadtxt('hist.xvg')
##############################################
for i in range(0, len(k)):
k[i,1] = k[i,1]/np.sin(np.radians(k[i,0]))
sumk = np.sum(k[:,1])
for i in range(0, len(k)):
k[i,1] = k[i,1]/sumk
x = ar(k[:,0])
y = ar(k[:,1])
n = len(x) #the number of data
mean = sum(x*y)/n #note this correction
sigma = sum(y*(x-mean)**2)/n #note this correction
def gaussian(x, height, center, width, offset):
return height*np.exp(-(x - center)**2/(2*width**2)) + offset
def three_gaussians(x, h1, c1, w1, h2, c2, w2, h3, c3, w3, offset):
# offset = 0
return (gaussian(x, h1, c1, w1, offset=0) +
gaussian(x, h2, c2, w2, offset=0) +
gaussian(x, h3, c3, w3, offset=0))
def two_gaussians(x, h1, c1, w1, h2, c2, w2, offset):
return three_gaussians(x, h1, c1, w1, h2, c2, w2, 0,0,1, offset=0)
def one_gaussians(x, h1, c1, w1, offset):
return three_gaussians(x, h1, c1, w1,0,0,1,0,0,1, offset=0)
errfunc1 = lambda p, x, y: (one_gaussians(x, *p) - y)**2
guess1 = [sum(y)/n, sum(x)/n, sigma, 0] # I removed the peak I'm not too sure about
optim1, success = scipy.optimize.leastsq(errfunc1, guess1[:], args=(x, y))
if bonds == False:
print('\n\n')
print('theta_0 [degrees],k_theta [kJ mol-1 rad-2]')
print('%.2f\t%.4f' %(optim1[1], (8.3144598/1000)*(T/optim1[2]**2) * (180**2)/(np.pi)**2))
else:
print('\n\n')
print('b_0 [nm] ,k_b [kJ mol-1 nm-2]')
print(optim1[1], (8.3144598/1000)*(T/optim1[2]**2))
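# The force constants above follow from Boltzmann inversion of a harmonic
# potential: a Gaussian distribution with standard deviation sigma corresponds
# to k = R*T / sigma^2, with R = 8.3144598e-3 kJ mol^-1 K^-1. For angles the
# extra (180/pi)^2 factor converts sigma from degrees to radians, giving
# k_theta in kJ mol^-1 rad^-2; for bonds sigma is already in nm, giving k_b in
# kJ mol^-1 nm^-2.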
plt.plot(x, y, lw=5, c='g', label='measurement')
plt.scatter(x, one_gaussians(x, *optim1),
            lw=1, c='g', label='fit of 1 Gaussian')
plt.legend(loc='best')
plt.savefig('result.png')
plt.show()
| 28.850746
| 93
| 0.585101
|
84fb13f6de5e88ebb3e42aac3e9a9d4a1fc77f5b
| 3,556
|
py
|
Python
|
evaluate/sicp.py
|
patrickdillon/sicp-2019
|
a5fc83354541d298b4b8f6736a6d0551b2e4f443
|
[
"MIT"
] | null | null | null |
evaluate/sicp.py
|
patrickdillon/sicp-2019
|
a5fc83354541d298b4b8f6736a6d0551b2e4f443
|
[
"MIT"
] | null | null | null |
evaluate/sicp.py
|
patrickdillon/sicp-2019
|
a5fc83354541d298b4b8f6736a6d0551b2e4f443
|
[
"MIT"
] | 1
|
2019-01-12T21:11:30.000Z
|
2019-01-12T21:11:30.000Z
|
# Copyright (c) 2019 Red Hat
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
"""SPOILER ALERT: below are the solutions to the exercices..."""
import math
from scheme import EvaluateScheme
class EvaluateSICP(EvaluateScheme):
def eval_1_1(self, fpath):
expect = ["10", "12", "8", "3", "6", "19", "#f", "4", "16", "6", "16"]
errors = []
for statement, result in zip(self.iter_statements(fpath), expect):
if statement != result:
errors.append("%s isn't correct" % statement)
return ",".join(errors)
def eval_1_2(self, fpath):
result = self.eval(self.get_statement(fpath))
if result != ";Value: -37/150":
return "Invalid output %s" % result
def eval_1_3(self, fpath):
for statement in self.iter_statements(fpath):
ret = self.eval(statement)
if ret != ";Unspecified return value":
return "[%s] output %s" % (statement, ret)
for test, expected in (("1 2 3", "13"),
("1 1 1", "2"),
("0 0 1", "1")):
f = "(square-sum-larger %s)" % test
ret = self.eval(f)
if ret != ";Value: %s" % expected:
return "%s should have returned %s instead of %s" % (
f, expected, ret)
def eval_1_7(self, fpath):
# Load provided code
for statement in self.iter_statements(fpath):
ret = self.eval(statement)
if ret != ";Unspecified return value":
return "[%s] output %s" % (statement, ret)
# Eval some test and check the precision
for test, precision in ((1e-3, 1e-5), (1e9, 1e-2), (2**53, 1e-1)):
f = "(sqrt %s)" % test
ret = self.eval(f).split()
if ret[0] != ";Value:":
return "%s should have returned a value instead of %s" % (
f, " ".join(ret))
val = float(ret[1])
if abs(math.sqrt(test) - val) > precision:
return "%s is not precise enough (%s != %s)" % (
f, math.sqrt(test), val)
def eval_1_8(self, fpath):
# Load provided code
for statement in self.iter_statements(fpath):
ret = self.eval(statement)
if ret != ";Unspecified return value":
return "[%s] output %s" % (statement, ret)
# Eval some test and check the precision
for test, precision in ((1e-3, 1e-5), (1e9, 1e-2), (2**53, 1e-1)):
f = "(cube-root %s)" % test
ret = self.eval(f).split()
if ret[0] != ";Value:":
return "%s should have returned a value instead of %s" % (
f, " ".join(ret))
val = float(ret[1])
if abs(test**(1/3.) - val) > precision:
return "%s is not precise enough (%s != %s)" % (
f, test**(1/3.), val)
| 40.409091
| 78
| 0.538808
|
f1528e0372dcbb43da62f56668467d1ba51d3ed4
| 70,025
|
py
|
Python
|
astropy/cosmology/tests/test_cosmology.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
astropy/cosmology/tests/test_cosmology.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
astropy/cosmology/tests/test_cosmology.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from io import StringIO
import pytest
import numpy as np
from .. import core, funcs
from ...tests.helper import quantity_allclose as allclose
from ... import units as u
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_init():
""" Tests to make sure the code refuses inputs it is supposed to"""
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
Tcmb0=u.Quantity([0.0, 2], u.K))
with pytest.raises(ValueError):
h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)
cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Ob(1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Odm(1)
with pytest.raises(TypeError):
core.default_cosmology.validate(4)
def test_basic():
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,
Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
# This next test will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
# Make sure setting them as quantities gives the same results
H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
T = u.Quantity(2.0, u.K)
cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
""" Test if the right units are being returned"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H0.unit == u.km / u.Mpc / u.s
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb0.unit == u.K
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu0.unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
def test_clone():
""" Test clone operation"""
cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
z = np.linspace(0.1, 3, 15)
# First, test with no changes, which should return same object
newclone = cosmo.clone()
assert newclone is cosmo
# Now change H0
# Note that H0 affects Ode0 because it changes Ogamma0
newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
assert newclone is not cosmo
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# Compare modified version with directly instantiated one
cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Now try changing multiple things
newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
Tcmb0=2.8 * u.K)
assert newclone.__class__ == cosmo.__class__
assert not newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
assert allclose(newclone.Tcmb0, 2.8 * u.K)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# And direct comparison
cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
Om0=0.27, Tcmb0=2.8 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Try a dark energy class, make sure it can handle w params
cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)
newclone = cosmo.clone(w0=-1.1, wa=0.2)
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert allclose(newclone.H0, cosmo.H0)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ode0, cosmo.Ode0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.w0, cosmo.w0)
assert allclose(newclone.w0, -1.1)
assert not allclose(newclone.wa, cosmo.wa)
assert allclose(newclone.wa, 0.2)
# Now test exception if user passes non-parameter
with pytest.raises(AttributeError):
newclone = cosmo.clone(not_an_arg=4)
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
def test_repr():
""" Test string representation of built in classes"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
expected = 'LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '\
'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, '\
'Ob0=None)'
assert str(cosmo) == expected
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV))
expected = 'LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '\
'Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0.01 0.01 0.01] eV, '\
'Ob0=None)'
assert str(cosmo) == expected
cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05)
expected = 'FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '\
'Tcmb0=3 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, Ob0=0.05)'
assert str(cosmo) == expected
cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1')
expected = 'wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, '\
'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '\
'm_nu=[ 0. 0. 0.] eV, Ob0=None)'
assert str(cosmo) == expected
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')
expected = 'FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, '\
'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)'
assert str(cosmo) == expected
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3')
expected = 'w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, '\
'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '\
'm_nu=[ 0. 0. 0.] eV, Ob0=None)'
assert str(cosmo) == expected
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',
Ob0=0.0456789)
expected = 'Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, '\
'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, '\
'Ob0=0.0457)'
assert str(cosmo) == expected
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,
zp=0.3, name='test5')
expected = 'wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, '\
'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, '\
'Neff=3.04, m_nu=None, Ob0=None)'
assert str(cosmo) == expected
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725,
m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))
expected = 'w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '\
'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, '\
'm_nu=[ 0.001 0.01 0.015] eV, Ob0=None)'
assert str(cosmo) == expected
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_z1():
""" Test a flat cosmology at z=1 against several other on-line
calculators.
"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
z = 1
# Test values were taken from the following web cosmology
# calculators on 27th Feb 2012:
# Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
# (http://adsabs.harvard.edu/abs/2006PASP..118.1711W)
# Kempner: http://www.kempner.net/cosmic.php
# iCosmos: http://www.icosmos.co.uk/index.html
    # The order of values below is Wright, Kempner, iCosmos
assert allclose(cosmo.comoving_distance(z),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(z),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(z),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(z),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(z),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
def test_zeroing():
""" Tests if setting params to 0s always respects that"""
# Make sure Ode = 0 behaves that way
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Ode(1), 0)
# Ogamma0 and Onu
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# Obaryon
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0,
name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0,
m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmo calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif('not HAS_SCIPY')
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a mathematica
computation"""
# w0wa models
z = np.array([0.2, 0.4, 0.9, 1.2])
cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.w0, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.9)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = core.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
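    # Illustrative note on the constants just below: Ogamma0*h^2 is the photon
    # mass density 4*sigma_SB*Tcmb0^4/c^3 divided by the h=1 critical density
    # 1.87837e-26 kg/m^3, and the neutrino term adds the standard
    # 7/8*(4/11)^(4/3) factor per effective species (here Neff = 3.04).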
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(np.int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
""" Test that efunc and inv_efunc give inverse values"""
# Note that all of the subclasses here don't need
# scipy because they don't need to call de_density_scale
# The test following this tests the case where that is needed.
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# Below are the 'standard' included cosmologies
# We do the non-standard case in test_efunc_vs_invefunc_flrw,
# since it requires scipy
cosmo = core.LambdaCDM(70, 0.3, 0.5)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatLambdaCDM(50.0, 0.27)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # test against Ned Wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
def test_wz():
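    # Illustrative note: the equation-of-state parameterizations checked below
    # are
    #   LambdaCDM : w(z) = -1
    #   wCDM      : w(z) = w0
    #   w0wzCDM   : w(z) = w0 + wz * z
    #   w0waCDM   : w(z) = w0 + wa * z / (1 + z)   (CPL form)
    #   wpwaCDM   : w(z) = wp + wa * (a_p - a), with a = 1/(1+z), a_p = 1/(1+zp)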
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
assert allclose(cosmo.w(1.0), -1.)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-1., -1, -1, -1, -1, -1])
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
assert allclose(cosmo.w0, -0.5)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wz, 0.5)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wa, -0.5)
assert allclose(cosmo.w(1.0), -1.25)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.wp, -0.9)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.w(0.5), -0.9)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.critical_density0,
9.309668456020899e-30 * u.g / u.cm**3)
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(tcos.critical_density([1, 5]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
assert allclose(tcos.critical_density([1., 5.]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
-969.34452994,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
# Test no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05,
Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV))
assert allclose(tcos.Neff, 4.05)
assert not tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 4
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
rtol=1e-6)
assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
rtol=1e-6)
# Alternative no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K,
m_nu=u.Quantity(0.4, u.eV))
assert not tcos.has_massive_nu
assert tcos.m_nu is None
# Test basic setting, retrieval of values
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K,
m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
# All massive neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725,
m_nu=u.Quantity(0.1, u.eV), Neff=3.1)
assert allclose(tcos.Neff, 3.1)
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
    # for FlatLambdaCDM to give the scalar nu density functions a workout
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
    # is only good to ~ 0.5% (with some redshift dependence), so that is
    # the accuracy we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
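    # Illustrative note: 7/8 is the fermion-vs-boson energy density factor and
    # (4/11)^(1/3) the neutrino-to-photon temperature ratio after e+e-
    # annihilation, so each massless species carries 7/8*(4/11)^(4/3) of the
    # photon energy density before the massive-neutrino correction f(y) applies.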
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * \
np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(np.int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# there we are checking internal consistency on the same architecture
# and so can be more demanding
z_at_value = funcs.z_at_value
cosmo = core.Planck13
d = cosmo.luminosity_distance(3)
assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
rtol=1e-8)
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
1.3685790653802761, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
0.7951983674601507, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmax=2), 0.68127769625288614, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmin=2.5), 3.7914908028272083, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
1.9913891680278133, rtol=1e-6)
# test behaviour when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_roundtrip():
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck13 cosmology
    # they are redshift independent and hence uninvertible; the
    # *_distance_z1z2 methods take multiple arguments, so they require
    # special handling.
# clone isn't a redshift-dependent method
skip = ('Ok',
'angular_diameter_distance_z1z2',
'clone',
'de_density_scale', 'w')
import inspect
methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
print('Round-trip testing {0}'.format(name))
fval = func(z)
# we need zmax here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
# Test distance functions between two redshifts
z2 = 2.0
func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
lambda z1: \
core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: \
core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
for func in func_z1z2:
fval = func(z)
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
| 44.887821
| 85
| 0.592046
|
5274d493c56d1ffa4c606f9c1ae028bc7fa6bd80
| 819
|
py
|
Python
|
utils.py
|
CRIPAC-DIG/DESTINE
|
9ef2e1583ec8b6ef63971b83a05f3a3c924a4e3f
|
[
"MIT"
] | 4
|
2021-11-29T14:32:00.000Z
|
2022-02-01T11:24:49.000Z
|
utils.py
|
CRIPAC-DIG/DESTINE
|
9ef2e1583ec8b6ef63971b83a05f3a3c924a4e3f
|
[
"MIT"
] | 2
|
2021-11-28T09:27:53.000Z
|
2022-02-18T08:51:03.000Z
|
utils.py
|
CRIPAC-DIG/DESTINE
|
9ef2e1583ec8b6ef63971b83a05f3a3c924a4e3f
|
[
"MIT"
] | null | null | null |
import torch
import hashlib
class EarlyStopper(object):
def __init__(self, num_trials, save_path):
self.num_trials = num_trials
self.trial_counter = 0
self.best_accuracy = 0
self.save_path = save_path
def is_continuable(self, model, accuracy):
if accuracy > self.best_accuracy:
self.best_accuracy = accuracy
self.trial_counter = 0
torch.save(model, self.save_path)
return True
elif self.trial_counter + 1 < self.num_trials:
self.trial_counter += 1
return True
else:
return False
def load_best(self):
return torch.load(self.save_path)
def hash_dict(m: dict):
b = str(m).encode()
return hashlib.sha224(b).hexdigest()
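# Illustrative usage sketch for EarlyStopper.  The callables `train_one_epoch`
# and `evaluate` are placeholders supplied by the caller; nothing here is
# assumed to exist elsewhere in this repository.
def fit_with_early_stopping(model, train_one_epoch, evaluate, save_path,
                            num_trials=5, max_epochs=100):
    """Train until the validation accuracy stops improving for `num_trials`
    consecutive epochs, then return the best checkpoint saved by EarlyStopper."""
    stopper = EarlyStopper(num_trials=num_trials, save_path=save_path)
    for _ in range(max_epochs):
        train_one_epoch(model)       # caller-provided training step
        accuracy = evaluate(model)   # caller-provided validation metric
        if not stopper.is_continuable(model, accuracy):
            break                    # patience exhausted
    return stopper.load_best()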
| 26.419355
| 55
| 0.590965
|
7c6ee6475357d1cf87457575abb17489e239fba1
| 14,177
|
py
|
Python
|
Tests/interop/net/derivation/test_property_override.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | 2
|
2019-09-21T22:22:30.000Z
|
2020-05-09T12:45:51.000Z
|
Tests/interop/net/derivation/test_property_override.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
Tests/interop/net/derivation/test_property_override.py
|
dsonbill/IronPython3-NETCore
|
8c76bdbec1754233f04b41ecd28e9bae2c862fd0
|
[
"Apache-2.0"
] | null | null | null |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
How to re-define a property in Python.
'''
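# Summary of the pattern exercised by the tests below: subclass the CLR
# interface or base class, implement get_Xxx/set_Xxx in Python, and bind them
# with Xxx = property(get_Xxx, set_Xxx).  Reflection-based access from the CLR
# side (PropertyInfo.GetValue/SetValue, or the descriptor's __get__/__set__)
# then dispatches to the Python override; CLR indexers map onto
# __getitem__/__setitem__.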
import unittest
from iptest import IronPythonTestCase, is_netstandard, is_mono, run_test, skipUnlessIronPython
@skipUnlessIronPython()
class PropertyOverrideTest(IronPythonTestCase):
def setUp(self):
super(PropertyOverrideTest, self).setUp()
if is_mono:
self.add_clr_assemblies("baseclasscs", "typesamples")
else:
self.add_clr_assemblies("baseclasscs", "baseclassvb", "typesamples")
def test_read_write_interface(self):
from Merlin.Testing.BaseClass import IProperty10
class C(IProperty10):
def set_IntProperty(self, value):
self.field = value
def get_IntProperty(self):
return self.field
def bad_set(self, arg1, arg2): pass
x = C()
p = IProperty10.IntProperty
# exception message: bug 372518
#p.__set__(x, 10)
#self.assertEqual(p.GetValue(x), 10)
C.IntProperty = property(C.get_IntProperty, C.set_IntProperty)
p.__set__(x, 20)
self.assertEqual(p.GetValue(x), 20)
self.assertTrue(not hasattr(IProperty10, 'set_IntProperty'))
self.assertTrue(not hasattr(IProperty10, 'get_IntProperty'))
# negative
self.assertRaises(TypeError, lambda: p.SetValue(x, 'str'))
del C.IntProperty # workaround: bug 327528
C.IntProperty = property(C.get_IntProperty, C.bad_set)
self.assertRaises(TypeError, lambda: p.__set__(x, 1))
self.assertRaises(TypeError, lambda: p.__set__(x, 1, 2))
class C(IProperty10):
def __init__(self):
self.field = 30
@property
def IntProperty(self):
return self.field
x = C()
self.assertEqual(p.GetValue(x), 30)
self.assertRaisesMessage(AttributeError, "readonly attribute", lambda: p.__set__(x, 40))
def test_readonly_interface(self):
from Merlin.Testing.BaseClass import IProperty11
class C(IProperty11):
def set_StrProperty(self, value):
self.field = value
def get_StrProperty(self):
return self.field
StrProperty = property(get_StrProperty, set_StrProperty)
x = C()
p = IProperty11.StrProperty
p.__set__(x, 'abc') # no-op equivalent?
if is_netstandard: # TODO: revert this once System.SystemException is added to netstandard (https://github.com/IronLanguages/main/issues/1399)
self.assertRaises(Exception, lambda: p.SetValue(x, 'def')) # ?
else:
self.assertRaises(SystemError, lambda: p.SetValue(x, 'def')) # ?
self.assertRaises(AttributeError, lambda: x.field) # make sure x.field not set yet
x.field = 'python'
self.assertEqual(p.GetValue(x), 'python')
self.assertEqual(p.__get__(x), 'python')
def test_writeonly_interface(self):
from Merlin.Testing.BaseClass import IProperty12
class C(IProperty12):
def set_DoubleProperty(self, value):
self.field = value
def get_DoubleProperty(self):
return self.field
DoubleProperty = property(get_DoubleProperty, set_DoubleProperty)
x = C()
p = IProperty12.DoubleProperty
p.__set__(x, 1.23)
self.assertEqual(x.field, 1.23)
for l in [ p.GetValue, p.__get__]:
self.assertRaisesMessage(AttributeError, "unreadable property", l, x)
def test_csindexer(self):
from Merlin.Testing.BaseClass import Callback, IIndexer20
class C(IIndexer20):
def __init__(self):
self.dict = {}
def __setitem__(self, index, value):
self.dict[index] = value
def __getitem__(self, index):
return self.dict[index]
x = C()
#IIndexer20.set_Item(x, 1, 'One') # bug 363289
#IIndexer20.set_Item(x, 2, 'Two')
#self.assertEqual(IIndexer20.get_Item(x, 2), 'Two')
x.dict = {1 : 'One', 2 : 'Two'}
Callback.On1(x)
self.assertEqual(x.dict[1], 'one')
self.assertEqual(x.dict[2], 'TWO')
x[3] = 'Three'
self.assertEqual('TWO', x[2])
class C(IIndexer20):
def __init__(self):
self.field = "start"
def __setitem__(self, index, value):
self.field = "-%s %s %s" % (index[0], index[1], value)
def __getitem__(self, index):
return self.field + "-%s %s" % (index[0], index[1])
# experimental
def set_Item(self, index1, index2, value):
self.field = "+%s %s %s" % (index1, index2, value)
def get_Item(self, index1, index2):
return self.field + "+%s %s" % (index1, index2)
x = C()
#Callback.On2(x) # bug 372940
#self.assertEqual(x.field, "+1 2 start+3 4inside clr")
x = C()
x[1, 2] = x[3, 4] + "something"
self.assertEqual(x.field, "-1 2 start-3 4something")
@unittest.skipIf(is_mono, 'VB compile currently failing https://github.com/IronLanguages/main/issues/1438')
def test_vbindexer(self):
from Merlin.Testing.BaseClass import CVbIndexer30, IVbIndexer10, IVbIndexer11, IVbIndexer20, VbCallback
class C(IVbIndexer10):
def __init__(self):
self.f = 1
'''
def set_IntProperty(self, index, value):
self.f = self.f + index + value
def get_IntProperty(self, index):
return self.f
'''
def __setitem__(self, index, value):
self.f = self.f + index + value
def __getitem__(self, index):
return self.f + index
x = C()
VbCallback.Act(x)
self.assertEqual(x.f, 1112)
# TODO: I doubt it works for now
class C(IVbIndexer11):
pass
class C(IVbIndexer20):
def __init__(self):
self.f = 0
def set_DoubleProperty(self, index, value):
self.f = index * 0.1 + value
def get_DoubleProperty(self, index):
return self.f + index * 0.01
x = C()
# VbCallback.Act(x)
# currently AttributeError: 'C' object has no attribute 'get_DoubleProperty'
# TODO
class C(CVbIndexer30):
pass
@unittest.skipIf(is_mono, "mono doesn't handle this properly, needs debug https://github.com/IronLanguages/main/issues/1593")
def test_virtual_property(self):
from Merlin.Testing.BaseClass import Callback, CProperty30
class C(CProperty30):
pass
x = C()
Callback.On(x)
self.assertEqual(x.Property, 220)
self.assertTrue(not hasattr(CProperty30, 'set_Property'))
self.assertTrue(not hasattr(CProperty30, 'get_Property'))
class C(CProperty30):
def __init__(self):
self.field = 3
def get_Property(self):
return self.field;
def set_Property(self, value):
self.field = value + 30
x = C()
Callback.On(x)
        self.assertEqual(x.field, 233) # we read field, we added 200 from C#, and added 30 ourselves
self.assertEqual(x.Property, 233)
x.field = 3
C.Property = property(C.get_Property, C.set_Property)
Callback.On(x)
self.assertEqual(x.field, 233)
self.assertEqual(x.Property, 233)
self.assertTrue(not hasattr(CProperty30, 'set_Property'))
self.assertTrue(not hasattr(CProperty30, 'get_Property'))
del C.Property # workaround: remove after bug 327528
C.Property = property(C.get_Property)
self.assertRaisesMessage(AttributeError,
"readonly attribute",
Callback.On, x)
def test_abstract_property(self):
from Merlin.Testing.BaseClass import Callback, CProperty31
class C(CProperty31):
pass
x = C()
self.assertRaises(AttributeError, Callback.On, x)
class C(CProperty31):
def __init__(self):
self.field = 1
def get_PropertyX(self):
return self.field;
def set_PropertyX(self, value):
self.field = value + 10
Property = property(get_PropertyX, set_PropertyX)
x = C()
Callback.On(x)
self.assertEqual(x.field, 111)
x = C()
self.assertTrue(not hasattr(CProperty31, 'get_Property'))
self.assertTrue(not hasattr(CProperty31, 'set_Property'))
self.assertTrue(not hasattr(x, 'get_Property'))
self.assertTrue(not hasattr(x, 'set_Property'))
def test_final_property(self):
from Merlin.Testing.BaseClass import Callback, CProperty32
class C(CProperty32):
pass
x = C()
Callback.On(x) # 0 - 4 + 400 + 40
pv = x.Property # -4
self.assertEqual(432, pv)
class C(CProperty32):
def __init__(self):
self.field = 5
def get_Property(self):
return self.field;
def set_Property(self, value):
self.field = value + 50
Property = property(get_Property, set_Property)
x = C()
Callback.On(x)
#self.assertEqual(x.Property, 5) # bug 372831
x.Property = 6
#self.assertEqual(x.Property, 56)
def test_static_property(self):
from Merlin.Testing.BaseClass import CProperty33
class C(CProperty33):
pass
x = C()
CProperty33.Property = 6
self.assertEqual(CProperty33.Property, 66)
self.assertEqual(x.Property, 66)
self.assertEqual(C.Property, 66)
## test order matters here: x -> C
#x.Property = 7 # bug 372840
#self.assertEqual(x.Property, 7)
#self.assertEqual(CProperty33.Property, 66)
C.Property = 8
self.assertEqual(C.Property, 8)
self.assertEqual(CProperty33.Property, 66)
def test_readonly_writeonly_indexer(self):
from Merlin.Testing.BaseClass import IIndexer21, IIndexer22
def create_type(base):
class C(base):
def __init__(self):
self.f = 1
def __setitem__(self, index, value):
self.f = index + value
def __getitem__(self, index):
return self.f + index
return C
RO, WO = map(create_type, [IIndexer21, IIndexer22])
x = RO()
self.assertEqual(IIndexer21.__getitem__(x, 10), 11)
self.assertRaises(AttributeError, lambda: IIndexer21.__setitem__)
x[10] = 100
self.assertEqual(x.f, 110)
self.assertEqual(x[1000], 1110)
x = WO()
IIndexer22.__setitem__(x, 10, 100)
self.assertEqual(x.f, 110)
#self.assertRaises(AttributeError, lambda: IIndexer22.__getitem__) # ??
#IIndexer22.__getitem__(x, 1000) # otherwise
def test_super_on_property(self):
from Merlin.Testing.BaseClass import Callback, CProperty30
class C(CProperty30):
def get_Property(self):
return super(C, self).Property
def set_Property(self, value):
CProperty30.Property.SetValue(self, value + 500)
Property = property(get_Property, set_Property)
x = C()
Callback.On(x) # read (base_read)/+200/+500/write (base_write/+20)
self.assertEqual(x.Property, 720)
x = C()
x.Property = 1
self.assertEqual(x.Property, 521)
## bad user code attempt: use 'Property' directly
class C(CProperty30):
def get_Property(self):
return super(C, self).Property
def set_Property(self, value):
super(C, self).Property = value
Property = property(get_Property, set_Property)
x = C()
self.assertRaises(AttributeError, Callback.On, x)
self.assertEqual(x.Property, 0) # read
def f(): x.Property = 1 # write
self.assertRaises(AttributeError, f) # cannot set slot
        #similar scenario in CPython: TypeError: 'super' object has only read-only attributes (assign to .Property)
def test_super_on_default_index(self):
from Merlin.Testing.BaseClass import Callback, CIndexer40
class C(CIndexer40):
def __setitem__(self, index, value):
super(C, self).__setitem__(index, value)
def __getitem__(self, index):
return super(C, self).__getitem__(index)
x = C()
Callback.On(x)
self.assertEqual(x[0], 12)
x[1] = 90
self.assertEqual(x[1], 90)
run_test(__name__)
| 36.44473
| 150
| 0.560062
|
160d19d6d3550107ec08e15d1a9c7c155bfd05d2
| 7,583
|
py
|
Python
|
app/bwlist/test.py
|
gheros/testlb
|
cd7d1ee62b15a4658c28f50c440b9d4086974e15
|
[
"MIT"
] | null | null | null |
app/bwlist/test.py
|
gheros/testlb
|
cd7d1ee62b15a4658c28f50c440b9d4086974e15
|
[
"MIT"
] | null | null | null |
app/bwlist/test.py
|
gheros/testlb
|
cd7d1ee62b15a4658c28f50c440b9d4086974e15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
from socket import inet_aton, inet_ntoa
from struct import unpack, pack
def _check_ip(ip_add):
"""
common func
"""
p = re.compile(r'^(([01]?[\d]{1,2})|(2[0-4][\d])|(25[0-5]))' \
r'(\.(([01]?[\d]{1,2})|(2[0-4][\d])|(25[0-5]))){3}(\/(\d+))?$')
return p.search(str(ip_add)) is not None
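# Illustrative note (added commentary, not part of the original module): _check_ip only
# validates dotted-quad syntax and allows an optional '/<digits>' suffix whose value is
# not range-checked. Under the regex above, for example:
#   _check_ip('192.168.0.1')     -> True
#   _check_ip('192.168.0.0/24')  -> True
#   _check_ip('192.18.1.1/100')  -> True   (prefix length is not validated)
#   _check_ip('10.0.0.256')      -> False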
def isValidIP(ip_add):
"""
Return the validity of the IP
>>>eisoopylib.isValidIP("192.168.0.1")
True
>>>eisoopylib.isValidIP("192.168.0")
False
>>>eisoopylib.isValidIP("test")
False
>>>eisoopylib.isValidIP("10.0.0.256")
False
etc.
"""
if _check_ip(ip_add):
return True
return False
def isValidMask(mask):
"""
Return the validity of the mask
>>>eisoopylib.isValidMask("255.255.255.0")
True
>>>eisoopylib.isValidMask("192.168.0")
False
>>>eisoopylib.isValidMask("test")
False
>>>eisoopylib.isValidMask("0.0.0.0")
False
>>>eisoopylib.isValidMask("255.255.255.255")
True
etc.
"""
try:
if _check_ip(mask):
mask_num, = unpack("!I", inet_aton(mask))
if mask_num == 0:
return False
# get inverted
mask_num = ~mask_num + 1
binstr = bin(mask_num)[3:]
# convert to positive integer
binstr = '0b%s' % ''.join('1' if b == '0' else '0' for b in binstr)
mask_num = int(binstr, 2) + 1
# check 2^n
return (mask_num & (mask_num - 1) == 0)
return False
except Exception:
return False
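# Worked example for the check above (added commentary, not part of the original module):
# for '255.255.255.0' the inverted, bit-flipped value plus one is 256, and 256 & 255 == 0,
# so the mask is accepted; for a non-contiguous mask such as '255.0.255.0' the same steps
# yield 16711936, which is not a power of two, so the mask is rejected.
#
# A minimal equivalent sketch of the same test, assuming 32-bit masks (hypothetical
# helper for illustration only, not used by this module):
def _is_contiguous_mask(mask_num):
    """Return True when the set bits of a 32-bit mask are contiguous from the MSB."""
    inverted = ~mask_num & 0xFFFFFFFF          # e.g. 0xFFFFFF00 -> 0x000000FF
    # a contiguous mask inverts to a value one less than a power of two
    return mask_num != 0 and (inverted + 1) & inverted == 0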
def isUseableIP(ip_add, mask=None):
"""
Return the availability of the IP
>>>eisoopylib.isUseableIP("192.168.0.1", "255.255.255.0")
True
>>>eisoopylib.isUseableIP("127.0.0.1") //Loopback address
False
>>>eisoopylib.isUseableIP("224.0.0.1") //Multicast address(224.0.0.0 - 239.255.255.255)
False
>>>eisoopylib.isUseableIP("169.254.0.1") //Failed dhcp allocation IP(169.254.x.x)
False
>>>eisoopylib.isUseableIP("192.168.77.128", "255.255.255.128") //Network number is 1
False
etc.
"""
if _check_ip(ip_add):
ip_split = ip_add.split('.')
        # An IP address whose first octet is 0 is not usable
        if ip_split[0] == '0':
            return False
        # An IP address whose first octet is 255 is not usable
        if ip_split[0] == '255':
            return False
        # A loopback address (127.x.x.x) is not usable
        if ip_split[0] == '127':
            return False
        # A link-local address (169.254.x.x, i.e. a failed DHCP allocation) is not usable
        if ip_split[0] == '169' and ip_split[1] == '254':
            return False
        ip_num = ip2int(ip_add)
        # binary string, left-padded with zeros to 32 bits
        ip_bit = bin(ip_num)[2:].zfill(32)
        # filter out the all-zero address
        if ip_num == 0:
            return False
        # class A address: default mask is 255.0.0.0
        if ip_bit[0] == '0':
            mask = mask or "255.0.0.0"
        # class B address: default mask is 255.255.0.0
        elif ip_bit[:2] == '10':
            mask = mask or "255.255.0.0"
        # class C address: default mask is 255.255.255.0
        elif ip_bit[:3] == '110':
            mask = mask or "255.255.255.0"
        # any other address class is not usable
        else:
            return False
        # an invalid mask makes the address unusable
        if not isValidMask(mask):
            return False
        # compute the subnet from the mask; the network address itself is not usable
        subnet = calcSubnet(ip_add, mask)
        if ip_add == subnet:
            return False
        # compute the broadcast address from the subnet and mask; the broadcast address is not usable
        if ip_add == calcBroadcastBySubnet(subnet, mask):
            return False
return True
else:
return False
def ip2int(ip_add):
"""
Return the decimal number of the IP
>>>eisoopylib.ip2int("192.168.0.1")
3232235521
etc.
"""
try:
if _check_ip(ip_add):
result = unpack("!I", inet_aton(ip_add))
return result[0]
else:
return False
except ValueError:
return False
def int2ip(int_num):
"""
Return the IP of the valid decimal number
>>>eisoopylib.int2ip(3232235521)
192.168.0.1
etc.
"""
try:
return inet_ntoa(pack("!I", int_num))
except Exception:
return False
def calcSubnet(ip_add, mask):
"""
Return the sub net of the network
>>>eisoopylib.calcSubnet("192.168.0.1", "255.255.255.0")
192.168.0.0
etc.
"""
if _check_ip(ip_add) and _check_ip(mask):
ip_num, = unpack("!I", inet_aton(ip_add))
mask_num, = unpack("!I", inet_aton(mask))
subnet_num = ip_num & mask_num
return inet_ntoa(pack("!I", subnet_num))
else:
return False
def calcHostNum(mask):
"""
Return the host numbers of the network
>>>eisoopylib.calcHostNum("255.255.255.0")
254
etc.
"""
try:
if isValidMask(mask):
bit_num = bin(ip2int(mask)).count('1')
return (2 ** (32 - bit_num)) - 2
return False
except Exception:
return False
def isInSameNetwork(ip_add1, ip_add2, mask):
"""
Return ip_add1 and ip_add2 in same network
>>>eisoopylib.isInSameNetwork("192.168.77.1", "192.168.77.2", "255.255.255.0")
True
>>>eisoopylib.isInSameNetwork("192.168.77.1", "192.168.8.2", "255.255.0.0")
True
>>>eisoopylib.isInSameNetwork("192.168.77.1", "192.168.8.2", "255.255.255.0")
False
"""
if _check_ip(ip_add1) and _check_ip(ip_add2) and _check_ip(mask) \
and isValidMask(mask):
ip1_num, = unpack("!I", inet_aton(ip_add1))
ip2_num, = unpack("!I", inet_aton(ip_add2))
mask_num, = unpack("!I", inet_aton(mask))
if ip1_num & mask_num != ip2_num & mask_num:
return False
else:
return True
def calcBroadcast(ip_add, mask):
"""
Return the broadcast
>>>eisoopylib.calcHostNum("192.168.77.12", "255.255.255.128")
192.168.77.127
"""
subnet = calcSubnet(ip_add, mask)
if not subnet:
return False
return calcBroadcastBySubnet(subnet, mask)
def calcBroadcastBySubnet(subnet, mask):
"""
Return the broadcast
>>>eisoopylib.calcHostNum("192.168.77.12", "255.255.255.128")
192.168.77.127
"""
if not isValidMask(mask):
return False
try:
subnet_num = ip2int(subnet)
        # count the network (prefix) bits in the mask
        host_bit = bin(ip2int(mask)).count('1')
        # set the remaining 32 - host_bit host bits to 1
binstr = ''
if host_bit < 32:
binstr = bin(subnet_num)[host_bit - 32:]
binstr = ''.join('1' for b in binstr)
binstr = ''.join([bin(subnet_num)[:host_bit + 2], binstr])
broadcast_num = int(binstr, 2)
return int2ip(broadcast_num)
except Exception:
return False
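# Worked example for the bit manipulation above (added commentary, not part of the
# original module): for subnet '192.168.77.0' and mask '255.255.255.128' the mask has
# 25 one-bits, so the last 32 - 25 = 7 bits of the subnet are forced to '1', giving
# the broadcast address '192.168.77.127'.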
def isNetConflict(ip_addr1, mask1, ip_addr2, mask2):
"""
    Return whether the two networks conflict
>>>eisoopylib.isNetConflict("192.168.77.1", "255.255.255.0", "192.168.77.2", "255.255.255.0")
False
"""
subnet1 = calcSubnet(ip_addr1, mask1)
if not subnet1:
return False
subnet2 = calcSubnet(ip_addr2, mask2)
if not subnet2:
return False
if subnet1 == subnet2:
return False
print(isInSameNetwork("192.168.77.1", "192.168.77.2", "255.255.255.252"))
print(_check_ip('192.18.1.1/100'))
print(inet_aton('192.168.1.16'))
def exchange_maskint(mask_int):
bin_arr = ['0' for i in range(32)]
for i in range(mask_int):
bin_arr[i] = '1'
tmpmask = [''.join(bin_arr[i * 8:i * 8 + 8]) for i in range(4)]
tmpmask = [str(int(tmpstr, 2)) for tmpstr in tmpmask]
return '.'.join(tmpmask)
print(exchange_maskint(24))
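# Additional illustrative values for exchange_maskint (added commentary, not part of the
# original module); with the implementation above:
#   exchange_maskint(24) -> '255.255.255.0'
#   exchange_maskint(20) -> '255.255.240.0'
#   exchange_maskint(0)  -> '0.0.0.0'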
| 24.540453
| 97
| 0.563365
|
acd7c0943cd35c3f9a9344cf981f867aae07d66e
| 463
|
py
|
Python
|
04_oanda/viz.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2017-07-02T09:06:28.000Z
|
2020-09-11T04:23:14.000Z
|
04_oanda/viz.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2021-03-31T19:14:07.000Z
|
2021-06-01T23:34:32.000Z
|
04_oanda/viz.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2016-03-29T07:51:16.000Z
|
2016-10-30T04:53:58.000Z
|
import json
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
with open('data/EURUSD3600.json') as f:
data = json.loads(f.read())
data = OrderedDict(sorted(data.items()))
for i, v in data.items():
    print('timestamp', i)
    print(v['rate'])
    points = OrderedDict(sorted(v['price_points'].items()))
    for k, d in points.items():
        print(k)
    print(points)
    plt.scatter(i, v['rate'])
plt.show()
| 20.130435
| 59
| 0.667387
|
e76af242f3986d7c4eaba4e0761c17d412230fe6
| 331
|
py
|
Python
|
migrations/20211223_01_g7Y82-change-comment-on-holdemgamerecord.py
|
zw-g/Funny-Nation
|
bcb72e802e0ff46b4a409c5d51fc8b10e0987463
|
[
"MIT"
] | 126
|
2022-01-15T02:29:07.000Z
|
2022-03-30T09:57:40.000Z
|
migrations/20211223_01_g7Y82-change-comment-on-holdemgamerecord.py
|
zw-g/Funny-Nation
|
bcb72e802e0ff46b4a409c5d51fc8b10e0987463
|
[
"MIT"
] | 18
|
2022-01-11T22:24:35.000Z
|
2022-03-16T00:13:01.000Z
|
migrations/20211223_01_g7Y82-change-comment-on-holdemgamerecord.py
|
zw-g/Funny-Nation
|
bcb72e802e0ff46b4a409c5d51fc8b10e0987463
|
[
"MIT"
] | 25
|
2022-01-22T15:06:27.000Z
|
2022-03-01T04:34:19.000Z
|
"""
change comment on holdemGameRecord
"""
from yoyo import step
__depends__ = {'20211128_01_Mn7Ng-create-holdem-game-record-table'}
steps = [
step("ALTER TABLE `holdemGameRecord` CHANGE `status` `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose or fold; 2 represent win; 3 represent game close';")
]
| 27.583333
| 182
| 0.743202
|
e2d5716313f850b5bd5c95b4dd3f6327671bc343
| 4,622
|
py
|
Python
|
services/web/apps/inv/reportdiscoveryproblem/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/web/apps/inv/reportdiscoveryproblem/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/web/apps/inv/reportdiscoveryproblem/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# inv.reportdiscovery
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from django import forms
# NOC modules
from noc.lib.app.simplereport import SimpleReport
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.managedobject import ManagedObjectProfile
from noc.inv.models.interface import Interface
from noc.inv.models.link import Link
from noc.sa.models.objectdata import ObjectData
from noc.main.models.pool import Pool
from noc.sa.models.profile import Profile
from noc.inv.models.platform import Platform
from noc.inv.models.networksegment import NetworkSegment
from noc.sa.models.useraccess import UserAccess
from noc.core.translation import ugettext as _
from noc.core.profile.loader import GENERIC_PROFILE
from noc.sa.models.objectstatus import ObjectStatus
class ReportDiscoveryTopologyProblemApplication(SimpleReport):
title = _("Discovery Topology Problems")
def get_form(self):
class ReportForm(forms.Form):
pool = forms.ChoiceField(
label=_("Managed Objects Pools"),
required=True,
choices=list(Pool.objects.order_by("name").scalar("id", "name"))
+ [(None, "-" * 9)],
)
obj_profile = forms.ModelChoiceField(
label=_("Managed Objects Profile"),
required=False,
queryset=ManagedObjectProfile.objects.order_by("name"),
)
available_only = forms.BooleanField(
label=_("Managed Objects Profile"),
required=False,
)
return ReportForm
def get_data(self, request, pool=None, obj_profile=None, available_only=False, **kwargs):
problems = {} # id -> problem
mos = ManagedObject.objects.filter(is_managed=True, pool=pool)
if not request.user.is_superuser:
mos = mos.filter(administrative_domain__in=UserAccess.get_domains(request.user))
if obj_profile:
# Get all managed objects
mos = mos.filter(object_profile=obj_profile)
mos = {
mo[0]: (mo[1], mo[2], Profile.get_by_id(mo[3]), mo[4], mo[5])
for mo in mos.values_list("id", "name", "address", "profile", "platform", "segment")
}
mos_set = set(mos)
if available_only:
statuses = ObjectStatus.get_statuses(list(mos_set))
mos_set = {mo for mo in mos_set if statuses.get(mo)}
# Get all managed objects with generic profile
for mo in mos:
if mos[mo][2] == GENERIC_PROFILE:
problems[mo] = _("Profile check failed")
# Get all managed objects without interfaces
if_mo = {
x["_id"]: x.get("managed_object")
for x in Interface._get_collection().find({}, {"_id": 1, "managed_object": 1})
}
for mo in mos_set - set(problems) - set(if_mo.values()):
problems[mo] = _("No interfaces")
# Get all managed objects without links
linked_mos = set()
for d in Link._get_collection().find({}):
for i in d["interfaces"]:
linked_mos.add(if_mo.get(i))
for mo in mos_set - set(problems) - linked_mos:
problems[mo] = _("No links")
# Get all managed objects without uplinks
uplinks = {}
for d in ObjectData._get_collection().find():
nu = len(d.get("uplinks", []))
if nu:
uplinks[d["_id"]] = nu
for mo in mos_set - set(problems) - set(uplinks):
problems[mo] = _("No uplinks")
#
data = []
for mo_id in problems:
if mo_id not in mos:
continue
name, address, profile, platform, segment = mos[mo_id]
data += [
[
name,
address,
profile.name,
Platform.get_by_id(platform).name if platform else "",
NetworkSegment.get_by_id(segment).name if segment else "",
problems[mo_id],
]
]
data = sorted(data)
return self.from_dataset(
title=self.title,
columns=["Name", "Address", "Profile", "Platform", "Segment", "Problem"],
data=data,
enumerate=True,
)
| 39.504274
| 96
| 0.559931
|
b879eba17f70605bece36e275e9e3193ce7da619
| 2,953
|
py
|
Python
|
publications/views.py
|
LuizFelipeGondim/AUline
|
04e59770b7c835962e8c6f1ea49f853cac716efb
|
[
"MIT"
] | null | null | null |
publications/views.py
|
LuizFelipeGondim/AUline
|
04e59770b7c835962e8c6f1ea49f853cac716efb
|
[
"MIT"
] | 5
|
2021-04-08T21:32:17.000Z
|
2021-09-22T19:29:23.000Z
|
publications/views.py
|
LuizFelipeGondim/AUline
|
04e59770b7c835962e8c6f1ea49f853cac716efb
|
[
"MIT"
] | 1
|
2021-07-25T22:40:18.000Z
|
2021-07-25T22:40:18.000Z
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Animal
from services.models import PontoAcesso, Depoimento
from .forms import AnimalForm, MotivoForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from accounts.models import Perfil
from services.forms import ContatoForm
from .utils import filtro_animal, paginacao
def lista_animal(request):
categorias = {}
ids = []
lista_de_animais = Animal.objects.all()
if request.method == 'POST':
lista_de_animais = filtro_animal(request, lista_de_animais)
animais = paginacao(request, lista_de_animais)
    # turning the categories into a dictionary so they can be used from JavaScript
for animal in lista_de_animais:
categorias[animal.id] = animal.categoria
ids.append(animal.id)
contexto = {
'animais': animais,
'categorias': categorias,
'ids': ids,
}
return render(request, 'index.html', contexto)
@login_required
def cadastro_animal(request):
form = AnimalForm(request.POST or None, request.FILES)
if request.method == 'POST' and form.is_valid():
try:
user = User.objects.get(id=request.user.id)
animal = form.save(commit=False)
animal.usuario = user
animal.save()
return redirect('cadastro-motivo', animal.id)
        except Exception:
return HttpResponse(status=500)
return render(request, 'cadastro-animal.html', {'form':form})
@login_required
def cadastro_motivo(request, id):
form = MotivoForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
try:
animal = Animal.objects.get(id=id)
motivo = form.save(commit=False)
motivo.animal_id = animal
motivo.save()
return redirect('/')
        except Exception:
return HttpResponse(status=500)
return render(request, 'motivo.html', {'form':form})
def contato(request):
form = ContatoForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
form.save()
return redirect('/')
contexto = {
'form':form
}
return render(request, 'entre-em-contato.html', contexto)
def doe(request):
pontos = PontoAcesso.objects.exclude(tipo_ponto='PA')
contexto = {
'pontos':pontos
}
return render(request, 'doe.html', contexto)
def incentivo(request):
pontos = PontoAcesso.objects.exclude(tipo_ponto='PD')
depoimentos = Depoimento.objects.all()
contexto = {
'pontos':pontos,
'depoimentos': depoimentos,
}
return render(request, 'incentivo.html', contexto)
def sobre(request):
return render(request, 'sobre.html')
def handler404(request, exception):
return render(request, 'erro.html')
def handler500(request):
return render(request, 'erro.html')
| 26.845455
| 76
| 0.650186
|
cbff39dc17a83ae39a61d5be4d76a8dfd26838d2
| 29
|
py
|
Python
|
portfolio/Python/scrapy/sie_hunting/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/sie_hunting/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/sie_hunting/__init__.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
ACCOUNT_NAME = 'SIE Hunting'
| 14.5
| 28
| 0.758621
|
a1cac91340bd79b5cadea1c3e4e452005c26213d
| 1,744
|
py
|
Python
|
oechem_eval/oe_rigid_body_opt.py
|
PatWalters/fragment_expansion
|
e5e5a7d9bb24fe1196ea849f128d1a61a33e67b9
|
[
"MIT"
] | 10
|
2020-03-27T00:42:15.000Z
|
2021-07-19T12:35:02.000Z
|
oechem_eval/oe_rigid_body_opt.py
|
PatWalters/fragment_expansion
|
e5e5a7d9bb24fe1196ea849f128d1a61a33e67b9
|
[
"MIT"
] | null | null | null |
oechem_eval/oe_rigid_body_opt.py
|
PatWalters/fragment_expansion
|
e5e5a7d9bb24fe1196ea849f128d1a61a33e67b9
|
[
"MIT"
] | 5
|
2020-03-26T00:29:38.000Z
|
2022-03-25T06:34:26.000Z
|
#!/usr/bin/env python
import sys
from openeye import oechem
from openeye import oeff
class RigidBodyOptimizer:
def __init__(self,prot_mol):
oechem.OEAddExplicitHydrogens(prot_mol)
self.mmff = oeff.OEMMFFAmber(prot_mol)
self.mmff.PrepMol(prot_mol)
def minimize(self,mol):
oechem.OEAddExplicitHydrogens(mol)
if (not self.mmff.PrepMol(mol)) or (not self.mmff.Setup(mol)):
oechem.OEThrow.Warning("Unable to process molecule: title = '%s'" % mol.GetTitle())
return None
adaptor = oeff.OEQuatAdaptor(self.mmff, False, False)
if not adaptor.Setup(mol):
oechem.OEThrow.Warning("Unable to process subset for molecule: title = '%s'"
% mol.GetTitle())
return None
vecCoords = oechem.OEDoubleArray(3*mol.GetMaxAtomIdx())
mol.GetCoords(vecCoords)
vecX = oechem.OEDoubleArray(adaptor.NumVar())
adaptor.GetVar(vecX, vecCoords)
initial_energy = adaptor(vecX)
optimizer = oeff.OEBFGSOpt()
final_energy = optimizer(adaptor, vecX, vecX)
adaptor.AdaptVar(vecCoords, vecX)
mol.SetCoords(vecCoords)
return initial_energy, final_energy
def main():
prot_fs = oechem.oemolistream(sys.argv[1])
lig_fs = oechem.oemolistream(sys.argv[2])
ofs = oechem.oemolostream(sys.argv[3])
prot_mol = oechem.OEGraphMol()
oechem.OEReadMolecule(prot_fs,prot_mol)
rigid_body_opt = RigidBodyOptimizer(prot_mol)
for lig_mol in lig_fs.GetOEMols():
res = rigid_body_opt.minimize(lig_mol)
print(lig_mol.GetTitle(),res)
oechem.OEWriteMolecule(ofs,lig_mol)
if __name__ == "__main__":
main()
| 27.68254
| 95
| 0.650803
|
ce48f653d0c330af095693608908d6d6ff938b82
| 3,359
|
py
|
Python
|
data_engineering_capstone-master/python/process_fact_immigration.py
|
jrderek/ETL-Pipeline-with-Apache-Airflow
|
6a460fc9279d8ae681a5b977f3c08d9569ca8e42
|
[
"MIT"
] | null | null | null |
data_engineering_capstone-master/python/process_fact_immigration.py
|
jrderek/ETL-Pipeline-with-Apache-Airflow
|
6a460fc9279d8ae681a5b977f3c08d9569ca8e42
|
[
"MIT"
] | null | null | null |
data_engineering_capstone-master/python/process_fact_immigration.py
|
jrderek/ETL-Pipeline-with-Apache-Airflow
|
6a460fc9279d8ae681a5b977f3c08d9569ca8e42
|
[
"MIT"
] | null | null | null |
from pyspark.sql import functions as F
from python import get_arguments, initialize_spark_session, load_state_mapping, load_country_mapping
if __name__ == "__main__":
"""
Spark Job to process raw data into immigration fact-table.
"""
args = get_arguments()
# initialize spark-session
spark = initialize_spark_session()
JDBC_URL = args.jdbc_uri
TABLE_SINK = args.table_sink
INPUT_PATH = args.input
OUTPUT_PATH = args.output + "/fact_immigration/"
TMP_DIR = args.tmp
df = spark.sql("""SELECT * FROM stag_immigration""")
# derive new column indicating stayed-days
df = df.withColumn("stayed_days", F.datediff("departure_date", "arrival_date"))
# get us-states mapping & join to get only valid i94-us-states, missing states are set to '99'
state_mapping = load_state_mapping(spark, INPUT_PATH + "/mappings/i94_states.txt")
df = df.join(state_mapping, df.i94_address == state_mapping.id, "left") \
.fillna({"id": "99"}) \
.drop("state", "i94_address") \
.withColumnRenamed("id", "state_id")
# get modes mapping & join to get only valid i94-modes, missing values are set to 'Not reported'
mode_mapping = spark.sql("""SELECT * FROM dim_travel_mode""")
df = df.join(mode_mapping, df.i94_mode == mode_mapping.id, "left") \
.fillna({"id": "Not reported"}) \
.drop("transport", "i94_mode") \
.withColumnRenamed("id", "mode_id")
# get visa mapping & join to get only valid i94-visa-types, missing values are set to 'Other'
visa_mapping = spark.sql("""SELECT * FROM dim_visa_type""")
df = df.join(visa_mapping, df.i94_visa_code == visa_mapping.id, "left") \
.fillna({"id": "Other"}) \
.drop("reason", "i94_visa_code") \
.withColumnRenamed("id", "visa_id")
# get port mapping & join to get only valid i94-ports
port_mapping = spark.sql("""SELECT * FROM dim_port""").drop("state_id")
df = df.join(port_mapping, df.i94_port == port_mapping.id, "left") \
.drop("city", "i94_port") \
.withColumnRenamed("id", "port_id")
# get country mapping & join to get only valid i94 residences
country_mapping = load_country_mapping(spark, INPUT_PATH + "/mappings/i94_city_res.txt")
df = df.join(country_mapping, df.i94_residence == country_mapping.id, "left") \
.drop("country", "i94_residence") \
.withColumnRenamed("id", "country_id")
# create unique identifier
df = df.withColumn("id", F.monotonically_increasing_id() + 1)
# select relevant columns
df = df.select("id", "admission_number", "cic_id", "ins_number", "i94_year", "i94_month", "arrival_date",
"departure_date", "stayed_days", "airline", "flight_number", "gender", "i94_age", "year_of_birth",
"occupation", "i94_city", "country_id", "state_id", "port_id", "mode_id", "visa_id", "visa_type")
# show final table
df.show()
df.printSchema()
# write to parquet on s3
df.write.mode("overwrite").option("path", OUTPUT_PATH) \
.saveAsTable(TABLE_SINK)
# save final fact table
df.write.format("com.databricks.spark.redshift") \
.option("url", JDBC_URL) \
.option("dbtable", TABLE_SINK) \
.option("tempdir", TMP_DIR) \
.mode("append") \
.save()
spark.stop()
| 39.988095
| 117
| 0.646919
|
29bccf95f47bb1debddf70322a1efaa38bb69a34
| 3,884
|
py
|
Python
|
nvtabular/tools/dataset_inspector.py
|
davidxia/NVTabular
|
97b05ac74204d4e21fa31d522d0f84fb37cf94a9
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/tools/dataset_inspector.py
|
davidxia/NVTabular
|
97b05ac74204d4e21fa31d522d0f84fb37cf94a9
|
[
"Apache-2.0"
] | 1
|
2021-08-30T21:24:22.000Z
|
2021-08-30T21:24:22.000Z
|
nvtabular/tools/dataset_inspector.py
|
liftoffio/NVTabular
|
4ebb6bc11da12f2a0034dccc8f59701dd2240061
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import fsspec
import numpy as np
from nvtabular.columns import ColumnSelector
from nvtabular.ops import DataStats
from nvtabular.workflow import Workflow
# Class to help Json to serialize the data
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
class DatasetInspector:
"""
Analyzes an existing dataset to extract its statistics.
"""
def __init__(self, client=None):
self.client = client
def inspect(self, dataset, columns_dict, output_file):
"""
Parameters
-----------
        dataset: Dataset
            Dataset object holding the data to collect statistics from.
        columns_dict: dict
            Dictionary indicating the different column types
            ("cats", "conts" and "labels")
output_file: str
Filename to write the output statistics
"""
# Get dataset columns
cats = columns_dict["cats"]
conts = columns_dict["conts"]
labels = columns_dict["labels"]
# Create Dataset, Workflow, and get Stats
stats = DataStats()
features = ColumnSelector(cats + conts + labels) >> stats
workflow = Workflow(features, client=self.client)
workflow.fit(dataset)
# get statistics from the datastats op
output = stats.output
# Dictionary to store collected information
data = {}
# Store num_rows
data["num_rows"] = dataset.num_rows
# Store cols
for col_type in ["conts", "cats", "labels"]:
data[col_type] = {}
for col in columns_dict[col_type]:
data[col_type][col] = {}
data[col_type][col]["dtype"] = output[col]["dtype"]
if col_type != "conts":
data[col_type][col]["cardinality"] = output[col]["cardinality"]
if col_type == "cats":
data[col_type][col]["min_entry_size"] = output[col]["min"]
data[col_type][col]["max_entry_size"] = output[col]["max"]
data[col_type][col]["avg_entry_size"] = output[col]["mean"]
elif col_type == "conts":
data[col_type][col]["min_val"] = output[col]["min"]
data[col_type][col]["max_val"] = output[col]["max"]
data[col_type][col]["mean"] = output[col]["mean"]
data[col_type][col]["std"] = output[col]["std"]
data[col_type][col]["per_nan"] = output[col]["per_nan"]
# Write json file
with fsspec.open(output_file, "w") as outfile:
json.dump(data, outfile, cls=NpEncoder)
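# A minimal usage sketch (added for illustration; the file paths and column names below
# are hypothetical and not part of this module):
#
#     from nvtabular import Dataset
#
#     inspector = DatasetInspector()
#     inspector.inspect(
#         Dataset("/path/to/data.parquet"),
#         columns_dict={"cats": ["category"], "conts": ["price"], "labels": ["label"]},
#         output_file="stats.json",
#     )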
| 35.309091
| 83
| 0.602729
|
03d43ae0c2e51fba2d8a98c6985bb229a8c44db2
| 29,827
|
py
|
Python
|
LocalHistory.py
|
lukaw3d/sublime-local-history-customized
|
eb2ff2de73a0ff750f4cbc9df558d4d716e70ece
|
[
"MIT"
] | null | null | null |
LocalHistory.py
|
lukaw3d/sublime-local-history-customized
|
eb2ff2de73a0ff750f4cbc9df558d4d716e70ece
|
[
"MIT"
] | null | null | null |
LocalHistory.py
|
lukaw3d/sublime-local-history-customized
|
eb2ff2de73a0ff750f4cbc9df558d4d716e70ece
|
[
"MIT"
] | null | null | null |
import sys
import os
import re
import time
import platform
import datetime
import difflib
import filecmp
import shutil
from threading import Thread
import subprocess
import sublime
import sublime_plugin
import string
PY2 = sys.version_info < (3, 0)
if PY2:
from math import log
else:
from math import log2
NO_SELECTION = -1
settings = None
def get_filename(view):
file_path = view.file_name()
    if file_path is None or not os.path.isfile(file_path):
file_path = '!:\\' + format_filename(view.name() + ' ' + str(view.id())) + '.txt'
return (True, file_path)
return (False, file_path)
def format_filename(s):
"""Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
"""
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in s if c in valid_chars)
filename = filename.replace(' ','_') # I don't like spaces in filenames.
return filename
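# Worked example (added commentary, not part of the original plugin): with the whitelist
# above, format_filename('my view? 12') drops the '?' and replaces the spaces, producing
# 'my_view_12'.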
def status_msg(msg):
sublime.status_message('Local History: ' + msg)
def readable_file_size(size):
    suffixes = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
if PY2:
order = int(log(size, 2) / 10) if size else 0
else:
order = int(log2(size) / 10) if size else 0
return '{:.4g} {}'.format(size / (1 << (order * 10)), suffixes[order])
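# Worked example (added commentary, not part of the original plugin): for size 2048 the
# order is int(log2(2048) / 10) == 1, so the value is scaled by 1 << 10 and the function
# returns '2 KB'; for size 500 the order is 0 and it returns '500 bytes'.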
def get_history_root():
path_default_not_portable = os.path.join(os.path.abspath(os.path.expanduser('~')), '.sublime', 'Local History')
path_not_portable = settings.get('history_path', path_default_not_portable)
return os.path.join(os.path.dirname(sublime.packages_path()), '.sublime', 'Local History') if settings.get('portable', True) else path_not_portable
def get_history_subdir(file_path):
history_root = get_history_root()
file_dir = os.path.dirname(file_path)
if platform.system() == 'Windows':
if file_dir.find(os.sep) == 0:
file_dir = file_dir[2:]
if file_dir.find(':') == 1:
file_dir = file_dir.replace(':', '', 1)
else:
file_dir = file_dir[1:]
return os.path.join(history_root, file_dir)
def get_history_files(file_name, history_dir):
file_root, file_extension = os.path.splitext(file_name)
history_files = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(history_dir)
for f in files if f.startswith(file_root) and f.endswith(file_extension)]
history_files.sort(key=lambda f: os.path.getmtime(os.path.join(history_dir, f)), reverse=True)
return history_files
def filtered_history_files(files):
'''Only show file name in quick panel, not path'''
if not settings.get('show_full_path', True):
return [os.path.split(f)[1] for f in files]
else:
return files
def check_sbs_compare():
prefs = sublime.load_settings("Preferences.sublime-settings")
pcsets = sublime.load_settings("Package Control.sublime-settings")
installed = "Compare Side-By-Side" in pcsets.get('installed_packages')
ignored = "Compare Side-By-Side" in prefs.get('ignored_packages')
if installed and not ignored:
return True
else:
return False
def plugin_loaded():
global settings
settings = sublime.load_settings('LocalHistory.sublime-settings')
settings.add_on_change('reload', sublime.load_settings('LocalHistory.sublime-settings'))
status_msg('Target directory: "' + get_history_root() + '"')
HistoryListener.listening = False
if sublime.version().startswith('2'):
plugin_loaded()
def auto_diff_pane(view, index, history_dir, history_files):
win = view.window()
from_file = os.path.join(history_dir, history_files[index])
from_file = from_file, os.path.basename(from_file)
file_name = os.path.basename(view.file_name())
to_file = view.file_name(), file_name
group = win.get_view_index(view)[0]
# view is not in first group
if group:
win.focus_group(0)
# view is in first group
elif win.num_groups() > 1:
layout = win.get_layout()
# only add to other group if pane is big enough
if layout['cols'][2] - layout['cols'][1] > 0.35:
win.focus_group(1)
# create a pane in the middle
else:
middle_col = layout['cols'][1]
layout['cols'].insert(1, middle_col)
layout['cols'][1] = middle_col/2
x1, y1, x2, y2 = layout['cells'][0]
new_cell = [x1+1, y1, x2+1, y2]
layout['cells'].insert(1, new_cell)
new_cells = layout['cells'][:2]
old_cells = [[x1+1, y1, x2+1, y2] for i, [x1, y1, x2, y2] in enumerate(layout['cells']) if i > 1]
new_cells.extend(old_cells)
layout['cells'] = new_cells
win.run_command('set_layout', layout)
for g, cell in enumerate(layout['cells']):
if g > 0:
for view in win.views_in_group(g):
pos = win.get_view_index(view)[1]
win.set_view_index(view, g+1, pos)
win.focus_group(1)
else:
win.run_command(
"set_layout",
{
"cols": [0.0, 0.5, 1.0],
"rows": [0.0, 1.0],
"cells": [[0, 0, 1, 1], [1, 0, 2, 1]]
}
)
view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file})
# focus back to view
win.focus_group(group)
def rename_tab(view, lh_view, pre, ext, snap=False):
def delay():
lh_file = os.path.basename(lh_view.file_name())
name = pre+"-" if not snap else pre
name = lh_file.replace(name, "")
name = name.replace(ext, "")
lh_view.set_syntax_file(view.settings().get("syntax"))
lh_view.set_name(name)
sublime.set_timeout_async(delay)
class HistorySave(sublime_plugin.EventListener):
def on_load(self, view):
if not PY2 or not settings.get('history_on_load', True):
return
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def on_load_async(self, view):
if settings.get('history_on_load', True):
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def on_close(self, view):
if settings.get('history_on_close', True):
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def on_post_save(self, view):
if not PY2 or settings.get('history_on_close', True):
return
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def on_post_save_async(self, view):
if not settings.get('history_on_close', True):
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def on_deactivated(self, view):
if (view.is_dirty() and settings.get('history_on_focus_lost', False)):
t = Thread(target=self.process_history, args=(get_filename(view),view.substr(sublime.Region(0, view.size())),))
t.start()
def process_history(self, file_name_pack, file_content):
(unsaved, file_path) = file_name_pack
size_limit = settings.get('file_size_limit', 4194304)
history_retention = settings.get('history_retention', 0)
skip_recently_saved = settings.get('skip_if_saved_within_minutes')
if PY2:
file_path = file_path.encode('utf-8')
if not unsaved and os.path.getsize(file_path) > size_limit:
status_msg('File not saved, exceeded %s limit.' % readable_file_size(size_limit))
return
file_name = os.path.basename(file_path)
history_dir = get_history_subdir(file_path)
if not os.path.exists(history_dir):
os.makedirs(history_dir)
history_files = get_history_files(file_name, history_dir)
if history_files:
if not unsaved and filecmp.cmp(file_path, os.path.join(history_dir, history_files[0])):
status_msg('File not saved, no changes for "' + file_name + '".')
return
elif skip_recently_saved:
current_time = time.time()
last_modified = os.path.getmtime(history_files[0])
if current_time - last_modified < skip_recently_saved*60:
status_msg('File not saved, recent backup for "' + file_name + '" exists.')
return
file_root, file_extension = os.path.splitext(file_name)
if unsaved:
fh = open(os.path.join(history_dir, '{0}-{1}{2}'.format(file_root, datetime.datetime.now().strftime(settings.get('format_timestamp', '%Y%m%d%H%M%S')), file_extension)),"w")
fh.write(file_content)
fh.close()
if not unsaved:
shutil.copyfile(file_path, os.path.join(history_dir, '{0}-{1}{2}'.format(file_root, datetime.datetime.now().strftime(settings.get('format_timestamp', '%Y%m%d%H%M%S')), file_extension)))
status_msg('File saved, updated Local History for "' + file_name + '".')
if history_retention == 0:
return
max_valid_archive_date = datetime.date.today() - datetime.timedelta(days=history_retention)
for file in history_files:
file = os.path.join(history_dir, file)
if datetime.date.fromtimestamp(os.path.getmtime(file)) < max_valid_archive_date:
os.remove(file)
class HistorySaveNow(sublime_plugin.TextCommand):
def run(self, edit):
t = Thread(target=HistorySave().process_history, args=(get_filename(self.view),self.view.substr(sublime.Region(0, self.view.size())),))
t.start()
class HistoryBrowse(sublime_plugin.TextCommand):
def run(self, edit):
target_dir = get_history_subdir(self.view.file_name())
target_dir = target_dir.replace('\\', os.sep).replace('/', os.sep)
system = platform.system()
if system == 'Darwin':
subprocess.call(['open', target_dir])
elif system == 'Linux':
subprocess.call('xdg-open %s' % target_dir, shell=True)
elif system == 'Windows':
subprocess.call('explorer %s' % target_dir, shell=True)
class HistoryOpen(sublime_plugin.TextCommand):
def run(self, edit, autodiff=False):
(unsaved, file_path) = get_filename(self.view)
file_name = os.path.basename(file_path)
history_dir = get_history_subdir(file_path)
pre, ext = os.path.splitext(file_name)
history_files = get_history_files(file_name, history_dir)
if not history_files:
status_msg('Local History not found for "' + file_name + '".')
return
filtered_files = filtered_history_files(history_files)
def on_done(index):
if index is NO_SELECTION:
return
lh_view = self.view.window().open_file(os.path.join(history_dir, history_files[index]))
sublime.set_timeout_async(lambda: lh_view.set_scratch(True))
if settings.get('rename_tab'):
rename_tab(self.view, lh_view, pre, ext)
if settings.get('auto_diff') or autodiff:
auto_diff_pane(self.view, index, history_dir, history_files)
self.view.window().show_quick_panel(filtered_files, on_done)
class HistoryCompare(sublime_plugin.TextCommand):
def run(self, edit, snapshots=False, sbs=False):
if not self.view.file_name():
status_msg("not a valid file.")
return
file_name = os.path.basename(self.view.file_name())
history_dir = get_history_subdir(self.view.file_name())
history_files = get_history_files(file_name, history_dir)
history_files = history_files[1:]
if history_files:
filtered_files = filtered_history_files(history_files)
else:
status_msg('Local History not found for "' + file_name + '".')
return
def on_done(index):
if index is NO_SELECTION:
return
if self.view.is_dirty() and settings.get('auto_save_before_diff', True):
self.view.run_command('save')
from_file = os.path.join(history_dir, history_files[index])
from_file = from_file, os.path.basename(from_file)
to_file = self.view.file_name(), file_name
if sbs:
HistorySbsCompare.vars = self.view, from_file[0], to_file[0]
self.view.window().run_command("history_sbs_compare")
else:
self.view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file})
self.view.window().show_quick_panel(filtered_files, on_done)
class HistoryReplace(sublime_plugin.TextCommand):
def run(self, edit):
if not self.view.file_name():
status_msg("not a valid file.")
return
file_name = os.path.basename(self.view.file_name())
history_dir = get_history_subdir(self.view.file_name())
history_files = get_history_files(file_name, history_dir)
history_files = history_files[1:]
if history_files:
filtered_files = filtered_history_files(history_files)
else:
status_msg('Local History not found for "' + file_name + '".')
return
def on_done(index):
if index is NO_SELECTION:
return
# send vars to the listener for the diff/replace view
from_file = os.path.join(history_dir, history_files[index])
from_file = from_file, os.path.basename(from_file)
to_file = self.view.file_name(), file_name
HistoryReplaceDiff.from_file = from_file
HistoryReplaceDiff.to_file = to_file
HistoryListener.listening = True
self.view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file, 'replace': True})
self.view.window().show_quick_panel(filtered_files, on_done)
class HistoryIncrementalDiff(sublime_plugin.TextCommand):
def run(self, edit):
file_name = os.path.basename(self.view.file_name())
history_dir = get_history_subdir(self.view.file_name())
history_files = get_history_files(file_name, history_dir)
if len(history_files) < 2:
status_msg('Incremental diff not found for "' + file_name + '".')
return
filtered_files = filtered_history_files(history_files)
def on_done(index):
if index is NO_SELECTION:
return
if index == len(history_files) - 1:
status_msg('Incremental diff not found for "' + file_name + '".')
return
from_file = os.path.join(history_dir, history_files[index + 1])
to_file = os.path.join(history_dir, history_files[index])
self.view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file})
self.view.window().show_quick_panel(filtered_files, on_done)
class ShowDiff(sublime_plugin.TextCommand):
header = "\n-\n- PRESS CTRL+ALT+ENTER TO ACCEPT AND REPLACE\n-\n\n"
def run(self, edit, replace=False, **kwargs):
from_file = kwargs['from_file'][0]
to_file = kwargs['to_file'][0]
if PY2:
from_file = from_file.encode('utf-8')
with open(from_file, 'r') as f:
from_content = f.readlines()
else:
with open(from_file, 'r', encoding='utf-8') as f:
from_content = f.readlines()
if PY2:
to_file = to_file.encode('utf-8')
with open(to_file, 'r') as f:
to_content = f.readlines()
else:
with open(to_file, 'r', encoding='utf-8') as f:
to_content = f.readlines()
diff = difflib.unified_diff(from_content, to_content, from_file, to_file)
diff = ''.join(diff)
if PY2:
diff = diff.decode('utf-8')
panel = sublime.active_window().new_file()
panel.set_name("## LH: Diff ##")
panel.set_scratch(True)
panel.set_syntax_file('Packages/Diff/Diff.sublime-syntax')
if replace and diff:
HistoryListener.diff_view = panel
panel.insert(edit, 0, self.header+diff)
elif diff:
panel.insert(edit, 0, diff)
else:
f1, f2 = os.path.split(from_file)[1], os.path.split(to_file)[1]
panel.insert(edit, 0, "\n--- "+f1+"\n+++ "+f2+"\n\nNo differences\n\n\n")
panel.set_read_only(True)
class HistoryDeleteAll(sublime_plugin.TextCommand):
def run(self, edit):
if not sublime.ok_cancel_dialog('Are you sure you want to delete the Local History for all files?'):
return
shutil.rmtree(get_history_root())
status_msg('The Local History has been deleted for all files.')
class HistoryCreateSnapshot(sublime_plugin.TextCommand):
def on_done(self, string):
self.string = string
self.view.window().run_command('history_create_snapshot', {"callback": True})
def run(self, edit, callback=None):
if not callback:
v = self.view
file_name = os.path.basename(v.file_name())
self.pre, self.ext = os.path.splitext(file_name)
c = "Enter a name for this snapshot: "
s = ""
v.window().show_input_panel(c, s, self.on_done, None, None)
else:
v = self.view
file_name = self.pre + " # " + self.string + self.ext
history_dir = get_history_subdir(v.file_name())
shutil.copyfile(v.file_name(), os.path.join(history_dir, file_name))
status_msg('File snapshot saved under "' + file_name + '".')
class HistoryOpenSnapshot(sublime_plugin.TextCommand):
def run(self, edit, open=True, compare=False, replace=False, sbs=False, delete=False, autodiff=False):
# ---------------
def Compare(index):
if self.view.is_dirty():
self.view.run_command('save')
from_file = os.path.join(history_dir, history_files[index])
from_file = from_file, os.path.basename(from_file)
to_file = self.view.file_name(), os.path.basename(self.view.file_name())
if sbs:
HistorySbsCompare.vars = self.view, from_file[0], to_file[0]
self.view.window().run_command("history_sbs_compare")
elif replace:
# send vars to the listener for the diff/replace view
HistoryReplaceDiff.from_file = from_file
HistoryReplaceDiff.to_file = to_file
HistoryListener.listening = True
self.view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file, 'replace': True})
else:
self.view.run_command('show_diff', {'from_file': from_file, 'to_file': to_file})
# ---------------
if not self.view.file_name():
status_msg("not a valid file.")
return
file_name = os.path.basename(self.view.file_name())
history_dir = get_history_subdir(self.view.file_name())
pre, ext = os.path.splitext(file_name)
history_files = get_history_files(file_name, history_dir)
fpat = pre+r" # .+"
history_files = [re.search(fpat, file).group(0) for file in history_files
if re.search(fpat, file)]
if not history_files:
status_msg('No snapshots found for "' + file_name + '".')
return
def rename(file):
pre, ext = os.path.splitext(file_name)
base, msg = file.split(" # ", 1)
msg = msg.replace(ext, "")
return [base+ext, msg]
show_files = [rename(file) for file in history_files]
def on_done(index):
if index is NO_SELECTION:
return
if compare or sbs or replace:
Compare(index)
elif delete:
os.remove(os.path.join(history_dir, history_files[index]))
status_msg("The snapshot "+history_files[index]+" has been deleted.")
else:
lh_view = self.view.window().open_file(os.path.join(history_dir, history_files[index]))
sublime.set_timeout_async(lambda: lh_view.set_scratch(True))
if settings.get('rename_tab'):
rename_tab(self.view, lh_view, pre, ext, snap=True)
if settings.get('auto_diff') or autodiff:
auto_diff_pane(self.view, index, history_dir, history_files)
self.view.window().show_quick_panel(show_files, on_done)
class HistoryDelete(sublime_plugin.TextCommand):
def interval(self, edit, m, mode):
choice = (
["Older than one year", mode],
["Older than six months", mode],
["Older than one month", mode],
["Older than one week", mode]
)
def on_done(index):
if index is NO_SELECTION:
return
if index == 0:
self.run(edit, ask=False, dir=m, before_last="year")
elif index == 1:
self.run(edit, ask=False, dir=m, before_last="months6")
elif index == 2:
self.run(edit, ask=False, dir=m, before_last="month")
elif index == 3:
self.run(edit, ask=False, dir=m, before_last="week")
self.view.window().show_quick_panel(choice, on_done)
def run(self, edit, ask=True, before_last=None, dir=False):
if ask:
i1 = "For all files, snapshots excluded"
i2 = "Current folder only, snapshots excluded"
choice = (
["Time interval", i1],
["Time interval", i2],
["All", "All files for all folders, no exceptions"]
)
def on_done(index):
if index is NO_SELECTION:
return
if index == 0:
self.interval(edit, False, i1)
elif index == 1:
self.interval(edit, True, i2)
elif index == 2:
self.view.window().run_command('history_delete_all')
self.view.window().show_quick_panel(choice, on_done)
return
# ---------------
# today
current = time.time()
folder = get_history_subdir(self.view.file_name()) if dir else get_history_root()
base_name = os.path.splitext(os.path.split(self.view.file_name())[1])[0]
for root, dirs, files in os.walk(folder):
for f in files:
file = os.path.join(root, f)
if not os.path.isfile(file):
continue
# skip snapshots
if re.match(base_name+" # ", f):
continue
# file last modified
last_mod = os.path.getmtime(file)
if before_last == "year":
if current - last_mod > 31536000:
os.remove(file)
elif before_last == "months6":
if current - last_mod > 15811200:
os.remove(file)
elif before_last == "month":
if current - last_mod > 2635200:
os.remove(file)
elif before_last == "week":
if current - last_mod > 604800:
os.remove(file)
if before_last == "year":
status_msg('deleted files older than one year.')
elif before_last == "months6":
status_msg('deleted files older than six months.')
elif before_last == "month":
status_msg('deleted files older than one month.')
elif before_last == "week":
status_msg('deleted files older than one week.')
class HistorySbsCompare(sublime_plugin.ApplicationCommand):
def run(self, callback=False):
global sbsW, sbsF, sbsVI
if callback:
view = sbsW.find_open_file(sbsF)
sbsW.set_view_index(view, sbsVI[0], sbsVI[1])
sbsV, sbsF1, sbsF2 = self.vars
sbsW = sbsV.window()
sbsVI = sbsW.get_view_index(sbsV)
sbsW.run_command("sbs_compare_files", {"A": sbsF1, "B": sbsF2})
# file has been closed, open it again
sublime.set_timeout_async(lambda: sbsW.open_file(sbsF2), 1000)
sublime.set_timeout_async(lambda: sbsW.run_command(
"history_sbs_compare", {"callback": True}), 2000)
def is_visible(self):
return check_sbs_compare()
class HistoryMenu(sublime_plugin.TextCommand):
def compare(self):
choice = [
["Diff with history"],
["Diff wih snapshot"],
["Diff & Replace with history"],
["Diff & Replace wih snapshot"],
["Compare Side-By-Side with history"],
["Compare Side-By-Side wih snapshot"],
]
def on_done(index):
if index is NO_SELECTION:
return
if index == 0:
self.view.window().run_command('history_compare')
elif index == 1:
self.view.window().run_command('history_open_snapshot', {"compare": True})
            elif index == 2:
self.view.window().run_command('history_replace')
elif index == 3:
self.view.window().run_command('history_open_snapshot', {"replace": True})
elif index == 4:
self.view.window().run_command('history_compare', {"sbs": True})
elif index == 5:
self.view.window().run_command('history_open_snapshot', {"sbs": True})
sbs = check_sbs_compare()
choice = choice if sbs else choice[:4]
self.view.window().show_quick_panel(choice, on_done)
def snapshots(self):
choice = [
["Open"],
["Create"],
["Delete"],
["Compare"],
["Compare & Replace"],
["Compare Side-By-Side wih snapshot"],
]
def on_done(index):
if index is NO_SELECTION:
return
elif index == 0:
self.view.window().run_command('history_open_snapshot')
elif index == 1:
self.view.window().run_command('history_create_snapshot')
elif index == 2:
self.view.window().run_command('history_open_snapshot', {"delete": True})
elif index == 3:
self.view.window().run_command('history_open_snapshot', {"compare": True})
elif index == 4:
self.view.window().run_command('history_open_snapshot', {"replace": True})
elif index == 5 and sbs:
self.view.window().run_command('history_open_snapshot', {"sbs": True})
sbs = check_sbs_compare()
choice = choice if sbs else choice[:5]
self.view.window().show_quick_panel(choice, on_done)
def run(self, edit, compare=False, snapshots=False):
choice = (
["Open history"],
["Compare & Replace"],
["Snapshots"],
["Browse in Explorer"],
["Delete history"]
)
if compare:
self.compare()
return
elif snapshots:
self.snapshots()
return
def on_done(index):
if index is NO_SELECTION:
return
elif index == 0:
self.view.window().run_command('history_open')
elif index == 1:
self.view.window().run_command('history_menu', {"compare": True})
elif index == 2:
self.view.window().run_command('history_menu', {"snapshots": True})
elif index == 3:
self.view.window().run_command('history_browse')
elif index == 4:
self.view.window().run_command('history_delete')
self.view.window().show_quick_panel(choice, on_done)
class HistoryReplaceDiff(sublime_plugin.TextCommand):
from_file, to_file = None, None
def run(self, edit):
HistoryListener.listening = False
from_file, to_file = HistoryReplaceDiff.from_file, HistoryReplaceDiff.to_file
shutil.copyfile(from_file[0], to_file[0])
status_msg('"'+to_file[1]+'"'+' replaced with "' + from_file[1] + '".')
self.view.window().run_command('close_file')
class HistoryListener(sublime_plugin.EventListener):
listening = False
def on_query_context(self, view, key, operator, operand, match_all):
if HistoryListener.listening:
if key == "replace_diff":
if view == HistoryListener.diff_view:
return True
else:
HistoryListener.listening = False
return None
def on_close(self, view):
HistoryListener.listening = False
| 37.052174
| 197
| 0.592249
|
239d0c9a0fc25a45d3a07aea1da9d799063cdab7
| 15,321
|
py
|
Python
|
plaso/lib/timelib.py
|
ir4n6/plaso
|
010f9cbdfc82e21ed6658657fd09a7b44115c464
|
[
"Apache-2.0"
] | null | null | null |
plaso/lib/timelib.py
|
ir4n6/plaso
|
010f9cbdfc82e21ed6658657fd09a7b44115c464
|
[
"Apache-2.0"
] | null | null | null |
plaso/lib/timelib.py
|
ir4n6/plaso
|
010f9cbdfc82e21ed6658657fd09a7b44115c464
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Time manipulation functions and variables.
This module contain common methods that can be used to convert timestamps
from various formats into number of micro seconds since January 1, 1970,
00:00:00 UTC that is used internally to store timestamps.
It also contains various functions to represent timestamps in a more
human readable form.
"""
from __future__ import unicode_literals
import calendar
import datetime
import logging
import time
import dateutil.parser
import pytz
from plaso.lib import errors
MONTH_DICT = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12}
class Timestamp(object):
"""Class for converting timestamps to Plaso timestamps.
The Plaso timestamp is a 64-bit signed timestamp value containing:
micro seconds since 1970-01-01 00:00:00.
The timestamp is not necessarily in UTC.
"""
# The minimum timestamp in seconds.
TIMESTAMP_MIN_SECONDS = -(((1 << 63) - 1) / 1000000)
# The maximum timestamp in seconds.
TIMESTAMP_MAX_SECONDS = ((1 << 63) - 1) / 1000000
# The minimum timestamp in micro seconds.
TIMESTAMP_MIN_MICRO_SECONDS = -((1 << 63) - 1)
# The maximum timestamp in micro seconds.
TIMESTAMP_MAX_MICRO_SECONDS = (1 << 63) - 1
# Timestamp that represents the timestamp representing not
# a date and time value.
# TODO: replace this with a real None implementation.
NONE_TIMESTAMP = 0
# The number of micro seconds per second.
MICRO_SECONDS_PER_SECOND = 1000000
# The number of microseconds per minute.
MICROSECONDS_PER_MINUTE = (60 * MICRO_SECONDS_PER_SECOND)
# The multiplication factor to change milliseconds to micro seconds.
MILLI_SECONDS_TO_MICRO_SECONDS = 1000
@classmethod
def CopyFromString(cls, time_string):
"""Copies a timestamp from a string containing a date and time value.
Args:
time_string: A string containing a date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the
seconds fraction can be either 3 or 6 digits. The time
of day, seconds fraction and timezone offset are optional.
The default timezone is UTC.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
if not time_string:
raise ValueError('Invalid time string.')
time_string_length = len(time_string)
# The time string should at least contain 'YYYY-MM-DD'.
if (time_string_length < 10 or time_string[4] != '-' or
time_string[7] != '-'):
raise ValueError('Invalid time string.')
# If a time of day is specified the time string it should at least
# contain 'YYYY-MM-DD hh:mm:ss'.
if (time_string_length > 10 and (
time_string_length < 19 or time_string[10] != ' ' or
time_string[13] != ':' or time_string[16] != ':')):
raise ValueError('Invalid time string.')
try:
year = int(time_string[0:4], 10)
except ValueError:
raise ValueError('Unable to parse year.')
try:
month = int(time_string[5:7], 10)
except ValueError:
raise ValueError('Unable to parse month.')
if month not in range(1, 13):
raise ValueError('Month value out of bounds.')
try:
day_of_month = int(time_string[8:10], 10)
except ValueError:
raise ValueError('Unable to parse day of month.')
if day_of_month not in range(1, 32):
raise ValueError('Day of month value out of bounds.')
hours = 0
minutes = 0
seconds = 0
if time_string_length > 10:
try:
hours = int(time_string[11:13], 10)
except ValueError:
raise ValueError('Unable to parse hours.')
if hours not in range(0, 24):
raise ValueError('Hours value out of bounds.')
try:
minutes = int(time_string[14:16], 10)
except ValueError:
raise ValueError('Unable to parse minutes.')
if minutes not in range(0, 60):
raise ValueError('Minutes value out of bounds.')
try:
seconds = int(time_string[17:19], 10)
except ValueError:
        raise ValueError('Unable to parse seconds.')
if seconds not in range(0, 60):
raise ValueError('Seconds value out of bounds.')
micro_seconds = 0
timezone_offset = 0
if time_string_length > 19:
if time_string[19] != '.':
timezone_index = 19
else:
for timezone_index in range(19, time_string_length):
if time_string[timezone_index] in ['+', '-']:
break
# The calculation that follow rely on the timezone index to point
# beyond the string in case no timezone offset was defined.
if timezone_index == time_string_length - 1:
timezone_index += 1
if timezone_index > 19:
fraction_of_seconds_length = timezone_index - 20
if fraction_of_seconds_length not in [3, 6]:
raise ValueError('Invalid time string.')
try:
micro_seconds = int(time_string[20:timezone_index], 10)
except ValueError:
raise ValueError('Unable to parse fraction of seconds.')
if fraction_of_seconds_length == 3:
micro_seconds *= 1000
if timezone_index < time_string_length:
if (time_string_length - timezone_index != 6 or
time_string[timezone_index + 3] != ':'):
raise ValueError('Invalid time string.')
try:
timezone_offset = int(time_string[
timezone_index + 1:timezone_index + 3])
except ValueError:
raise ValueError('Unable to parse timezone hours offset.')
if timezone_offset not in range(0, 24):
raise ValueError('Timezone hours offset value out of bounds.')
      # Note that when the sign of the timezone offset is negative the
      # difference needs to be added to get UTC, otherwise it needs to be
      # subtracted. We do so by flipping the sign of the complete offset.
      try:
        timezone_minutes_offset = int(time_string[
            timezone_index + 4:timezone_index + 6])
      except ValueError:
        raise ValueError('Unable to parse timezone minutes offset.')
      timezone_offset = (timezone_offset * 60) + timezone_minutes_offset
      if time_string[timezone_index] != '-':
        timezone_offset = -timezone_offset
      timezone_offset *= 60
timestamp = int(calendar.timegm((
year, month, day_of_month, hours, minutes, seconds)))
return ((timestamp + timezone_offset) * 1000000) + micro_seconds
@classmethod
def CopyToDatetime(cls, timestamp, timezone, raise_error=False):
"""Copies the timestamp to a datetime object.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A datetime object (instance of datetime.datetime). A datetime object of
      January 1, 1970 00:00:00 UTC is returned on error if raise_error is
not set.
Raises:
      OverflowError: If raise_error is set to True and an overflow error
occurs.
      ValueError: If raise_error is set to True and no timestamp value is
provided.
"""
datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
if not timestamp:
if raise_error:
raise ValueError('Missing timestamp value')
return datetime_object
try:
datetime_object += datetime.timedelta(microseconds=timestamp)
return datetime_object.astimezone(timezone)
except OverflowError as exception:
if raise_error:
raise
logging.error((
'Unable to copy {0:d} to a datetime object with error: '
'{1:s}').format(timestamp, exception))
return datetime_object
@classmethod
def CopyToIsoFormat(cls, timestamp, timezone=pytz.UTC, raise_error=False):
"""Copies the timestamp to an ISO 8601 formatted string.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: Optional timezone (instance of pytz.timezone).
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A string containing an ISO 8601 formatted date and time.
"""
datetime_object = cls.CopyToDatetime(
timestamp, timezone, raise_error=raise_error)
return datetime_object.isoformat()
@classmethod
def CopyToPosix(cls, timestamp):
"""Converts microsecond timestamps to POSIX timestamps.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
Returns:
The timestamp which is an integer containing the number of seconds
since January 1, 1970, 00:00:00 UTC.
"""
return timestamp // cls.MICRO_SECONDS_PER_SECOND
@classmethod
def FromPosixTime(cls, posix_time):
"""Converts a POSIX timestamp into a timestamp.
The POSIX time is a signed 32-bit or 64-bit value containing:
seconds since 1970-01-01 00:00:00
Args:
posix_time: The POSIX timestamp.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
"""
if (posix_time < cls.TIMESTAMP_MIN_SECONDS or
posix_time > cls.TIMESTAMP_MAX_SECONDS):
return 0
return int(posix_time) * cls.MICRO_SECONDS_PER_SECOND
@classmethod
def FromPythonDatetime(cls, datetime_object):
"""Converts a Python datetime object into a timestamp.
Args:
datetime_object: The datetime object (instance of datetime.datetime).
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
"""
if not isinstance(datetime_object, datetime.datetime):
return 0
posix_time = int(calendar.timegm(datetime_object.utctimetuple()))
return cls.FromPosixTime(posix_time) + datetime_object.microsecond
@classmethod
def FromTimeString(
cls, time_string, dayfirst=False, gmt_as_timezone=True,
timezone=pytz.UTC):
"""Converts a string containing a date and time value into a timestamp.
Args:
time_string: String that contains a date and time value.
dayfirst: An optional boolean argument. If set to true then the
parser will change the precedence in which it parses timestamps
from MM-DD-YYYY to DD-MM-YYYY (and YYYY-MM-DD will be
YYYY-DD-MM, etc).
gmt_as_timezone: Sometimes the dateutil parser will interpret GMT and UTC
the same way, that is not make a distinction. By default
this is set to true, that is GMT can be interpreted
differently than UTC. If that is not the expected result
this attribute can be set to false.
timezone: Optional timezone object (instance of pytz.timezone) that
the data and time value in the string represents. This value
is used when the timezone cannot be determined from the string.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
Raises:
TimestampError: if the time string could not be parsed.
"""
if not gmt_as_timezone and time_string.endswith(' GMT'):
time_string = '{0:s}UTC'.format(time_string[:-3])
try:
# TODO: deprecate the use of dateutil parser.
datetime_object = dateutil.parser.parse(time_string, dayfirst=dayfirst)
except (TypeError, ValueError) as exception:
raise errors.TimestampError((
'Unable to convert time string: {0:s} in to a datetime object '
'with error: {1!s}').format(time_string, exception))
if datetime_object.tzinfo:
datetime_object = datetime_object.astimezone(pytz.UTC)
else:
datetime_object = timezone.localize(datetime_object)
return cls.FromPythonDatetime(datetime_object)
@classmethod
def GetNow(cls):
"""Retrieves the current time (now) as a timestamp in UTC.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC.
"""
time_elements = time.gmtime()
return calendar.timegm(time_elements) * 1000000
@classmethod
def LocaltimeToUTC(cls, timestamp, timezone, is_dst=False):
"""Converts the timestamp in localtime of the timezone to UTC.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
is_dst: A boolean to indicate the timestamp is corrected for daylight
savings time (DST) only used for the DST transition period.
Returns:
The timestamp which is an integer containing the number of micro seconds
since January 1, 1970, 00:00:00 UTC or 0 on error.
"""
if timezone and timezone != pytz.UTC:
datetime_object = (
datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=None) +
datetime.timedelta(microseconds=timestamp))
# Check if timezone is UTC since utcoffset() does not support is_dst
# for UTC and will raise.
datetime_delta = timezone.utcoffset(datetime_object, is_dst=is_dst)
seconds_delta = int(datetime_delta.total_seconds())
timestamp -= seconds_delta * cls.MICRO_SECONDS_PER_SECOND
return timestamp
@classmethod
def RoundToSeconds(cls, timestamp):
"""Takes a timestamp value and rounds it to a second precision."""
leftovers = timestamp % cls.MICRO_SECONDS_PER_SECOND
scrubbed = timestamp - leftovers
rounded = round(float(leftovers) / cls.MICRO_SECONDS_PER_SECOND)
return int(scrubbed + rounded * cls.MICRO_SECONDS_PER_SECOND)
def GetCurrentYear():
"""Determines the current year."""
datetime_object = datetime.datetime.now()
return datetime_object.year
def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):
  """Gets the year from a POSIX timestamp.
The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.
Args:
posix_time: An integer containing the number of seconds since
1970-01-01 00:00:00 UTC.
timezone: Optional timezone of the POSIX timestamp.
Returns:
The year of the POSIX timestamp.
Raises:
ValueError: If the posix timestamp is out of the range of supported values.
"""
datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)
return datetime_object.year
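# Minimal usage sketch (illustrative; relies on this module's existing imports and
# assumes the enclosing class above is exposed as `Timestamp`, a name not shown in
# this excerpt):
#   timestamp = Timestamp.CopyFromString('2021-06-01 12:30:45.123456+02:00')
#   Timestamp.CopyToIsoFormat(timestamp)  # '2021-06-01T10:30:45.123456+00:00'
# The module-level helpers can be exercised directly:
if __name__ == '__main__':
  assert GetYearFromPosixTime(1622505600) == 2021  # 2021-06-01 00:00:00 UTC
  assert GetCurrentYear() >= 2021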
| 33.672527
| 80
| 0.666406
|
f02aae99854e47b89ca4f27c9c872528589554e4
| 179
|
py
|
Python
|
blogvenv/Scripts/django-admin.py
|
jerem-uzoma/django-blog
|
987f5c51fb148b380a58a55daa7cd528a72244de
|
[
"MIT"
] | null | null | null |
blogvenv/Scripts/django-admin.py
|
jerem-uzoma/django-blog
|
987f5c51fb148b380a58a55daa7cd528a72244de
|
[
"MIT"
] | null | null | null |
blogvenv/Scripts/django-admin.py
|
jerem-uzoma/django-blog
|
987f5c51fb148b380a58a55daa7cd528a72244de
|
[
"MIT"
] | null | null | null |
#!c:\users\fslclient\pythondjango\clone\blog\blogvenv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 29.833333
| 72
| 0.798883
|
02155d9f15cda41f92c8818dfd1a967aa4b1dc6d
| 6,420
|
py
|
Python
|
tests/pytests/functional/modules/test_aptpkg.py
|
litnialex/salt
|
ed773dcacedebb01a0d34b5e8e66e57bdf2e2d0f
|
[
"Apache-2.0"
] | 3
|
2015-08-30T04:23:47.000Z
|
2018-07-15T00:35:23.000Z
|
tests/pytests/functional/modules/test_aptpkg.py
|
litnialex/salt
|
ed773dcacedebb01a0d34b5e8e66e57bdf2e2d0f
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/functional/modules/test_aptpkg.py
|
litnialex/salt
|
ed773dcacedebb01a0d34b5e8e66e57bdf2e2d0f
|
[
"Apache-2.0"
] | 1
|
2016-10-28T03:58:05.000Z
|
2016-10-28T03:58:05.000Z
|
import pathlib
import shutil
import pytest
import salt.exceptions
import salt.modules.aptpkg as aptpkg
import salt.modules.cmdmod as cmd
import salt.modules.file as file
import salt.utils.files
import salt.utils.stringutils
from tests.support.mock import Mock, patch
pytestmark = [
pytest.mark.skip_if_binaries_missing("apt-cache", "grep"),
]
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
aptpkg: {
"__salt__": {
"cmd.run_all": cmd.run_all,
"cmd.run": cmd.run,
"file.replace": file.replace,
"file.append": file.append,
"file.grep": file.grep,
},
"__opts__": minion_opts,
},
file: {
"__salt__": {"cmd.run_all": cmd.run_all},
"__utils__": {
"files.is_text": salt.utils.files.is_text,
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
"__opts__": minion_opts,
},
}
@pytest.fixture()
def revert_repo_file(tmp_path):
try:
repo_file = pathlib.Path("/etc") / "apt" / "sources.list"
backup = tmp_path / "repo_backup"
# make copy of repo file
shutil.copy(str(repo_file), str(backup))
yield
finally:
# revert repo file
shutil.copy(str(backup), str(repo_file))
aptpkg.refresh_db()
def get_current_repo(multiple_comps=False):
"""
Get a repo currently in sources.list
multiple_comps:
Search for a repo that contains multiple comps.
For example: main, restricted
"""
    test_repo, comps = None, None
    with salt.utils.files.fopen("/etc/apt/sources.list") as fp:
for line in fp:
if line.startswith("#"):
continue
if "ubuntu.com" in line or "debian.org" in line:
test_repo = line.strip()
comps = test_repo.split()[3:]
if multiple_comps:
if len(comps) > 1:
break
else:
break
return test_repo, comps
def test_list_repos():
"""
Test aptpkg.list_repos
"""
ret = aptpkg.list_repos()
repos = [x for x in ret if "http" in x]
for repo in repos:
check_repo = ret[repo][0]
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in check_repo
assert pathlib.Path(check_repo["file"]).is_file()
assert check_repo["dist"] in check_repo["line"]
if isinstance(check_repo["comps"], list):
assert " ".join(check_repo["comps"]) in check_repo["line"]
else:
assert check_repo["comps"] in check_repo["line"]
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
if not test_repo:
pytest.skip("Did not detect an apt repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
assert ret["file"] == "/etc/apt/sources.list"
def test_get_repos_multiple_comps():
"""
Test aptpkg.get_repos when multiple comps
exist in repo.
"""
test_repo, comps = get_current_repo(multiple_comps=True)
if not test_repo:
pytest.skip("Did not detect an ubuntu repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
def test_get_repos_doesnot_exist():
"""
Test aptpkg.get_repos when passing a repo
that does not exist
"""
for test_repo in [
"doesnotexist",
"deb http://archive.ubuntu.com/ubuntu/ focal-backports compdoesnotexist",
]:
ret = aptpkg.get_repo(repo=test_repo)
assert not ret
@pytest.mark.skip_if_binaries_missing("apt-add-repository")
@pytest.mark.destructive_test
def test_del_repo(revert_repo_file):
"""
Test aptpkg.del_repo when passing repo
that exists. And checking correct error
is returned when it no longer exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.del_repo(repo=test_repo)
    assert "Repo '{}' has been removed".format(test_repo) in ret
with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
ret = aptpkg.del_repo(repo=test_repo)
    assert "Repo {} doesn't exist".format(test_repo) in exc.value.message
def test_expand_repo_def():
"""
Test aptpkg.expand_repo_def when the repo exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.expand_repo_def(repo=test_repo)
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in ret
assert pathlib.Path(ret["file"]).is_file()
assert ret["dist"] in ret["line"]
if isinstance(ret["comps"], list):
for comp in ret["comps"]:
assert comp in ret["line"]
else:
assert ret["comps"] in ret["line"]
@pytest.mark.destructive_test
def test_mod_repo(revert_repo_file):
"""
Test aptpkg.mod_repo when the repo exists.
"""
test_repo, comps = get_current_repo()
msg = "This is a test"
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, comments=msg)
assert sorted(ret[list(ret.keys())[0]]["comps"]) == sorted(comps)
ret = file.grep("/etc/apt/sources.list", msg)
assert "#{}".format(msg) in ret["stdout"]
@pytest.mark.destructive_test
def test_mod_repo_no_file(tmp_path, revert_repo_file):
"""
Test aptpkg.mod_repo when the file does not exist.
It should create the file.
"""
test_repo, comps = get_current_repo()
test_file = str(tmp_path / "test_repo")
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, file=test_file)
with salt.utils.files.fopen(test_file, "r") as fp:
ret = fp.read()
assert test_repo.split()[1] in ret.strip()
for comp in comps:
assert comp in ret
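# Illustration (not part of the upstream test suite): the assertions above assume
# that a one-line sources.list entry splits into type, uri, dist and comps, e.g.
# "deb http://archive.ubuntu.com/ubuntu/ focal main restricted", which
# aptpkg.get_repo() is expected to report field by field.
def test_sources_list_line_shape():
    example = "deb http://archive.ubuntu.com/ubuntu/ focal main restricted"
    parts = example.split()
    assert parts[0] == "deb"
    assert parts[1] == "http://archive.ubuntu.com/ubuntu/"
    assert parts[2] == "focal"
    assert parts[3:] == ["main", "restricted"]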
| 29.181818
| 81
| 0.59595
|
59b400e47b7ea8d6352ddda671388ca39a3bab81
| 279
|
py
|
Python
|
pines_raspberry_img/take_photos.py
|
pines-and-electronics/pines_raspberry
|
b2f1448fe6f9897ce8bd5763f86c942a2b3601d4
|
[
"Apache-2.0"
] | null | null | null |
pines_raspberry_img/take_photos.py
|
pines-and-electronics/pines_raspberry
|
b2f1448fe6f9897ce8bd5763f86c942a2b3601d4
|
[
"Apache-2.0"
] | null | null | null |
pines_raspberry_img/take_photos.py
|
pines-and-electronics/pines_raspberry
|
b2f1448fe6f9897ce8bd5763f86c942a2b3601d4
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
# Capture a photo every 2 seconds over a total period of 10s, producing
# image0000.jpg, image0001.jpg, ... in the images directory.
subprocess.run(["raspistill", "-t", "10000", "-tl", "2000", "-o",
                "/home/pi/Documents/pines_raspberry/pines_raspberry_img/images/image%04d.jpg"], check=True)
| 39.857143
| 121
| 0.781362
|
25df1fbb9a825efdc4c257d3a4a5ff6ccd9ba551
| 1,183
|
py
|
Python
|
easy/728-Self Dividing Numbers.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | 2
|
2020-05-08T02:17:17.000Z
|
2020-05-17T04:55:56.000Z
|
easy/728-Self Dividing Numbers.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
easy/728-Self Dividing Numbers.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
"""
https://leetcode.com/problems/self-dividing-numbers/
A self-dividing number is a number that is divisible by every digit it contains.
For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
Also, a self-dividing number is not allowed to contain the digit zero.
Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.
Example 1:
Input:
left = 1, right = 22
Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
Note:
The boundaries of each input argument are 1 <= left <= right <= 10000.
"""
from typing import List
# time complexity: O(n), space complexity: O(1)
class Solution:
def selfDividingNumbers(self, left: int, right: int) -> List[int]:
result = []
for number in range(left, right + 1):
temp = number
add = True
while number > 0:
remainder = number % 10
if remainder == 0 or temp % remainder != 0:
add = False
break
else:
number //= 10
if add:
result.append(temp)
return result
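# Quick check against the example in the problem statement above (assumes the
# `List` import added at the top of this snippet).
if __name__ == "__main__":
    assert Solution().selfDividingNumbers(1, 22) == [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]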
| 31.131579
| 125
| 0.579036
|
024acbaff2cfa70c10356e6a154567928b0fd649
| 826
|
py
|
Python
|
examples/api/friendships/get_followers.py
|
javad94/instauto
|
8d4d068863176b0a1df13e5be3d5e32388036921
|
[
"MIT"
] | 79
|
2020-08-24T23:32:57.000Z
|
2022-02-20T19:03:17.000Z
|
examples/api/friendships/get_followers.py
|
klaytonpaiva/instauto
|
7f8c26b22f84d3d966625c7fa656e91cc623bb2e
|
[
"MIT"
] | 146
|
2020-07-25T16:27:48.000Z
|
2021-10-02T09:03:50.000Z
|
examples/api/friendships/get_followers.py
|
klaytonpaiva/instauto
|
7f8c26b22f84d3d966625c7fa656e91cc623bb2e
|
[
"MIT"
] | 41
|
2020-09-07T14:19:04.000Z
|
2022-02-07T23:08:10.000Z
|
from instauto.api.client import ApiClient
import instauto.api.actions.structs.friendships as fs
client = ApiClient.initiate_from_file('.instauto.save')
# Instauto has a `followers_get` endpoint for retrieving followers. The endpoint
# returns two values: the input object and the retrieved response. You can re-use
# the input object to enable pagination:
user_id = "12345678"
obj = fs.GetFollowers(user_id)
obj, response = client.followers_get(obj)
followers = response.json()['users']
# Let's retrieve the first 50 followers of the user.
while response and len(followers) < 50:
    # We check if the response is 'truthy'. This is important, since it will be
    # `False` if there are no more followers to retrieve.
obj, response = client.followers_get(obj)
followers.extend(response.json()['users'])
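# Optional follow-up (sketch): pages can overlap, so deduplicate the collected
# followers by their user id; the 'pk' key is assumed from the private-API payload.
unique_followers = {user.get('pk'): user for user in followers}
print("collected", len(unique_followers), "unique followers")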
| 37.545455
| 91
| 0.75908
|
31a0cce9c77cde72f8c03bc18a1802a82c04e9e5
| 343
|
py
|
Python
|
4/lab4_4.py
|
Vasniktel/prometheus-python
|
b9c1614bd5faf41762b3217ba5e1d92f5701f14e
|
[
"MIT"
] | 2
|
2017-10-10T20:11:57.000Z
|
2018-04-13T18:39:41.000Z
|
4/lab4_4.py
|
Vasniktel/prometheus-python
|
b9c1614bd5faf41762b3217ba5e1d92f5701f14e
|
[
"MIT"
] | null | null | null |
4/lab4_4.py
|
Vasniktel/prometheus-python
|
b9c1614bd5faf41762b3217ba5e1d92f5701f14e
|
[
"MIT"
] | null | null | null |
import sys
arg_1 = sys.argv[1]
arg_2 = sys.argv[2]
count = 0
for i in range(int(arg_1), int(arg_2) + 1):
str_ = str(i)
if len(str_) < 6:
str_ = '0' * (6 - len(str_)) + str_
int_1 = int_2 = 0
for j in range(6):
if j < 3:
int_1 += int(str_[j])
else:
int_2 += int(str_[j])
if int_1 == int_2:
count += 1
print(count)
| 14.291667
| 44
| 0.553936
|
c33fc34dffbac7a493fb8fbedbf708d8d6b78f60
| 776
|
py
|
Python
|
lab/refactoring/rename_method.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
lab/refactoring/rename_method.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
lab/refactoring/rename_method.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
# by Kami Bigdely
# Rename Method
# Reference: https://parade.com/1039985/marynliles/pick-up-lines/
def area_under_graph(graph): # TODO: Rename this function to reflect what it's doing.
"""Calculate the area under the input graph."""
# bla bla bla.
pass
#####################
def get_max_value(li): # TODO: Rename this function to reflect what it's doing.
m = li[0]
for value in li:
if value > m:
m = value
return m
li = [5, -1, 43, 32, 87, -100]
print(get_max_value(li))
############################
def create_word_list(sentence): # TODO: Rename this function to reflect what it's doing.
words = sentence[0:].split(' ')
return words
print(create_word_list('If you were a vegetable, you’d be a ‘cute-cumber.'))
| 25.866667
| 89
| 0.615979
|
4dd5df4de876c9d66658c10de2daa8145f3e3499
| 2,730
|
py
|
Python
|
invenio_vocabularies/datastreams/datastreams.py
|
ctrl-alt-soneca/invenio-vocabularies
|
a78a70529b3191e5f77c3e43148dfa9a51773e36
|
[
"MIT"
] | null | null | null |
invenio_vocabularies/datastreams/datastreams.py
|
ctrl-alt-soneca/invenio-vocabularies
|
a78a70529b3191e5f77c3e43148dfa9a51773e36
|
[
"MIT"
] | null | null | null |
invenio_vocabularies/datastreams/datastreams.py
|
ctrl-alt-soneca/invenio-vocabularies
|
a78a70529b3191e5f77c3e43148dfa9a51773e36
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Base data stream."""
from .errors import TransformerError, WriterError
class StreamEntry:
"""Object to encapsulate streams processing."""
def __init__(self, entry, errors=None):
"""Constructor."""
self.entry = entry
self.errors = errors or []
class BaseDataStream:
"""Base data stream."""
def __init__(self, reader, writers, transformers=None, *args, **kwargs):
"""Constructor.
:param reader: the reader object.
:param writers: an ordered list of writers.
:param transformers: an ordered list of transformers to apply.
"""
self._reader = reader # a single entry point
        self._transformers = transformers or []
self._writers = writers
    def filter(self, stream_entry, *args, **kwargs):
        """Checks if a stream_entry should be filtered out (skipped)."""
        return False
def process(self, *args, **kwargs):
"""Iterates over the entries.
Uses the reader to get the raw entries and transforms them.
It will iterate over the `StreamEntry` objects returned by
the reader, apply the transformations and yield the result of
writing it.
"""
for stream_entry in self._reader.read():
transformed_entry = self.transform(stream_entry)
if transformed_entry.errors:
yield transformed_entry
elif not self.filter(transformed_entry):
yield self.write(transformed_entry)
    def transform(self, stream_entry, *args, **kwargs):
        """Applies the transformations to a stream_entry."""
for transformer in self._transformers:
try:
stream_entry = transformer.apply(stream_entry)
except TransformerError as err:
stream_entry.errors.append(
f"{transformer.__class__.__name__}: {str(err)}"
)
return stream_entry # break loop
return stream_entry
    def write(self, stream_entry, *args, **kwargs):
        """Passes the stream_entry to each of the writers."""
for writer in self._writers:
try:
writer.write(stream_entry)
except WriterError as err:
stream_entry.errors.append(
f"{writer.__class__.__name__}: {str(err)}"
)
return stream_entry
def total(self, *args, **kwargs):
"""The total of entries obtained from the origin."""
pass
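# Minimal usage sketch (illustrative only): the reader and writer below are
# hypothetical in-memory stand-ins mimicking the interfaces this module expects;
# they are not part of invenio-vocabularies.
class _ListReader:
    """Reader stand-in yielding StreamEntry objects from a Python list."""
    def __init__(self, entries):
        self._entries = entries
    def read(self):
        return (StreamEntry(entry) for entry in self._entries)
class _CollectWriter:
    """Writer stand-in collecting every entry it is asked to write."""
    def __init__(self):
        self.written = []
    def write(self, stream_entry):
        self.written.append(stream_entry.entry)
def _example_run():
    writer = _CollectWriter()
    stream = BaseDataStream(_ListReader(["fr", "en"]), [writer], transformers=[])
    list(stream.process())  # process() is a generator, so drain it
    assert writer.written == ["fr", "en"]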
| 32.5
| 76
| 0.611355
|
4470338bf48bd51fcaffd6b48a5fd36a7bd2fd47
| 1,124
|
py
|
Python
|
app/metaclassexer.py
|
asuraswrath/pysample
|
efc125bc6af41d68a86e4b09b3eb1c0c35e43d1f
|
[
"MIT"
] | null | null | null |
app/metaclassexer.py
|
asuraswrath/pysample
|
efc125bc6af41d68a86e4b09b3eb1c0c35e43d1f
|
[
"MIT"
] | null | null | null |
app/metaclassexer.py
|
asuraswrath/pysample
|
efc125bc6af41d68a86e4b09b3eb1c0c35e43d1f
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
sensitive_words_list = ['asshole', 'fuck', 'shit']
def detect_sensitive_words(string):
''' detect sensitive words'''
#import pdb;pdb.set_trace()
words_detected = list(filter(lambda word: word in string.lower(), sensitive_words_list))
if words_detected:
name_error_string = 'Sensitive word {0} detected in the string "{1}"'\
.format(','.join(map(lambda s:'"%s"' %s, words_detected)), string)
raise NameError(name_error_string)
class CleanerMeta(type):
''' the metaclass '''
def __new__(cls, class_name, bases, attrs):
#import pdb; pdb.set_trace()
detect_sensitive_words(class_name)
#import pdb; pdb.set_trace()
        # map() is lazy in Python 3, so iterate explicitly to actually run the check.
        for attribute_name in attrs:
            detect_sensitive_words(attribute_name)
print('Well done! You are a polite coder')
return type.__new__(cls, class_name, bases, attrs)
class APIBase(metaclass=CleanerMeta):
''' Base class for derive '''
#__metaclass__ = CleanerMeta
print('This is the APIBase')
if __name__ == '__main__':
    # Both the class name and its attribute contain sensitive words, so the
    # metaclass rejects the class definition with a NameError.
    try:
        class TestFuck(APIBase):
            shit = 1
    except NameError as error:
        print(error)
| 33.058824
| 92
| 0.653915
|
f2c9f21b0e6d41118b38ed6b495f032d16c120c2
| 982
|
py
|
Python
|
learning_python/lesson3/exercise3.py
|
khanhvc2003/pynet
|
872dc71e42025c31b8737ae0465311235e7f4769
|
[
"Apache-2.0"
] | 1
|
2020-04-02T01:18:38.000Z
|
2020-04-02T01:18:38.000Z
|
learning_python/lesson3/exercise3.py
|
khanhvc2003/pynet
|
872dc71e42025c31b8737ae0465311235e7f4769
|
[
"Apache-2.0"
] | null | null | null |
learning_python/lesson3/exercise3.py
|
khanhvc2003/pynet
|
872dc71e42025c31b8737ae0465311235e7f4769
|
[
"Apache-2.0"
] | 4
|
2019-12-17T03:18:52.000Z
|
2019-12-17T03:21:21.000Z
|
#!/usr/bin/env python
'''
Read the 'show_lldp_neighbors_detail.txt' file. Loop over the lines of this file. Keep reading the
lines until you have encountered the remote "System Name" and remote "Port id". Save these two items
into variables and print them to the screen. You should extract only the system name and port id
from the lines (i.e. your variables should only have 'twb-sf-hpsw1' and '15'). Break out of your
loop once you have retrieved these two items.
'''
from __future__ import unicode_literals, print_function
with open("show_lldp_neighbors_detail.txt") as f:
show_lldp = f.read()
system_name, port_id = (None, None)
for line in show_lldp.splitlines():
if 'System Name: ' in line:
_, system_name = line.split('System Name: ')
elif 'Port id: ' in line:
_, port_id = line.split('Port id: ')
if port_id and system_name:
break
print()
print("System Name: {}".format(system_name))
print("Port ID: {}".format(port_id))
print()
| 33.862069
| 100
| 0.711813
|
c03852e50d51dd5c1bdca8ba54b412db81ffcc9c
| 441
|
py
|
Python
|
examples/marbles/hot_datetime.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
examples/marbles/hot_datetime.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
examples/marbles/hot_datetime.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
import datetime
import rx
import rx.operators as ops
"""
Delay the emission of elements to the specified datetime.
"""
now = datetime.datetime.utcnow()
dt = datetime.timedelta(seconds=3.0)
duetime = now + dt
print('{} -> now\n'
'{} -> start of emission in {}s'.format(now, duetime, dt.total_seconds()))
hot = rx.hot('10--11--12--13--(14,|)', timespan=0.2, duetime=duetime)
source = hot.pipe(ops.do_action(print))
source.run()
| 21
| 81
| 0.673469
|
a33fdbd69615cfbcdb68b7d8c2e2661f46afa24e
| 458
|
py
|
Python
|
examples/other/icon.py
|
danielhrisca/vedo
|
487568b7956a67b87752e3d518ba3f7e87b327a6
|
[
"CC0-1.0"
] | null | null | null |
examples/other/icon.py
|
danielhrisca/vedo
|
487568b7956a67b87752e3d518ba3f7e87b327a6
|
[
"CC0-1.0"
] | null | null | null |
examples/other/icon.py
|
danielhrisca/vedo
|
487568b7956a67b87752e3d518ba3f7e87b327a6
|
[
"CC0-1.0"
] | null | null | null |
"""Make an icon to indicate orientation
and place it in one of the 4 corners
within the same renderer.
"""
from vedo import *
vp = Plotter(axes=5)
# axes type 5 builds an annotated orientation cube
vp.load(datadir+'porsche.ply').lighting('metallic')
vp.show(interactive=0)
elg = load(datadir+"images/embl_logo.jpg")
vp.addIcon(elg, pos=2, size=0.06)
vp.addIcon(VedoLogo(), pos=1, size=0.06)
vp += Text2D(__doc__, pos=8, s=0.8)
vp.show(interactive=1)
| 19.083333
| 51
| 0.716157
|
d552fe352e376385d91c68f7968012b09848e600
| 2,959
|
py
|
Python
|
cfn.py
|
lukedmor/dragv2-jsinput
|
c3f3e90a27354d51b69da3130e08a76b43235b84
|
[
"MIT"
] | null | null | null |
cfn.py
|
lukedmor/dragv2-jsinput
|
c3f3e90a27354d51b69da3130e08a76b43235b84
|
[
"MIT"
] | null | null | null |
cfn.py
|
lukedmor/dragv2-jsinput
|
c3f3e90a27354d51b69da3130e08a76b43235b84
|
[
"MIT"
] | null | null | null |
# Regex (this is to make python pretty in html):
# .replace(/[\n\r]/g,'<br>')
# .replace(/\s{4}/g,' ')
# .replace(/\s+/g,' ')
# .replace(/(def |import|if|elif|else|for|return|continue|break)/g,'<span style="color:#8A59A8;">$1</span>')
# .replace(/(None|False|True|\d+(?=]|:|<br>|,))/g,'<span style="color:#ED821C;">$1</span>')
# .replace(/( not | in |==|!=|\+| - |&lt;|&gt;| and | or )/g,'<span style="color:#3D999E;">$1</span>')
# .replace(/([\w\.]+\(.*?\))/g,'<span style="color:#5C70AD;">$1</span>')
# .replace(/("[\w\s]+?")/g,'<span style="color:#4A9933;">$1</span>')
# .replace(/<\/span> </g,' </span><')
# .replace(/(# .+?)<br/g,'<span style="opacity:.5">$1</span><br')
# .replace(/'/g,"\'")
import json
solutions = $VAL$ # [SOLUTION,...]
common_mistakes = [] # [[FALSE_SOLUTION,MSG],...] # For predictable false solutions and corresponding feedback.
# You can mess with this lots
def isNear(pos, target, d):
return abs(pos[0]-target[0])<d["size"]["width"]/2 and abs(pos[1]-target[1])<d["size"]["height"]/2
# Definitely don't mess with this
def rcheck(states, targets, d):
if len(states) != len(targets): return False
if len(states) == 0: return True
suitables = [state for state in states if isNear(state, targets[0], d)]
for suitable in suitables:
newStates=list(states)
newStates.remove(suitable)
if rcheck(newStates, targets[1:], d): return True
return False
# You probably shouldn't be messing with this
def check(expect, ans):
par = json.loads(ans)
init = json.loads(par["answer"])
state = json.loads(par["state"])
dN = {init["draggables"][i]["id"]: i for i in xrange(len(init["draggables"]))}
def grade(solution):
if len(state)!=len(solution): return [False,'Too many/too few']
for id in state:
d=init["draggables"][dN[id]]
if id not in solution: return [False,id+' used, not in solution']
elif d.get("reusable"):
if not rcheck(state[id], solution[id], d): return [False,id+" in wrong place"]
else:
if not isNear(state[id], solution[id], d): return [False,id+" in wrong place"]
return [True,""]
for solution in solutions:
g = grade(solution)
# For debugging of a single solution, use: return {'ok':False,'msg':g[1]}
if g[0]: return True
for mistake in common_mistakes:
g = grade(mistake[0])
# For debugging of a single common mistake, use: return {'ok':False,'msg':g[1]}
if g[0]: return {'ok':False,'msg':mistake[1]}
    return False
################# For using draganddrop.grade(legacy_state, correct_answer) – edX's grading method:
legacy_state = []
for id in state:
if init["draggables"][dN[id]]["reusable"]: [legacy_state.append({id:coordinate}) for coordinate in state[id]]
else: legacy_state.append({id:state[id]})
legacy_state=json.dumps(legacy_state)
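# Illustration of the matching helpers above (kept as a comment, since this file is
# a template in which $VAL$ is substituted before it runs): with a 20x20 draggable
#   d = {"size": {"width": 20, "height": 20}}
#   states  = [[10, 10], [52, 48]]   # where the two copies were dropped
#   targets = [[50, 50], [12, 8]]    # where the solution expects them
# rcheck(states, targets, d) is True: each dropped position lies within half the
# draggable's width/height of a distinct target, regardless of ordering.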
| 45.523077
| 113
| 0.600879
|
991c0e2b30e8cdba143af80bf403311f290d407a
| 856
|
py
|
Python
|
mk011-pick_one.py
|
karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects
|
5d41d5274f01887f20c6fe82b9214305f4e81e36
|
[
"MIT"
] | null | null | null |
mk011-pick_one.py
|
karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects
|
5d41d5274f01887f20c6fe82b9214305f4e81e36
|
[
"MIT"
] | null | null | null |
mk011-pick_one.py
|
karakose77/udacity-cs101-intro-to-computer-science-exercises-and-projects
|
5d41d5274f01887f20c6fe82b9214305f4e81e36
|
[
"MIT"
] | null | null | null |
# Question 1: Pick One
# Define a procedure, pick_one, that takes three inputs: a Boolean
# and two other values. If the first input is True, it should return
# the second input. If the first input is False, it should return the
# third input.
# For example, pick_one(True, 37, 'hello') should return 37, and
# pick_one(False, 37, 'hello') should return 'hello'.
def pick_one(choice, second, third):
"""
Takes three inputs: a Boolean and two other values.
If the first input is True, it returns the second input.
If the first input is False, it returns the third input.
"""
    return second if choice else third
print(pick_one(True, 37, 'hello'))
# 37
print(pick_one(False, 37, 'hello'))
# hello
print(pick_one(True, 'red pill', 'blue pill'))
# red pill
print(pick_one(False, 'sunny', 'rainy'))
# rainy
| 26.75
| 69
| 0.688084
|