Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 - 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 - 972 |
| max_stars_repo_name | string | length 6 - 130 |
| max_stars_repo_head_hexsha | string | length 40 - 78 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64 ⌀ | 1 - 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 - 972 |
| max_issues_repo_name | string | length 6 - 130 |
| max_issues_repo_head_hexsha | string | length 40 - 78 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64 ⌀ | 1 - 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 - 972 |
| max_forks_repo_name | string | length 6 - 130 |
| max_forks_repo_head_hexsha | string | length 40 - 78 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64 ⌀ | 1 - 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 - 1.03M |
| avg_line_length | float64 | 1.13 - 941k |
| max_line_length | int64 | 2 - 941k |
| alphanum_fraction | float64 | 0 - 1 |
Row 1

- hexsha: 7121bc90502bb714cdb2acbbcead4ac4b1af4fec | size: 392 | ext: py | lang: Python
- max_stars: path src/pytorch_adapt/adapters/utils.py, repo MarkusSagen/pytorch-adapt @ 947b9f1b748d2078cecbf4a00c34f73108d9ecde, licenses ["MIT"], count 1, stars events 2021-12-15T19:36:01.000Z to 2021-12-15T19:36:01.000Z
- max_issues: same path/repo/head/licenses, count null, issue event datetimes null
- max_forks: same path/repo/head/licenses, count null, fork event datetimes null
- content:
from collections.abc import MutableMapping
import torch
def default_optimizer_tuple():
return (torch.optim.Adam, {"lr": 0.0001})
def with_opt(x):
suffix = "_opt"
if isinstance(x, str):
return f"{x}{suffix}"
if isinstance(x, list):
return [with_opt(y) for y in x]
if isinstance(x, MutableMapping):
return {with_opt(k): v for k, v in x.items()}
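# Illustrative usage sketch (the key names below are hypothetical, not from this repo):
#   with_opt("generator")                      -> "generator_opt"
#   with_opt(["generator", "discriminator"])   -> ["generator_opt", "discriminator_opt"]
#   with_opt({"generator": 0})                 -> {"generator_opt": 0}
# Any other input type falls through the branches above and returns None implicitly.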
- avg_line_length: 21.777778 | max_line_length: 53 | alphanum_fraction: 0.635204
Row 2

- hexsha: 137c492195cb384797f0678ab5b2854dac59237e | size: 166,434 | ext: py | lang: Python
- max_stars / max_issues / max_forks: path angr/analyses/cfg/cfg_fast.py, repo Pamplemousse/angr @ 0783af9afb379fd22940505ac031d8c0ab18ae23, licenses ["BSD-2-Clause"], counts and event datetimes null
- content:
import itertools
import logging
import math
import re
import string
from typing import List, Optional
from collections import defaultdict, OrderedDict
from sortedcontainers import SortedDict
import claripy
import cle
import pyvex
from cle.address_translator import AT
from archinfo.arch_soot import SootAddressDescriptor
from archinfo.arch_arm import is_arm_arch, get_real_address_if_arm
from ...knowledge_plugins.cfg import CFGNode, MemoryDataSort, MemoryData, IndirectJump
from ...knowledge_plugins.xrefs import XRef, XRefType
from ...misc.ux import deprecated
from ... import sim_options as o
from ...errors import (AngrCFGError, AngrSkipJobNotice, AngrUnsupportedSyscallError, SimEngineError, SimMemoryError,
SimTranslationError, SimValueError, SimOperationError, SimError, SimIRSBNoDecodeError,
)
from ...utils.constants import DEFAULT_STATEMENT
from ..forward_analysis import ForwardAnalysis
from .cfg_arch_options import CFGArchOptions
from .cfg_base import CFGBase
from .segment_list import SegmentList
VEX_IRSB_MAX_SIZE = 400
l = logging.getLogger(name=__name__)
class ContinueScanningNotification(RuntimeError):
pass
class FunctionReturn:
"""
FunctionReturn describes a function call at a specific location and its return location. Instances are hashable and comparable for equality.
"""
__slots__ = ('callee_func_addr', 'caller_func_addr', 'call_site_addr', 'return_to', )
def __init__(self, callee_func_addr, caller_func_addr, call_site_addr, return_to):
self.callee_func_addr = callee_func_addr
self.caller_func_addr = caller_func_addr
self.call_site_addr = call_site_addr
self.return_to = return_to
def __eq__(self, other):
"""
Comparison
:param FunctionReturn other: The other object
:return: True if equal, False otherwise
"""
return self.callee_func_addr == other.callee_func_addr and \
self.caller_func_addr == other.caller_func_addr and \
self.call_site_addr == other.call_site_addr and \
self.return_to == other.return_to
def __hash__(self):
return hash((self.callee_func_addr, self.caller_func_addr, self.call_site_addr, self.return_to))
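# Illustrative sketch (the addresses are hypothetical): since __eq__ and __hash__ are both
# defined over the same four fields, duplicate call/return descriptors collapse in sets:
#   fr1 = FunctionReturn(0x400100, 0x400000, 0x400050, 0x400055)
#   fr2 = FunctionReturn(0x400100, 0x400000, 0x400050, 0x400055)
#   assert fr1 == fr2 and len({fr1, fr2}) == 1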
class PendingJobs:
"""
A collection of pending jobs during CFG recovery.
"""
def __init__(self, functions, deregister_job_callback):
self._jobs = OrderedDict() # A mapping between function addresses and lists of pending jobs
self._functions = functions
self._deregister_job_callback = deregister_job_callback
self._returning_functions = set()
self._updated_functions = set() # Addresses of functions whose returning status have changed between two
# consecutive calls to cleanup().
self._job_count = 0
def __len__(self):
return self._job_count
def __bool__(self):
return self._job_count > 0
__nonzero__ = __bool__
def _pop_job(self, func_addr):
jobs = self._jobs[func_addr]
j = jobs.pop(-1)
if not jobs:
del self._jobs[func_addr]
self._job_count -= 1
return j
def add_job(self, job):
func_addr = job.returning_source
if func_addr not in self._jobs:
self._jobs[func_addr] = [ ]
self._jobs[func_addr].append(job)
self._job_count += 1
def pop_job(self, returning=True):
"""
Pop a job from the pending jobs list.
When returning == True, we prioritize the jobs whose functions are known to be returning (function.returning is
True). As an optimization, we are sorting the pending jobs list according to job.function.returning.
:param bool returning: Only pop a pending job if the corresponding function returns.
:return: A pending job if we can find one, or None if we cannot find any that satisfies the requirement.
:rtype: angr.analyses.cfg.cfg_fast.CFGJob
"""
if not self:
return None
if not returning:
return self._pop_job(next(reversed(self._jobs.keys())))
# Prioritize returning functions
for func_addr in reversed(self._jobs.keys()):
if func_addr not in self._returning_functions:
continue
return self._pop_job(func_addr)
return None
def cleanup(self):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:return: None
"""
pending_exits_to_remove = defaultdict(list)
for func_addr in self._updated_functions:
if func_addr not in self._jobs:
continue
jobs = self._jobs[func_addr]
for i, pe in enumerate(jobs):
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue
func = self._functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning("An expected function at %s is not found. Please report it to Fish.",
pe.returning_source if pe.returning_source is not None else 'None')
continue
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove[pe.returning_source].append(i)
for func_addr, indices in pending_exits_to_remove.items():
jobs = self._jobs[func_addr]
for index in reversed(indices):
job = jobs[index]
self._deregister_job_callback(job.func_addr, job)
del jobs[index]
self._job_count -= 1
if not jobs:
del self._jobs[func_addr]
self.clear_updated_functions()
def add_returning_function(self, func_addr):
"""
Mark a function as returning.
:param int func_addr: Address of the function that returns.
:return: None
"""
self._returning_functions.add(func_addr)
self._updated_functions.add(func_addr)
def add_nonreturning_function(self, func_addr):
"""
Mark a function as not returning.
:param int func_addr: Address of the function that does not return.
:return: None
"""
self._updated_functions.add(func_addr)
def clear_updated_functions(self):
"""
Clear the updated_functions set.
:return: None
"""
self._updated_functions.clear()
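# Illustrative flow (the function manager, callback, and addresses are hypothetical):
# jobs are queued per returning_source address, and pop_job(returning=True) only yields
# jobs whose source function has been marked via add_returning_function():
#   pending = PendingJobs(kb.functions, lambda func_addr, job: None)
#   pending.add_job(job)                       # job.returning_source == 0x400100
#   pending.pop_job(returning=True)            # -> None; 0x400100 not yet known to return
#   pending.add_returning_function(0x400100)
#   pending.pop_job(returning=True)            # -> the queued job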
#
# Descriptors of edges in individual function graphs
#
class FunctionEdge:
__slots__ = ('src_func_addr', 'stmt_idx', 'ins_addr',)
def apply(self, cfg):
raise NotImplementedError()
class FunctionTransitionEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'src_func_addr', 'to_outside', 'dst_func_addr', 'is_exception', )
def __init__(self, src_node, dst_addr, src_func_addr, to_outside=False, dst_func_addr=None, stmt_idx=None,
ins_addr=None, is_exception=False):
self.src_node = src_node
self.dst_addr = dst_addr
self.src_func_addr = src_func_addr
self.to_outside = to_outside
self.dst_func_addr = dst_func_addr
self.stmt_idx = stmt_idx
self.ins_addr = ins_addr
self.is_exception = is_exception
def apply(self, cfg):
to_outside = self.to_outside
if not to_outside:
# is it jumping to outside? Maybe we are seeing more functions now.
dst_node = cfg.model.get_any_node(self.dst_addr, force_fastpath=True)
if dst_node is not None and dst_node.function_address != self.src_func_addr:
to_outside = True
return cfg._function_add_transition_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
to_outside=to_outside,
dst_func_addr=self.dst_func_addr,
stmt_idx=self.stmt_idx,
ins_addr=self.ins_addr,
is_exception=self.is_exception,
)
class FunctionCallEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'ret_addr', 'syscall')
def __init__(self, src_node, dst_addr, ret_addr, src_func_addr, syscall=False, stmt_idx=None, ins_addr=None):
self.src_node = src_node
self.dst_addr = dst_addr
self.ret_addr = ret_addr
self.src_func_addr = src_func_addr
self.syscall = syscall
self.stmt_idx = stmt_idx
self.ins_addr = ins_addr
def apply(self, cfg):
return cfg._function_add_call_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
syscall=self.syscall,
stmt_idx=self.stmt_idx,
ins_addr=self.ins_addr,
)
class FunctionFakeRetEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'confirmed')
def __init__(self, src_node, dst_addr, src_func_addr, confirmed=None):
self.src_node = src_node
self.dst_addr = dst_addr
self.src_func_addr = src_func_addr
self.confirmed = confirmed
def apply(self, cfg):
return cfg._function_add_fakeret_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
confirmed=self.confirmed,
)
class FunctionReturnEdge(FunctionEdge):
__slots__ = ('ret_from_addr', 'ret_to_addr', 'dst_func_addr')
def __init__(self, ret_from_addr, ret_to_addr, dst_func_addr):
self.ret_from_addr = ret_from_addr
self.ret_to_addr = ret_to_addr
self.dst_func_addr = dst_func_addr
def apply(self, cfg):
return cfg._function_add_return_edge(
self.ret_from_addr,
self.ret_to_addr,
self.dst_func_addr
)
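# Illustrative sketch (the nodes and addresses are hypothetical): these FunctionEdge
# descriptors are deliberately lightweight; they are attached to a CFGJob and only
# materialized later, when apply() forwards to the matching cfg._function_add_*_edge():
#   edge = FunctionCallEdge(src_node, 0x400200, ret_addr=0x400105, src_func_addr=0x400000)
#   job = CFGJob(0x400200, 0x400200, 'Ijk_Call', func_edges=[edge])
#   ...
#   job.apply_function_edges(cfg, clear=True)   # invokes edge.apply(cfg) for each edge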
#
# CFGJob
#
class CFGJob:
"""
Defines a job to work on during the CFG recovery
"""
__slots__ = ('addr', 'func_addr', 'jumpkind', 'ret_target', 'last_addr', 'src_node', 'src_ins_addr', 'src_stmt_idx',
'returning_source', 'syscall', '_func_edges', 'job_type', 'gp', )
JOB_TYPE_NORMAL = "Normal"
JOB_TYPE_FUNCTION_PROLOGUE = "Function-prologue"
JOB_TYPE_COMPLETE_SCANNING = "Complete-scanning"
def __init__(self, addr: int, func_addr: int, jumpkind: str,
ret_target: Optional[int]=None, last_addr: Optional[int]=None,
src_node: Optional[CFGNode]=None, src_ins_addr:Optional[int]=None,
src_stmt_idx: Optional[int]=None, returning_source=None, syscall: bool=False,
func_edges: Optional[List]=None, job_type=JOB_TYPE_NORMAL,
gp: Optional[int]=None):
self.addr = addr
self.func_addr = func_addr
self.jumpkind = jumpkind
self.ret_target = ret_target
self.last_addr = last_addr
self.src_node = src_node
self.src_ins_addr = src_ins_addr
self.src_stmt_idx = src_stmt_idx
self.returning_source = returning_source
self.syscall = syscall
self.job_type = job_type
self.gp = gp # Used in MIPS32/MIPS64. Value of the gp register in the caller function. Only set at call sites.
self._func_edges = func_edges
def add_function_edge(self, edge):
if self._func_edges is None:
self._func_edges = [ ]
self._func_edges.append(edge)
def apply_function_edges(self, cfg, clear=False):
if not self._func_edges:
return
for edge in self._func_edges:
edge.apply(cfg)
if clear:
self._func_edges = None
def __repr__(self):
if isinstance(self.addr, SootAddressDescriptor):
return "<CFGJob {}>".format(self.addr)
else:
return "<CFGJob%s %#08x @ func %#08x>" % (" syscall" if self.syscall else "", self.addr, self.func_addr)
def __eq__(self, other):
return self.addr == other.addr and \
self.func_addr == other.func_addr and \
self.jumpkind == other.jumpkind and \
self.ret_target == other.ret_target and \
self.last_addr == other.last_addr and \
self.src_node == other.src_node and \
self.src_stmt_idx == other.src_stmt_idx and \
self.src_ins_addr == other.src_ins_addr and \
self.returning_source == other.returning_source and \
self.syscall == other.syscall
def __hash__(self):
return hash((self.addr, self.func_addr, self.jumpkind, self.ret_target, self.last_addr, self.src_node,
self.src_stmt_idx, self.src_ins_addr, self.returning_source, self.syscall)
)
class CFGFast(ForwardAnalysis, CFGBase): # pylint: disable=abstract-method
"""
We find functions inside the given binary and build a control-flow graph very quickly: instead of simulating program execution, keeping track of states, and performing expensive data-flow analysis, CFGFast only performs lightweight analyses combined with heuristics and some strong assumptions.
In order to identify as many functions as possible, and as accurately as possible, the following sequence of operations is followed:
# Active scanning
- If the binary has "function symbols" (TODO: this term is not accurate enough), they are used as the starting points of code scanning
- If the binary does not have any "function symbol", we will first perform a function prologue scanning on the
entire binary, and start from those places that look like function beginnings
- Otherwise, the binary's entry point will be the starting point for scanning
# Passive scanning
- After all active scans are done, we will go through the whole image and scan all code pieces
Due to the nature of those techniques that are used here, a base address is often not required to use this analysis
routine. However, with a correct base address, CFG recovery will almost always yield a much better result. A custom
analysis, called GirlScout, is specifically made to recover the base address of a binary blob. After the base
address is determined, you may want to reload the binary with the new base address by creating a new Project object,
and then re-recover the CFG.
"""
# TODO: Move arch_options to CFGBase, and add those logic to CFGEmulated as well.
PRINTABLES = string.printable.replace("\x0b", "").replace("\x0c", "").encode()
SPECIAL_THUNKS = {
'AMD64': {
bytes.fromhex('E807000000F3900FAEE8EBF9488D642408C3'): ('ret',),
bytes.fromhex('E807000000F3900FAEE8EBF948890424C3'): ('jmp', 'rax'),
}
}
tag = "CFGFast"
def __init__(self,
binary=None,
objects=None,
regions=None,
pickle_intermediate_results=False,
symbols=True,
function_prologues=True,
resolve_indirect_jumps=True,
force_segment=False,
force_complete_scan=True,
indirect_jump_target_limit=100000,
data_references=False,
cross_references=False,
normalize=False,
start_at_entry=True,
function_starts=None,
extra_memory_regions=None,
data_type_guessing_handlers=None,
arch_options=None,
indirect_jump_resolvers=None,
base_state=None,
exclude_sparse_regions=True,
skip_specific_regions=True,
heuristic_plt_resolving=None,
detect_tail_calls=False,
low_priority=False,
cfb=None,
model=None,
use_patches=False,
elf_eh_frame=True,
exceptions=True,
start=None, # deprecated
end=None, # deprecated
collect_data_references=None, # deprecated
extra_cross_references=None, # deprecated
**extra_arch_options
):
"""
:param binary: The binary to recover CFG on. By default the main binary is used.
:param objects: A list of objects to recover the CFG on. By default it will recover the CFG of
all loaded objects.
:param iterable regions: A list of tuples in the form of (start address, end address) describing memory
regions that the CFG should cover.
:param bool pickle_intermediate_results: If we want to store the intermediate results or not.
:param bool symbols: Get function beginnings from symbols in the binary.
:param bool function_prologues: Scan the binary for function prologues, and use those positions as function
beginnings
:param bool resolve_indirect_jumps: Try to resolve indirect jumps. This is necessary to resolve jump targets
from jump tables, etc.
:param bool force_segment: Force CFGFast to rely on binary segments instead of sections.
:param bool force_complete_scan: Perform a complete scan on the binary and maximize the number of identified
code blocks.
:param bool data_references: Enables the collection of references to data used by individual instructions.
This does not collect 'cross-references', particularly those that involve
multiple instructions. For that, see `cross_references`
:param bool cross_references: Whether CFGFast should collect "cross-references" from the entire program or
not. This will populate the knowledge base with references to and from each
recognizable address constant found in the code. Note that, because this
performs constant propagation on the entire program, it may be much slower and
consume more memory.
This option implies `data_references=True`.
:param bool normalize: Normalize the CFG as well as all function graphs after CFG recovery.
:param bool start_at_entry: Begin CFG recovery at the entry point of this project. Setting it to False
prevents CFGFast from viewing the entry point as one of the starting points of
code scanning.
:param list function_starts: A list of extra function starting points. CFGFast will try to resume scanning
from each address in the list.
:param list extra_memory_regions: A list of 2-tuple (start-address, end-address) that shows extra memory
regions. Integers falling inside will be considered as pointers.
:param list indirect_jump_resolvers: A custom list of indirect jump resolvers. If this list is None or empty,
default indirect jump resolvers specific to this architecture and binary
types will be loaded.
:param base_state: A state to use as a backer for all memory loads
:param bool detect_tail_calls: Enable aggressive tail-call optimization detection.
:param bool elf_eh_frame: Retrieve function starts (and maybe sizes later) from the .eh_frame of ELF
binaries.
:param int start: (Deprecated) The beginning address of CFG recovery.
:param int end: (Deprecated) The end address of CFG recovery.
:param CFGArchOptions arch_options: Architecture-specific options.
:param dict extra_arch_options: Any key-value pair in kwargs will be seen as an arch-specific option and will
be used to set the option value in self._arch_options.
Extra parameters that angr.Analysis takes:
:param progress_callback: Specify a callback function to get the progress during CFG recovery.
:param bool show_progressbar: Should CFGFast show a progressbar during CFG recovery or not.
:return: None
"""
ForwardAnalysis.__init__(self, allow_merging=False)
CFGBase.__init__(
self,
'fast',
0,
normalize=normalize,
binary=binary,
force_segment=force_segment,
base_state=base_state,
resolve_indirect_jumps=resolve_indirect_jumps,
indirect_jump_resolvers=indirect_jump_resolvers,
indirect_jump_target_limit=indirect_jump_target_limit,
detect_tail_calls=detect_tail_calls,
low_priority=low_priority,
model=model,
)
# necessary warnings
regions_not_specified = regions is None and binary is None and not objects
if self.project.loader._auto_load_libs is True and end is None and len(self.project.loader.all_objects) > 3 \
and regions_not_specified:
l.warning('"auto_load_libs" is enabled. With libraries loaded in project, CFGFast will cover libraries, '
'which may take significantly more time than expected. You may reload the binary with '
'"auto_load_libs" disabled, or specify "regions" to limit the scope of CFG recovery.'
)
if collect_data_references is not None:
l.warning('"collect_data_references" is deprecated and will be removed soon. Please use '
'"data_references" instead')
data_references = collect_data_references
if extra_cross_references is not None:
l.warning('"extra_cross_references" is deprecated and will be removed soon. Please use '
'"cross_references" instead')
cross_references = extra_cross_references
if start is not None or end is not None:
l.warning('"start" and "end" are deprecated and will be removed soon. Please use "regions" to specify one '
'or more memory regions instead.'
)
if regions is None:
regions = [ (start, end) ]
else:
l.warning('"regions", "start", and "end" are all specified. Ignoring "start" and "end".')
if binary is not None and not objects:
objects = [ binary ]
regions = regions if regions is not None else self._executable_memory_regions(objects=objects,
force_segment=force_segment
)
if exclude_sparse_regions:
new_regions = [ ]
for start_, end_ in regions:
if not self._is_region_extremely_sparse(start_, end_, base_state=base_state):
new_regions.append((start_, end_))
regions = new_regions
if skip_specific_regions:
if base_state is not None:
l.warning("You specified both base_state and skip_specific_regions. They may conflict with each other.")
new_regions = [ ]
for start_, end_ in regions:
if not self._should_skip_region(start_):
new_regions.append((start_, end_))
regions = new_regions
if not regions and self.project.arch.name != 'Soot':
raise AngrCFGError("Regions are empty or all regions are skipped. You may want to manually specify regions.")
# sort the regions
regions = sorted(regions, key=lambda x: x[0])
self._regions_size = sum((b - a) for a, b in regions)
# initial self._regions as a sorted dict
self._regions = SortedDict(regions)
self._pickle_intermediate_results = pickle_intermediate_results
self._use_symbols = symbols
self._use_function_prologues = function_prologues
self._force_complete_scan = force_complete_scan
self._use_elf_eh_frame = elf_eh_frame
self._use_exceptions = exceptions
if heuristic_plt_resolving is None:
# If unspecified, we only enable heuristic PLT resolving when there is at least one binary loaded with the
# ELF backend
self._heuristic_plt_resolving = len(self.project.loader.all_elf_objects) > 0
else:
self._heuristic_plt_resolving = heuristic_plt_resolving
self._start_at_entry = start_at_entry
self._extra_function_starts = function_starts
self._extra_memory_regions = extra_memory_regions
self._cross_references = cross_references
# You need data refs to get cross refs
self._collect_data_ref = data_references or self._cross_references
self._use_patches = use_patches
self._arch_options = arch_options if arch_options is not None else CFGArchOptions(
self.project.arch, **extra_arch_options)
self._data_type_guessing_handlers = [ ] if data_type_guessing_handlers is None else data_type_guessing_handlers
self._cfb = cfb
l.debug("CFG recovery covers %d regions:", len(self._regions))
for start_addr in self._regions:
l.debug("... %#x - %#x", start_addr, self._regions[start_addr])
# mapping to all known thunks
self._known_thunks = {}
self._initial_state = None
self._next_addr = None
# Create the segment list
self._seg_list = SegmentList()
self._read_addr_to_run = defaultdict(list)
self._write_addr_to_run = defaultdict(list)
self._function_prologue_addrs = None
self._remaining_function_prologue_addrs = None
# exception handling
self._exception_handling_by_endaddr = SortedDict()
#
# Variables used during analysis
#
self._pending_jobs = None
self._traced_addresses = None
self._function_returns = None
self._function_exits = None
self._gp_value: Optional[int] = None
# A mapping between address and the actual data in memory
# self._memory_data = { }
# A mapping between address of the instruction that's referencing the memory data and the memory data itself
# self.insn_addr_to_memory_data = { }
# self._graph = None
# Start working!
self._analyze()
def __getstate__(self):
d = dict(self.__dict__)
d['_progress_callback'] = None
return d
def __setstate__(self, d):
self.__dict__.update(d)
#
# Utils
#
@staticmethod
def _calc_entropy(data, size=None):
"""
Calculate the entropy of a piece of data
:param data: The target data to calculate entropy on
:param size: Size of the data, Optional.
:return: A float
"""
if not data:
return 0
entropy = 0
if size is None:
size = len(data)
data = bytes(pyvex.ffi.buffer(data, size))
for x in range(0, 256):
p_x = float(data.count(x)) / size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy
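# Worked example (illustrative): for a 4-byte buffer consisting of two 0x00 bytes and two
# 0xff bytes, p(0x00) = p(0xff) = 0.5 and every other byte value has p = 0, so
# entropy = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit per byte. A buffer of a single
# repeated byte scores 0.0, while uniformly random data approaches the maximum of 8.0.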
#
# Properties
#
@property
def graph(self):
return self._model.graph
@property
def _insn_addr_to_memory_data(self):
l.warning('_insn_addr_to_memory_data has been made public and is deprecated. Please fix your code accordingly.')
return self._model.insn_addr_to_memory_data
@property
def _memory_data(self):
return self._model.memory_data
@property
def memory_data(self):
return self._model.memory_data
@property
def jump_tables(self):
return self._model.jump_tables
@property
def insn_addr_to_memory_data(self):
return self._model.insn_addr_to_memory_data
#
# Private methods
#
# Methods for determining scanning scope
def _inside_regions(self, address):
"""
Check if the address is inside any existing region.
:param int address: Address to check.
:return: True if the address is within one of the memory regions, False otherwise.
:rtype: bool
"""
try:
start_addr = next(self._regions.irange(maximum=address, reverse=True))
except StopIteration:
return False
else:
return address < self._regions[start_addr]
def _get_min_addr(self):
"""
Get the minimum address out of all regions. We assume self._regions is sorted.
:return: The minimum address.
:rtype: int
"""
if not self._regions:
if self.project.arch.name != "Soot":
l.error("self._regions is empty or not properly set.")
return None
return next(self._regions.irange())
def _next_address_in_regions(self, address):
"""
Return the next immediate address that is inside any of the regions.
:param int address: The address to start scanning.
:return: The next address that is inside one of the memory regions.
:rtype: int
"""
if self._inside_regions(address):
return address
try:
return next(self._regions.irange(minimum=address, reverse=False))
except StopIteration:
return None
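# Illustrative sketch (the region boundaries are hypothetical): self._regions maps each
# region's start address to its end address, so with
#   self._regions == SortedDict({0x1000: 0x2000, 0x3000: 0x3800})
# the two helpers above behave as follows:
#   _inside_regions(0x1800)          -> True   (0x1000 <= 0x1800 < 0x2000)
#   _inside_regions(0x2800)          -> False  (falls into the gap between the regions)
#   _next_address_in_regions(0x2800) -> 0x3000 (start of the next region)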
# Methods for scanning the entire image
def _next_unscanned_addr(self, alignment=None):
"""
Find the next address that we haven't processed
:param alignment: Ensures the returned address is aligned to this number
:return: An address to process next, or None if all addresses have been processed
"""
# TODO: Take care of those functions that are already generated
if self._next_addr is None:
self._next_addr = self._get_min_addr()
curr_addr = self._next_addr
else:
curr_addr = self._next_addr + 1
if not self._inside_regions(curr_addr):
curr_addr = self._next_address_in_regions(curr_addr)
if curr_addr is None:
l.debug("All addresses within memory regions have been scanned.")
return None
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
if curr_addr % alignment > 0:
curr_addr = curr_addr - (curr_addr % alignment) + alignment
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._regions.items():
if start <= curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
break
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._inside_regions(curr_addr):
l.debug("Returning a new recon address: %#x", curr_addr)
return curr_addr
l.debug("%#x is beyond the ending point. Returning None.", curr_addr)
return None
def _load_a_byte_as_int(self, addr):
if self._base_state is not None:
try:
val = self._base_state.mem_concrete(addr, 1, inspect=False, disable_actions=True)
except SimValueError:
# Not concretizable
l.debug("Address %#x is not concretizable!", addr)
return None
else:
val = self._fast_memory_load_byte(addr)
if val is None:
return None
return val
def _scan_for_printable_strings(self, start_addr):
addr = start_addr
sz = []
is_sz = True
# Get data until we meet a null-byte
while self._inside_regions(addr):
l.debug("Searching address %x", addr)
val = self._load_a_byte_as_int(addr)
if val is None:
break
if val == 0:
if len(sz) < 4:
is_sz = False
break
if val not in self.PRINTABLES:
is_sz = False
break
sz.append(val)
addr += 1
if sz and is_sz:
l.debug("Got a string of %d chars: [%s]", len(sz), bytes(sz).decode())
string_length = len(sz) + 1
return string_length
# no string is found
return 0
def _scan_for_repeating_bytes(self, start_addr, repeating_byte, threshold=2):
"""
Scan from a given address and determine the occurrences of a given byte.
:param int start_addr: The address in memory to start scanning.
:param int repeating_byte: The repeating byte to scan for.
:param int threshold: The minimum occurrences.
:return: The occurrences of a given byte.
:rtype: int
"""
addr = start_addr
repeating_length = 0
while self._inside_regions(addr):
val = self._load_a_byte_as_int(addr)
if val is None:
break
if val == repeating_byte:
repeating_length += 1
else:
break
addr += 1
if repeating_length >= threshold:
return repeating_length
else:
return 0
def _next_code_addr_core(self):
"""
Call _next_unscanned_addr() first to get the next address that has not been scanned, then check whether the data at that address looks like code. If not, continue with the next unscanned address.
"""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
start_addr = next_addr
while True:
string_length = self._scan_for_printable_strings(start_addr)
if string_length:
self._seg_list.occupy(start_addr, string_length, "string")
start_addr += string_length
if self.project.arch.name in ('X86', 'AMD64'):
cc_length = self._scan_for_repeating_bytes(start_addr, 0xcc, threshold=1)
if cc_length:
self._seg_list.occupy(start_addr, cc_length, "alignment")
start_addr += cc_length
else:
cc_length = 0
zeros_length = self._scan_for_repeating_bytes(start_addr, 0x00)
if zeros_length:
self._seg_list.occupy(start_addr, zeros_length, "alignment")
start_addr += zeros_length
if string_length == 0 and cc_length == 0 and zeros_length == 0:
# umm now it's probably code
break
instr_alignment = self._initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
# occupy those few bytes
self._seg_list.occupy(start_addr, instr_alignment - (start_addr % instr_alignment), 'alignment')
start_addr = start_addr - start_addr % instr_alignment + \
instr_alignment
# trickiness: aligning the start_addr may create a new address that is outside any mapped region.
if not self._inside_regions(start_addr):
raise ContinueScanningNotification()
return start_addr
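# Illustrative walk-through (the layout below is hypothetical): starting from the next
# unscanned address, the loop above classifies and skips non-code runs before returning
# a candidate code address, e.g. on x86-64:
#   0x1000  "GCC: (Ubuntu) 9.4.0\x00"  -> occupied as "string", start_addr advances past it
#   0x1014  cc cc cc cc                -> occupied as "alignment" (int3 padding)
#   0x1018  00 00 00 00                -> occupied as "alignment" (zero padding)
#   0x101c  55 48 89 e5 ...            -> matches none of the checks; returned as a code candidate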
def _next_code_addr(self):
while True:
try:
addr = self._next_code_addr_core()
except ContinueScanningNotification:
continue
if addr is None:
return None
# if the new address is already occupied
if not self._seg_list.is_occupied(addr):
return addr
# Overriden methods from ForwardAnalysis
def _job_key(self, job):
return job.addr
def _pre_analysis(self):
# Call _initialize_cfg() before self.functions is used.
self._initialize_cfg()
# Scan for __x86_return_thunk and friends
self._known_thunks = self._find_thunks()
# Initialize variables used during analysis
self._pending_jobs = PendingJobs(self.functions, self._deregister_analysis_job)
self._traced_addresses = set()
self._function_returns = defaultdict(set)
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
self._function_exits = defaultdict(set)
# Create an initial state. Store it to self so we can use it globally.
self._initial_state = self.project.factory.blank_state(mode="fastpath",
add_options={o.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS})
initial_options = self._initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH}
# initial_options.remove(o.COW_STATES)
self._initial_state.options = initial_options
# Process known exception handlings
if self._use_exceptions:
self._preprocess_exception_handlings()
starting_points = set()
# clear all existing functions
self.kb.functions.clear()
if self._use_symbols:
starting_points |= self._function_addresses_from_symbols
if self._use_elf_eh_frame:
starting_points |= self._function_addresses_from_eh_frame
if self._extra_function_starts:
starting_points |= set(self._extra_function_starts)
# Sort it
starting_points = sorted(list(starting_points), reverse=False)
if self._start_at_entry and self.project.entry is not None and self._inside_regions(self.project.entry) and \
self.project.entry not in starting_points:
# make sure self.project.entry is inserted
starting_points = [ self.project.entry ] + starting_points
# Create jobs for all starting points
for sp in starting_points:
job = CFGJob(sp, sp, 'Ijk_Boring')
self._insert_job(job)
# register the job to function `sp`
self._register_analysis_job(sp, job)
self._updated_nonreturning_functions = set()
if self._use_function_prologues and self.project.concrete_target is None:
self._function_prologue_addrs = sorted(self._func_addrs_from_prologues())
# make a copy of those prologue addresses, so that we can pop from the list
self._remaining_function_prologue_addrs = self._function_prologue_addrs[::]
# make function_prologue_addrs a set for faster lookups
self._function_prologue_addrs = set(self._function_prologue_addrs)
def _pre_job_handling(self, job): # pylint:disable=arguments-differ
"""
Pre-job-processing tasks, such as updating the progress bar.
:param CFGJob job: The CFGJob instance.
:return: None
"""
if self._low_priority:
self._release_gil(len(self._nodes), 20, 0.000001)
# a new entry is picked. Deregister it
self._deregister_analysis_job(job.func_addr, job)
if not self._inside_regions(job.addr):
obj = self.project.loader.find_object_containing(job.addr)
if obj is not None and isinstance(obj, self._cle_pseudo_objects):
pass
else:
# it's outside permitted regions. skip.
raise AngrSkipJobNotice()
# Do not calculate progress if the user doesn't care about the progress at all
if self._show_progressbar or self._progress_callback:
max_percentage_stage_1 = 50.0
percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._regions_size
if percentage > max_percentage_stage_1:
percentage = max_percentage_stage_1
self._update_progress(percentage, cfg=self)
def _intra_analysis(self):
pass
def _get_successors(self, job): # pylint:disable=arguments-differ
# current_function_addr = job.func_addr
# addr = job.addr
# if current_function_addr != -1:
# l.debug("Tracing new exit %#x in function %#x", addr, current_function_addr)
# else:
# l.debug("Tracing new exit %#x", addr)
jobs = self._scan_block(job)
# l.debug("... got %d jobs: %s", len(jobs), jobs)
for job_ in jobs: # type: CFGJob
# register those jobs
self._register_analysis_job(job_.func_addr, job_)
return jobs
def _handle_successor(self, job, successor, successors):
return [ successor ]
def _merge_jobs(self, *jobs):
pass
def _widen_jobs(self, *jobs):
pass
def _post_process_successors(self, irsb, successors):
if is_arm_arch(self.project.arch):
if irsb.addr % 2 == 1:
# we are in thumb mode. filter successors
successors = self._arm_thumb_filter_jump_successors(irsb,
successors,
lambda tpl: tpl[1],
lambda tpl: tpl[0],
lambda tpl: tpl[3],
)
return successors
def _post_job_handling(self, job, new_jobs, successors):
pass
def _job_queue_empty(self):
if self._pending_jobs:
# fastpath
# look for a job that comes from a function that must return
# if we can find one, just use it
job = self._pop_pending_job(returning=True)
if job is not None:
self._insert_job(job)
return
self._clean_pending_exits()
# did we finish analyzing any function?
# fill in self._completed_functions
self._make_completed_functions()
# analyze function features, most importantly, whether each function returns or not
self._analyze_all_function_features()
# Clear _changed_functions set
self._updated_nonreturning_functions = set()
if self._pending_jobs:
self._clean_pending_exits()
job = self._pop_pending_job(returning=True)
if job is not None:
self._insert_job(job)
return
# Try to see if there is any indirect jump left to be resolved
# it's possible that certain indirect jumps must be resolved before the returning status of a function can be
# determined. e.g., in AArch64
# __stubs:00000001000064B0 ___stack_chk_fail
# __stubs:00000001000064B0 NOP
# __stubs:00000001000064B4 LDR X16, =__imp____stack_chk_fail
# __stubs:00000001000064B8 BR X16
#
# we need to rely on indirect jump resolving to identify this call to stack_chk_fail before knowing that
# function 0x100006480 does not return. Hence, we resolve indirect jumps before popping undecided pending jobs.
if self._resolve_indirect_jumps and self._indirect_jumps_to_resolve:
self._process_unresolved_indirect_jumps()
if self._job_info_queue:
return
if self._pending_jobs:
job = self._pop_pending_job(returning=False)
if job is not None:
self._insert_job(job)
return
if self._use_function_prologues and self._remaining_function_prologue_addrs:
while self._remaining_function_prologue_addrs:
prolog_addr = self._remaining_function_prologue_addrs[0]
self._remaining_function_prologue_addrs = self._remaining_function_prologue_addrs[1:]
if self._seg_list.is_occupied(prolog_addr):
continue
job = CFGJob(prolog_addr, prolog_addr, 'Ijk_Boring')
self._insert_job(job)
self._register_analysis_job(prolog_addr, job)
return
if self._force_complete_scan:
addr = self._next_code_addr()
if addr is None:
l.debug("Force-scan jumping failed")
else:
l.debug("Force-scanning to %#x", addr)
if addr is not None:
# if this is ARM and addr % 4 != 0, it has to be THUMB
if is_arm_arch(self.project.arch):
if addr % 2 == 0 and addr % 4 != 0:
addr |= 1
else:
# load 8 bytes and test with THUMB-mode prologues
bytes_prefix = self._fast_memory_load_bytes(addr, 8)
if any(re.match(prolog, bytes_prefix) for prolog in self.project.arch.thumb_prologs):
addr |= 1
job = CFGJob(addr, addr, "Ijk_Boring", last_addr=None, job_type=CFGJob.JOB_TYPE_COMPLETE_SCANNING)
self._insert_job(job)
self._register_analysis_job(addr, job)
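# Recap of the fallback order implemented above: (1) pop a pending job whose source
# function is known to return; (2) clean up pending exits, recompute function features,
# and retry; (3) resolve outstanding indirect jumps; (4) pop any remaining pending job;
# (5) seed new jobs from unprocessed function-prologue addresses; (6) if force_complete_scan
# is enabled, force-scan the next unscanned address (switching to THUMB on ARM when the
# address or prologue bytes suggest Thumb code).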
def _post_analysis(self):
self._make_completed_functions()
if self._normalize:
# Normalize the control flow graph first before rediscovering all functions
self.normalize()
if self.project.arch.name in ('X86', 'AMD64', 'MIPS32'):
self._remove_redundant_overlapping_blocks()
self._updated_nonreturning_functions = set()
# Revisit all edges and rebuild all functions to correctly handle returning/non-returning functions.
self.make_functions()
self._analyze_all_function_features(all_funcs_completed=True)
# Scan all functions, and make sure all fake ret edges are either confirmed or removed
for f in self.functions.values():
all_edges = f.transition_graph.edges(data=True)
callsites_to_functions = defaultdict(list) # callsites to functions mapping
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'call':
callsites_to_functions[src.addr].append(dst.addr)
edges_to_remove = [ ]
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'fake_return' and 'confirmed' not in data:
# Get all possible functions being called here
target_funcs = [ self.functions.function(addr=func_addr)
for func_addr in callsites_to_functions[src.addr]
]
if target_funcs and all(t is not None and t.returning is False for t in target_funcs):
# Remove this edge
edges_to_remove.append((src, dst))
else:
# Mark this edge as confirmed
f._confirm_fakeret(src, dst)
for edge in edges_to_remove:
f.transition_graph.remove_edge(*edge)
# Clear the cache
f._local_transition_graph = None
# Scan all functions, and make sure .returning for all functions are either True or False
for f in self.functions.values():
if f.returning is None:
f.returning = len(f.endpoints) > 0 # pylint:disable=len-as-condition
# Finally, mark endpoints of every single function
for function in self.kb.functions.values():
function.mark_nonreturning_calls_endpoints()
# optional: remove functions that must be alignments
self.mark_function_alignments()
# make return edges
self._make_return_edges()
if self.project.arch.name != 'Soot':
if self.project.loader.main_object.sections:
# this binary has sections
# make sure we have data entries assigned at the beginning of each data section
for sec in self.project.loader.main_object.sections:
if sec.memsize > 0 and not sec.is_executable and sec.is_readable:
for seg in self.project.loader.main_object.segments:
if seg.vaddr <= sec.vaddr < seg.vaddr + seg.memsize:
break
else:
continue
if sec.vaddr not in self.model.memory_data:
self.model.memory_data[sec.vaddr] = MemoryData(sec.vaddr, 0, MemoryDataSort.Unknown)
# If they asked for it, give it to them. All of it.
if self._cross_references:
self.do_full_xrefs()
r = True
while r:
r = self._tidy_data_references()
CFGBase._post_analysis(self)
self._finish_progress()
def do_full_xrefs(self, overlay_state=None):
"""
Perform xref recovery on all functions.
:param SimState overlay_state: An overlay state for loading constant data.
:return: None
"""
l.info("Building cross-references...")
# Time to make our CPU hurt
state = self.project.factory.blank_state() if overlay_state is None else overlay_state
for f_addr in self.functions:
f = None
try:
f = self.functions[f_addr]
if f.is_simprocedure:
continue
l.debug("\tFunction %s", f.name)
# constant prop
prop = self.project.analyses.Propagator(func=f, base_state=state)
# Collect all the refs
self.project.analyses.XRefs(func=f, replacements=prop.replacements)
except Exception: # pylint: disable=broad-except
if f is not None:
l.exception("Error collecting XRefs for function %s.", f.name, exc_info=True)
else:
l.exception("Error collecting XRefs for function %#x.", f_addr, exc_info=True)
# Methods to get start points for scanning
def _func_addrs_from_prologues(self):
"""
Scan the entire program image for function prologues, and start code scanning at those positions
:return: A list of possible function addresses
"""
# Pre-compile all regexes
regexes = list()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.append(r)
# EDG says: I challenge anyone bothering to read this to come up with a better
# way to handle CPU modes that affect instruction decoding.
# Since the only one we care about is ARM/Thumb right now
# we have this gross hack. Sorry about that.
thumb_regexes = list()
if hasattr(self.project.arch, 'thumb_prologs'):
for ins_regex in self.project.arch.thumb_prologs:
# Thumb prologues are found at even addrs, but their actual addr is odd!
# Isn't that great?
r = re.compile(ins_regex)
thumb_regexes.append(r)
# Construct the binary blob first
unassured_functions = [ ]
for start_, bytes_ in self._binary.memory.backers():
for regex in regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position)
# HACK part 2: Yes, i really have to do this
for regex in thumb_regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position+1)
l.info("Found %d functions with prologue scanning.", len(unassured_functions))
return unassured_functions
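# Illustrative sketch (the bytes and offsets are hypothetical): on AMD64, one of the
# arch.function_prologs regexes matches the classic "push rbp; mov rbp, rsp" sequence,
# so a memory backer containing b"\x55\x48\x89\xe5" at relative offset 0x1200 would add
# AT.from_rva(0x1200, self._binary).to_mva() to the candidate list, provided the mapped
# address lies in an executable region. (Which prologue patterns exist for a given
# architecture is determined by archinfo.)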
# Basic block scanning
def _scan_block(self, cfg_job):
"""
Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list
"""
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
# Fix the function address
# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and
# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,
resulting in the second function missing from the function manager.
if addr in self._function_addresses_from_symbols:
current_func_addr = addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_irsb(cfg_job, current_func_addr)
return entries
def _scan_procedure(self, cfg_job, current_func_addr):
"""
Checks the hooking procedure for this address, searching for new static exit points to add to the successors (generating entries for them) if this address has not been traced before. Updates previous CFG nodes with edges.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_func_addr: Address of the current function.
:return: List of successors
:rtype: list
"""
addr = cfg_job.addr
try:
if self.project.is_hooked(addr):
procedure = self.project.hooked_by(addr)
name = procedure.display_name
else:
procedure = self.project.simos.syscall_from_addr(addr)
name = procedure.display_name
if addr not in self._nodes:
cfg_node = CFGNode(addr, 0, self.model,
function_address=current_func_addr,
simprocedure_name=name,
no_ret=procedure.NO_RET,
block_id=addr,
)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
else:
cfg_node = self._nodes[addr]
except (SimMemoryError, SimEngineError):
return [ ]
self._graph_add_edge(cfg_node, cfg_job.src_node, cfg_job.jumpkind, cfg_job.src_ins_addr,
cfg_job.src_stmt_idx
)
self._function_add_node(cfg_node, current_func_addr)
# Add edges going to this node in function graphs
cfg_job.apply_function_edges(self, clear=True)
# If we have traced it before, don't trace it anymore
if addr in self._traced_addresses:
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(addr)
entries = [ ]
if procedure.ADDS_EXITS:
# Get two blocks ahead
if cfg_job.src_node is None:
l.warning("%s is supposed to yield new exits, but it fails to do so.", name)
return []
grandparent_nodes = list(self.graph.predecessors(cfg_job.src_node))
grandparent_node = grandparent_nodes[0] if grandparent_nodes else None
blocks_ahead = [ ]
if grandparent_node is not None:
blocks_ahead.append(self._lift(grandparent_node.addr).vex)
blocks_ahead.append(self._lift(cfg_job.src_node.addr).vex)
procedure.project = self.project
procedure.arch = self.project.arch
new_exits = procedure.static_exits(blocks_ahead)
for new_exit in new_exits:
addr_ = new_exit['address']
jumpkind = new_exit['jumpkind']
namehint = new_exit.get('namehint', None)
if isinstance(addr_, claripy.ast.BV) and not addr_.symbolic: # pylint:disable=isinstance-second-argument-not-valid-type
addr_ = addr_._model_concrete.value
if not isinstance(addr_, int):
continue
entries += self._create_jobs(addr_, jumpkind, current_func_addr, None, addr_, cfg_node, None,
None,
)
if namehint and addr_ not in self.kb.labels:
unique_label = self.kb.labels.get_unique_label(namehint)
self.kb.labels[addr_] = unique_label
if not procedure.NO_RET:
# it returns
cfg_node.has_return = True
self._function_exits[current_func_addr].add(addr)
self._function_add_return_site(addr, current_func_addr)
else:
# the procedure does not return
self._updated_nonreturning_functions.add(current_func_addr)
return entries
def _scan_irsb(self, cfg_job, current_func_addr):
"""
Generate a list of successors (each generated as an entry) for the IRSB.
Updates previous CFG nodes with edges.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_func_addr: Address of the current function
:return: a list of successors
:rtype: list
"""
addr, function_addr, cfg_node, irsb = self._generate_cfgnode(cfg_job, current_func_addr)
# Add edges going to this node in function graphs
cfg_job.apply_function_edges(self, clear=True)
# function_addr and current_function_addr can be different. e.g. when tracing an optimized tail-call that jumps
# into another function that has been identified before.
if cfg_node is None:
# exceptions occurred, or we cannot get a CFGNode for other reasons
return [ ]
self._graph_add_edge(cfg_node, cfg_job.src_node, cfg_job.jumpkind, cfg_job.src_ins_addr,
cfg_job.src_stmt_idx
)
self._function_add_node(cfg_node, function_addr)
if self.functions.get_by_addr(function_addr).returning is not True:
self._updated_nonreturning_functions.add(function_addr)
# If we have traced it before, don't trace it anymore
real_addr = get_real_address_if_arm(self.project.arch, addr)
if real_addr in self._traced_addresses:
# the address has been traced before
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(real_addr)
# irsb cannot be None here
# assert irsb is not None
# IRSB is only used once per CFGNode. We should be able to clean up the CFGNode here in order to save memory
cfg_node.irsb = None
caller_gp = None
if self.project.arch.name in {'MIPS32', 'MIPS64'}:
# the caller might have gp passed on
caller_gp = cfg_job.gp
self._process_block_arch_specific(addr, irsb, function_addr, caller_gp=caller_gp)
# Scan the basic block to collect data references
if self._collect_data_ref:
self._collect_data_references(irsb, addr)
# Get all possible successors
irsb_next, jumpkind = irsb.next, irsb.jumpkind
successors = [ ]
if irsb.statements:
last_ins_addr = None
ins_addr = addr
for i, stmt in enumerate(irsb.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
successors.append((i,
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr,
stmt.dst,
stmt.jumpkind
)
)
elif isinstance(stmt, pyvex.IRStmt.IMark):
last_ins_addr = ins_addr
ins_addr = stmt.addr + stmt.delta
else:
for ins_addr, stmt_idx, exit_stmt in irsb.exit_statements:
branch_ins_addr = ins_addr
if self.project.arch.branch_delay_slot \
and irsb.instruction_addresses \
and ins_addr in irsb.instruction_addresses:
idx_ = irsb.instruction_addresses.index(ins_addr)
if idx_ > 0:
branch_ins_addr = irsb.instruction_addresses[idx_ - 1]
successors.append((
stmt_idx,
branch_ins_addr,
exit_stmt.dst,
exit_stmt.jumpkind
))
# default statement
default_branch_ins_addr = None
if irsb.instruction_addresses:
if self.project.arch.branch_delay_slot:
if len(irsb.instruction_addresses) > 1:
default_branch_ins_addr = irsb.instruction_addresses[-2]
else:
default_branch_ins_addr = irsb.instruction_addresses[-1]
successors.append((DEFAULT_STATEMENT, default_branch_ins_addr, irsb_next, jumpkind))
# exception handling
exc = self._exception_handling_by_endaddr.get(addr + irsb.size, None)
if exc is not None:
successors.append(
(DEFAULT_STATEMENT,
default_branch_ins_addr,
exc.handler_addr,
'Ijk_Exception')
)
entries = [ ]
successors = self._post_process_successors(irsb, successors)
# Process each successor
for suc in successors:
stmt_idx, ins_addr, target, jumpkind = suc
entries += self._create_jobs(target, jumpkind, function_addr, irsb, addr, cfg_node, ins_addr,
stmt_idx
)
return entries
def _create_jobs(self, target, jumpkind, current_function_addr, irsb, addr, cfg_node, ins_addr, stmt_idx):
"""
Given a node and details of a successor, makes a list of CFGJobs
and if it is a call or exit marks it appropriately so in the CFG
:param int target: Destination of the resultant job
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: Address of the source instruction.
:param int stmt_idx: ID of the source statement.
:return: a list of CFGJobs
:rtype: list
"""
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U8, pyvex.IRConst.U16, pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
target_addr = None
if target_addr in self._known_thunks and jumpkind == 'Ijk_Boring':
thunk_kind = self._known_thunks[target_addr][0]
if thunk_kind == 'ret':
jumpkind = 'Ijk_Ret'
target_addr = None
elif thunk_kind == 'jmp':
pass # ummmmmm not sure about this one
else:
raise AngrCFGError("This shouldn't be possible")
jobs = [ ]
is_syscall = jumpkind.startswith("Ijk_Sys")
# Special handling:
# If a call instruction has a target that points to the immediate next instruction, we treat it as a boring jump
if jumpkind == "Ijk_Call" and \
not self.project.arch.call_pushes_ret and \
cfg_node.instruction_addrs and \
ins_addr == cfg_node.instruction_addrs[-1] and \
target_addr == irsb.addr + irsb.size:
jumpkind = "Ijk_Boring"
if target_addr is None:
# The target address is not a concrete value
if jumpkind == "Ijk_Ret":
# This block ends with a return instruction.
if current_function_addr != -1:
self._function_exits[current_function_addr].add(addr)
self._function_add_return_site(addr, current_function_addr)
self.functions[current_function_addr].returning = True
self._pending_jobs.add_returning_function(current_function_addr)
cfg_node.has_return = True
elif self._resolve_indirect_jumps and \
(jumpkind in ('Ijk_Boring', 'Ijk_Call', 'Ijk_InvalICache') or jumpkind.startswith('Ijk_Sys')):
# This is an indirect jump. Try to resolve it.
# FIXME: in some cases, a statementless irsb will be missing its instr addresses
# and this next part will fail. Use the real IRSB instead
irsb = self._lift(cfg_node.addr, size=cfg_node.size).vex
cfg_node.instruction_addrs = irsb.instruction_addresses
resolved, resolved_targets, ij = self._indirect_jump_encountered(addr, cfg_node, irsb,
current_function_addr, stmt_idx)
if resolved:
for resolved_target in resolved_targets:
if jumpkind == 'Ijk_Call':
jobs += self._create_job_call(cfg_node.addr, irsb, cfg_node, stmt_idx, ins_addr,
current_function_addr, resolved_target, jumpkind)
else:
to_outside, target_func_addr = self._is_branching_to_outside(addr, resolved_target,
current_function_addr)
edge = FunctionTransitionEdge(cfg_node, resolved_target, current_function_addr,
to_outside=to_outside, stmt_idx=stmt_idx, ins_addr=ins_addr,
dst_func_addr=target_func_addr,
)
ce = CFGJob(resolved_target, target_func_addr, jumpkind,
last_addr=resolved_target, src_node=cfg_node, src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr, func_edges=[ edge ],
)
jobs.append(ce)
return jobs
if ij is None:
# this is not a valid indirect jump. maybe it failed sanity checks.
# for example, `jr $v0` might show up in a MIPS binary without a following instruction (because
# decoding failed). in this case, `jr $v0` shouldn't be a valid instruction, either.
return [ ]
if jumpkind in ("Ijk_Boring", 'Ijk_InvalICache'):
resolved_as_plt = False
if irsb and self._heuristic_plt_resolving:
# Test it on the initial state. Does it jump to a valid location?
# It will be resolved only if this is a .plt entry
resolved_as_plt = self._resolve_plt(addr, irsb, ij)
if resolved_as_plt:
# this is definitely a PLT stub
jump_target = next(iter(ij.resolved_targets))
target_func_addr = jump_target # TODO: FIX THIS
edge = FunctionTransitionEdge(cfg_node, jump_target, current_function_addr,
to_outside=True, dst_func_addr=jump_target,
stmt_idx=stmt_idx, ins_addr=ins_addr,
)
ce = CFGJob(jump_target, target_func_addr, jumpkind, last_addr=jump_target,
src_node=cfg_node, src_stmt_idx=stmt_idx, src_ins_addr=ins_addr,
func_edges=[edge],
)
jobs.append(ce)
if resolved_as_plt:
# has been resolved as a PLT entry. Remove it from indirect_jumps_to_resolve
if ij.addr in self._indirect_jumps_to_resolve:
self._indirect_jumps_to_resolve.remove(ij.addr)
self._deregister_analysis_job(current_function_addr, ij)
else:
is_plt = addr in self.functions and self.functions.get_by_addr(addr).is_plt
if is_plt:
# this is definitely a PLT entry, but we could not resolve it. this is probably due to
# missing SimProcedures. we do not want to resolve this indirect jump again in the future.
self._indirect_jump_unresolved(ij)
else:
# add it to indirect_jumps_to_resolve
self._indirect_jumps_to_resolve.add(ij)
# register it as a job for the current function
self._register_analysis_job(current_function_addr, ij)
else: # jumpkind == "Ijk_Call" or jumpkind.startswith('Ijk_Sys')
self._indirect_jumps_to_resolve.add(ij)
self._register_analysis_job(current_function_addr, ij)
jobs += self._create_job_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, None,
jumpkind, is_syscall=is_syscall
)
elif target_addr is not None:
# This is a direct jump with a concrete target.
# pylint: disable=too-many-nested-blocks
if jumpkind in {'Ijk_Boring', 'Ijk_InvalICache', 'Ijk_Exception'}:
to_outside, target_func_addr = self._is_branching_to_outside(addr, target_addr, current_function_addr)
edge = FunctionTransitionEdge(cfg_node, target_addr, current_function_addr,
to_outside=to_outside,
dst_func_addr=target_func_addr,
ins_addr=ins_addr,
stmt_idx=stmt_idx,
is_exception=jumpkind == 'Ijk_Exception',
)
ce = CFGJob(target_addr, target_func_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_ins_addr=ins_addr, src_stmt_idx=stmt_idx, func_edges=[ edge ],
)
jobs.append(ce)
elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
jobs += self._create_job_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr,
target_addr, jumpkind, is_syscall=is_syscall
)
else:
# TODO: Support more jumpkinds
l.debug("Unsupported jumpkind %s", jumpkind)
l.debug("Instruction address: %#x", ins_addr)
return jobs
def _create_job_call(self, addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, target_addr, jumpkind,
is_syscall=False):
"""
        Generate a CFGJob for the target address, and also add a pending job for the return site to
        _pending_jobs if the callee may return to the succeeding position (only when the irsb arg is populated)
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = [ ]
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(mode="fastpath", addr=cfg_node.addr,
add_options={o.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS})
# Find the first successor with a syscall jumpkind
successors = self._simulate_block_with_resilience(tmp_state)
if successors is not None:
succ = next(iter(succ for succ in successors.flat_successors
if succ.history.jumpkind and succ.history.jumpkind.startswith("Ijk_Sys")), None)
else:
succ = None
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
if syscall_stub: # can be None if simos is not a subclass of SimUserspace
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != 'Soot':
return_site = addr + irsb.size # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(ins_addr.method, return_block_idx, stmt_idx + 1)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(cfg_node, new_function_addr, return_site, current_function_addr, syscall=is_syscall,
ins_addr=ins_addr, stmt_idx=stmt_idx,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(target_addr, new_function_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, syscall=is_syscall, func_edges=[ edge ],
gp=self.kb.functions[current_function_addr].info.get('gp', None),
)
jobs.append(ce)
callee_might_return = True
callee_function = None
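        # Determine whether the callee may return: look up (or, for syscalls and SimProcedures, create) the callee
        # function and check its `returning` attribute. Only a definitive `returning is False` prevents us from
        # creating a fake return job below.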
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(addr=new_function_addr, syscall=is_syscall, create=True)
else:
callee_function = self.kb.functions.function(addr=new_function_addr, syscall=is_syscall)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = [ ]
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(cfg_node, return_site, current_function_addr, confirmed=True)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(new_function_addr, return_site, current_function_addr)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(return_site, current_function_addr, 'Ijk_FakeRet', last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, returning_source=new_function_addr,
syscall=is_syscall, func_edges=func_edges)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(cfg_node, return_site, current_function_addr, confirmed=None)
func_edges.append(fakeret_edge)
fr = FunctionReturn(new_function_addr, current_function_addr, addr, return_site)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(return_site, current_function_addr, 'Ijk_FakeRet', last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, returning_source=new_function_addr,
syscall=is_syscall, func_edges=func_edges)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
def _simulate_block_with_resilience(self, state):
"""
        Execute a basic block with "On Error Resume Next". Give up when there is no way to move forward.
:param SimState state: The initial state to start simulation with.
:return: A SimSuccessors instance or None if we are unable to resume execution with resilience.
:rtype: SimSuccessors or None
"""
stmt_idx = 0
successors = None # make PyCharm's linting happy
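        # Retry execution, skipping one more statement each time a SimOperationError points at a failing statement.
        # Give up entirely on any other SimError.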
while True:
try:
successors = self.project.factory.successors(state, skip_stmts=stmt_idx)
break
except SimOperationError as ex:
stmt_idx = ex.stmt_idx + 1
continue
except SimError:
return None
return successors
def _is_branching_to_outside(self, src_addr, target_addr, current_function_addr):
"""
Determine if a branch is branching to a different function (i.e., branching to outside the current function).
:param int src_addr: The source address.
:param int target_addr: The destination address.
:param int current_function_addr: Address of the current function.
:return: A tuple of (to_outside, target_func_addr)
:rtype: tuple
"""
if not self._addrs_belong_to_same_section(src_addr, target_addr):
# if the target address is at another section, it has to be jumping to a new function
target_func_addr = target_addr
to_outside = True
else:
# it might be a jumpout
target_func_addr = None
real_target_addr = get_real_address_if_arm(self.project.arch, target_addr)
if real_target_addr in self._traced_addresses:
node = self.model.get_any_node(target_addr)
if node is not None:
target_func_addr = node.function_address
if target_func_addr is None:
target_func_addr = current_function_addr
to_outside = not target_func_addr == current_function_addr
return to_outside, target_func_addr
# Data reference processing
def _collect_data_references(self, irsb, irsb_addr):
"""
        Unoptimizes the IRSB and calls _add_data_reference for individual statements or
        for parts of statements (e.g. Store)
:param pyvex.IRSB irsb: Block to scan for data references
:param int irsb_addr: Address of block
:return: None
"""
if irsb.data_refs:
self._process_irsb_data_refs(irsb)
elif irsb.statements:
# for each statement, collect all constants that are referenced or used.
self._collect_data_references_by_scanning_stmts(irsb, irsb_addr)
def _process_irsb_data_refs(self, irsb):
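        # Record every data reference that VEX collected during lifting: occupy the referenced region in the
        # segment list (when its size is known) and create or update the corresponding MemoryData entry and
        # cross-reference.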
for ref in irsb.data_refs:
if ref.data_size:
self._seg_list.occupy(ref.data_addr, ref.data_size, "unknown")
self._add_data_reference(
irsb.addr,
ref.stmt_idx,
ref.ins_addr,
ref.data_addr,
data_size=ref.data_size,
data_type=ref.data_type_str
)
def _collect_data_references_by_scanning_stmts(self, irsb, irsb_addr):
# helper methods
def _process(stmt_idx_, data_, insn_addr, next_insn_addr, data_size=None, data_type=None):
"""
Helper method used for calling _add_data_reference after checking
for manipulation of constants
:param int stmt_idx_: Statement ID
:param data_: data manipulated by statement
:param int insn_addr: instruction address
:param int next_insn_addr: next instruction address
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
if type(data_) is pyvex.expr.Const: # pylint: disable=unidiomatic-typecheck
val = data_.con.value
elif type(data_) is int:
val = data_
else:
return
if val != next_insn_addr:
if data_size:
# Mark the region as unknown so we won't try to create a code block covering this region in the
# future.
self._seg_list.occupy(val, data_size, "unknown")
self._add_data_reference(irsb_addr, stmt_idx_, insn_addr, val, data_size=data_size, data_type=data_type)
# get all instruction addresses
instr_addrs = irsb.instruction_addresses
# for each statement, collect all constants that are referenced or used.
instr_addr = None
next_instr_addr = None
for stmt_idx, stmt in enumerate(irsb.statements):
if type(stmt) is pyvex.IRStmt.IMark: # pylint: disable=unidiomatic-typecheck
instr_addr = stmt.addr + stmt.delta
# there can be weird cases sometimes... I've seen two IMarks with the exact same address showing up one
# after the other.
if instr_addrs and instr_addr == instr_addrs[0]:
instr_addr = instr_addrs[0]
instr_addrs = instr_addrs[1 : ]
next_instr_addr = instr_addrs[0] if instr_addrs else None
elif type(stmt) is pyvex.IRStmt.WrTmp: # pylint: disable=unidiomatic-typecheck
if type(stmt.data) is pyvex.IRExpr.Load: # pylint: disable=unidiomatic-typecheck
# load
# e.g. t7 = LDle:I64(0x0000000000600ff8)
size = stmt.data.result_size(irsb.tyenv) // 8 # convert to bytes
_process(stmt_idx, stmt.data.addr, instr_addr, next_instr_addr, data_size=size, data_type='integer')
elif type(stmt.data) in (pyvex.IRExpr.Binop, ): # pylint: disable=unidiomatic-typecheck
# rip-related addressing
if stmt.data.op in ('Iop_Add32', 'Iop_Add64') and \
all(type(arg) is pyvex.expr.Const for arg in stmt.data.args):
# perform the addition
loc = stmt.data.args[0].con.value + stmt.data.args[1].con.value
_process(stmt_idx, loc, instr_addr, next_instr_addr)
else:
# binary operation
for arg in stmt.data.args:
_process(stmt_idx, arg, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.ITE:
for child_expr in stmt.data.child_expressions:
_process(stmt_idx, child_expr, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Put: # pylint: disable=unidiomatic-typecheck
# put
# e.g. PUT(rdi) = 0x0000000000400714
if stmt.offset not in (self._initial_state.arch.ip_offset, ):
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Store: # pylint: disable=unidiomatic-typecheck
# store addr
_process(stmt_idx, stmt.addr, instr_addr, next_instr_addr)
# store data
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Dirty:
_process(stmt_idx, stmt.mAddr, instr_addr, next_instr_addr, data_size=stmt.mSize, data_type='fp')
def _add_data_reference(self, irsb_addr, stmt_idx, insn_addr, data_addr, # pylint: disable=unused-argument
data_size=None, data_type=None):
"""
        Checks that the data address lies in a mapped memory region, and creates or updates the
        MemoryData entry in _memory_data as appropriate, labelling it as a segment boundary
        or with its data type.
:param int irsb_addr: irsb address
:param int stmt_idx: Statement ID
:param int insn_addr: instruction address
:param data_addr: address of data manipulated by statement
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
# Make sure data_addr is within a valid memory range
if not self.project.loader.find_segment_containing(data_addr):
# data might be at the end of some section or segment...
# let's take a look
for segment in self.project.loader.main_object.segments:
if segment.vaddr + segment.memsize == data_addr:
# yeah!
new_data = False
if data_addr not in self._memory_data:
data = MemoryData(data_addr, 0, MemoryDataSort.SegmentBoundary)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._cross_references:
cr = XRef(ins_addr=insn_addr, block_addr=irsb_addr, stmt_idx=stmt_idx,
memory_data=self.model.memory_data[data_addr], xref_type=XRefType.Offset,
)
self.kb.xrefs.add_xref(cr)
break
return
new_data = False
if data_addr not in self._memory_data:
if data_type is not None and data_size is not None:
data = MemoryData(data_addr, data_size, data_type, max_size=data_size)
else:
data = MemoryData(data_addr, 0, MemoryDataSort.Unknown)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._cross_references:
cr = XRef(ins_addr=insn_addr, block_addr=irsb_addr, stmt_idx=stmt_idx,
memory_data=self.model.memory_data[data_addr],
xref_type=XRefType.Offset,
)
self.kb.xrefs.add_xref(cr)
self.insn_addr_to_memory_data[insn_addr] = self._memory_data[data_addr]
def _tidy_data_references(self):
"""
:return: True if new data entries are found, False otherwise.
:rtype: bool
"""
# Make sure all memory data entries cover all data sections
keys = sorted(self._memory_data.keys())
for i, data_addr in enumerate(keys):
data = self._memory_data[data_addr]
if self._addr_in_exec_memory_regions(data.address):
# TODO: Handle data among code regions (or executable regions)
pass
else:
if i + 1 != len(keys):
next_data_addr = keys[i + 1]
else:
next_data_addr = None
# goes until the end of the section/segment
# TODO: the logic needs more testing
sec = self.project.loader.find_section_containing(data_addr)
next_sec_addr = None
if sec is not None:
last_addr = sec.vaddr + sec.memsize
else:
                    # it does not belong to any section. what's the next adjacent section? memory data does not go
                    # beyond section boundaries
next_sec = self.project.loader.find_section_next_to(data_addr)
if next_sec is not None:
next_sec_addr = next_sec.vaddr
seg = self.project.loader.find_segment_containing(data_addr)
if seg is not None:
last_addr = seg.vaddr + seg.memsize
else:
# We got an address that is not inside the current binary...
l.warning('_tidy_data_references() sees an address %#08x that does not belong to any '
'section or segment.', data_addr
)
last_addr = None
if next_data_addr is None:
boundary = last_addr
elif last_addr is None:
boundary = next_data_addr
else:
boundary = min(last_addr, next_data_addr)
if next_sec_addr is not None:
boundary = min(boundary, next_sec_addr)
if boundary is not None:
data.max_size = boundary - data_addr
                if data.max_size is None:
                    l.warning('Failed to determine the maximum size of memory data at %#x.', data_addr)
keys = sorted(self._memory_data.keys())
new_data_found = False
i = 0
# pylint:disable=too-many-nested-blocks
while i < len(keys):
data_addr = keys[i]
i += 1
memory_data = self._memory_data[data_addr]
if memory_data.sort == MemoryDataSort.SegmentBoundary:
continue
content_holder = [ ]
# let's see what sort of data it is
if memory_data.sort in (MemoryDataSort.Unknown, MemoryDataSort.Unspecified) or \
(memory_data.sort == MemoryDataSort.Integer and memory_data.size == self.project.arch.bytes):
data_type, data_size = self._guess_data_type(data_addr, memory_data.max_size,
content_holder=content_holder)
else:
data_type, data_size = memory_data.sort, memory_data.size
if data_type is not None:
memory_data.size = data_size
memory_data.sort = data_type
if len(content_holder) == 1:
memory_data.content = content_holder[0]
if memory_data.max_size is not None and (0 < memory_data.size < memory_data.max_size):
# Create another memory_data object to fill the gap
new_addr = data_addr + memory_data.size
new_md = MemoryData(new_addr, None, None, max_size=memory_data.max_size - memory_data.size)
self._memory_data[new_addr] = new_md
# Make a copy of all old references
old_crs = self.kb.xrefs.get_xrefs_by_dst(data_addr)
crs = [ ]
for old_cr in old_crs:
cr = old_cr.copy()
cr.memory_data = new_md
crs.append(cr)
self.kb.xrefs.add_xrefs(crs)
keys.insert(i, new_addr)
if data_type == MemoryDataSort.PointerArray:
# make sure all pointers are identified
pointer_size = self.project.arch.bytes
old_crs = self.kb.xrefs.get_xrefs_by_dst(data_addr)
for j in range(0, data_size, pointer_size):
ptr = self._fast_memory_load_pointer(data_addr + j)
# is this pointer coming from the current binary?
obj = self.project.loader.find_object_containing(ptr, membership_check=False)
if obj is not self.project.loader.main_object:
# the pointer does not come from current binary. skip.
continue
if self._seg_list.is_occupied(ptr):
sort = self._seg_list.occupied_by_sort(ptr)
if sort == 'code':
continue
if sort == 'pointer-array':
continue
# TODO: other types
if ptr not in self._memory_data:
new_md = MemoryData(ptr, 0, MemoryDataSort.Unknown, pointer_addr=data_addr + j)
self._memory_data[ptr] = new_md
# Make a copy of the old reference
crs = [ ]
for old_cr in old_crs:
cr = old_cr.copy()
cr.memory_data = new_md
crs.append(cr)
self.kb.xrefs.add_xrefs(crs)
new_data_found = True
else:
memory_data.size = memory_data.max_size
self._seg_list.occupy(data_addr, memory_data.size, memory_data.sort)
return new_data_found
def _guess_data_type(self, data_addr, max_size, content_holder=None):
"""
Make a guess to the data type.
        Users can provide their own data type guessing code when initializing a CFGFast instance, and each guessing
handler will be called if this method fails to determine what the data is.
:param int data_addr: Address of the data.
:param int max_size: The maximum size this data entry can be.
:return: a tuple of (data type, size). (None, None) if we fail to determine the type or the size.
:rtype: tuple
"""
if max_size is None:
max_size = 0
# quick check: if it's at the beginning of a binary, it might be the ELF header
elfheader_sort, elfheader_size = self._guess_data_type_elfheader(data_addr, max_size)
if elfheader_sort:
return elfheader_sort, elfheader_size
try:
ref = next(iter(self.kb.xrefs.get_xrefs_by_dst(data_addr))) # type: XRef
irsb_addr = ref.block_addr
stmt_idx = ref.stmt_idx
except StopIteration:
irsb_addr, stmt_idx = None, None
if self._seg_list.is_occupied(data_addr) and self._seg_list.occupied_by_sort(data_addr) == 'code':
# it's a code reference
# TODO: Further check if it's the beginning of an instruction
return MemoryDataSort.CodeReference, 0
pointer_size = self.project.arch.bytes
# who's using it?
if isinstance(self.project.loader.main_object, cle.MetaELF):
plt_entry = self.project.loader.main_object.reverse_plt.get(irsb_addr, None)
if plt_entry is not None:
# IRSB is owned by plt!
return MemoryDataSort.GOTPLTEntry, pointer_size
# is it in a section with zero bytes, like .bss?
obj = self.project.loader.find_object_containing(data_addr)
if obj is None:
return None, None
section = obj.find_section_containing(data_addr)
if section is not None and section.only_contains_uninitialized_data:
# Nothing much you can do
return None, None
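        # Check for a pointer array: read consecutive machine words and count how many of them point into a mapped
        # section or segment (or a user-supplied extra memory region), stopping at the first non-pointer value.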
pointers_count = 0
max_pointer_array_size = min(512 * pointer_size, max_size)
for i in range(0, max_pointer_array_size, pointer_size):
ptr = self._fast_memory_load_pointer(data_addr + i)
if ptr is not None:
#if self._seg_list.is_occupied(ptr) and self._seg_list.occupied_by_sort(ptr) == 'code':
# # it's a code reference
# # TODO: Further check if it's the beginning of an instruction
# pass
if self.project.loader.find_section_containing(ptr) is not None or \
self.project.loader.find_segment_containing(ptr) is not None or \
(self._extra_memory_regions and
next(((a < ptr < b) for (a, b) in self._extra_memory_regions), None)
):
# it's a pointer of some sort
# TODO: Determine what sort of pointer it is
pointers_count += 1
else:
break
if pointers_count:
return MemoryDataSort.PointerArray, pointer_size * pointers_count
try:
data = self.project.loader.memory.load(data_addr, 1024)
except KeyError:
data = b''
        # Is it a unicode string?
# TODO: Support unicode string longer than the max length
if len(data) >= 4 and data[1] == 0 and data[3] == 0 and data[0] in self.PRINTABLES:
def can_decode(n):
try:
data[:n*2].decode('utf_16_le')
except UnicodeDecodeError:
return False
return True
if can_decode(4) or can_decode(5) or can_decode(6):
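                # Greedily extend the string: tolerate a few undecodable UTF-16 code units in a row, and stop at a
                # UTF-16 NUL terminator.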
running_failures = 0
last_success = 4
for i in range(4, len(data) // 2):
if can_decode(i):
last_success = i
running_failures = 0
if data[i*2-2] == 0 and data[i*2-1] == 0:
break
else:
running_failures += 1
if running_failures > 3:
break
return MemoryDataSort.UnicodeString, last_success
if data:
try:
zero_pos = data.index(0)
except ValueError:
zero_pos = None
if (zero_pos is not None and zero_pos > 0 and all(c in self.PRINTABLES for c in data[:zero_pos])) or \
all(c in self.PRINTABLES for c in data):
# it's a string
# however, it may not be terminated
string_data = data if zero_pos is None else data[:zero_pos]
if content_holder is not None:
content_holder.append(string_data)
return MemoryDataSort.String, min(len(string_data) + 1, 1024)
for handler in self._data_type_guessing_handlers:
irsb = None if irsb_addr is None else self.model.get_any_node(irsb_addr).block.vex
sort, size = handler(self, irsb, irsb_addr, stmt_idx, data_addr, max_size)
if sort is not None:
return sort, size
return None, None
def _guess_data_type_elfheader(self, data_addr, max_size):
"""
Is the specified data chunk an ELF header?
:param int data_addr: Address of the data chunk
:param int max_size: Size of the data chunk.
:return: A tuple of ('elf-header', size) if it is, or (None, None) if it is not.
:rtype: tuple
"""
obj = self.project.loader.find_object_containing(data_addr)
if obj is None:
# it's not mapped
return None, None
if data_addr == obj.min_addr and 4 < max_size < 1000:
# Does it start with the ELF magic bytes?
try:
data = self.project.loader.memory.load(data_addr, 4)
except KeyError:
return None, None
if data == b"\x7fELF":
# yes!
return MemoryDataSort.ELFHeader, max_size
return None, None
# Indirect jumps processing
def _resolve_plt(self, addr, irsb, indir_jump: IndirectJump):
"""
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
resolve the jump target.
:param int addr: Address of the block.
:param irsb: The basic block.
:param indir_jump: The IndirectJump instance.
:return: True if the IRSB represents a PLT stub and we successfully resolved the target.
False otherwise.
:rtype: bool
"""
# is the address identified by CLE as a PLT stub?
if self.project.loader.all_elf_objects:
            # restrict this heuristic to ELF files only
if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]):
return False
# Make sure the IRSB has statements
if not irsb.has_statements:
irsb = self.project.factory.block(irsb.addr, size=irsb.size, opt_level=1, cross_insn_opt=False).vex
# try to resolve the jump target
simsucc = self.project.factory.default_engine.process(self._initial_state, irsb, force_addr=addr)
if len(simsucc.successors) == 1:
ip = simsucc.successors[0].ip
if ip._model_concrete is not ip:
target_addr = ip._model_concrete.value
obj = self.project.loader.find_object_containing(target_addr, membership_check=False)
if (obj is not None and obj is not self.project.loader.main_object) \
or self.project.is_hooked(target_addr):
# resolved!
# Fill the IndirectJump object
indir_jump.resolved_targets.add(target_addr)
l.debug("Address %#x is resolved as a PLT entry, jumping to %#x", addr, target_addr)
return True
return False
def _indirect_jump_resolved(self, jump: IndirectJump, jump_addr, resolved_by, targets: List[int]):
"""
Called when an indirect jump is successfully resolved.
:param jump: The resolved indirect jump.
:param IndirectJumpResolver resolved_by: The resolver used to resolve this indirect jump.
:param list targets: List of indirect jump targets.
:return: None
"""
source_addr = jump.addr
if jump.jumptable:
# Fill in the jump_tables dict
self.jump_tables[jump.addr] = jump
# occupy the jump table region
if jump.jumptable_addr is not None:
self._seg_list.occupy(jump.jumptable_addr, jump.jumptable_size, "data")
jump.resolved_targets = targets
all_targets = set(targets)
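        # For each resolved target, create a function transition edge and schedule a new CFG job so traversal
        # continues from the target.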
for addr in all_targets:
to_outside = jump.jumpkind == 'Ijk_Call' or addr in self.functions or not self._addrs_belong_to_same_section(jump.addr, addr)
# TODO: get a better estimate of the function address
target_func_addr = jump.func_addr if not to_outside else addr
func_edge = FunctionTransitionEdge(self._nodes[source_addr], addr, jump.func_addr, to_outside=to_outside,
dst_func_addr=target_func_addr
)
job = CFGJob(addr, target_func_addr, jump.jumpkind,
last_addr=source_addr,
src_node=self._nodes[source_addr],
src_ins_addr=jump.ins_addr,
src_stmt_idx=jump.stmt_idx,
func_edges=[func_edge],
)
self._insert_job(job)
self._register_analysis_job(target_func_addr, job)
self._deregister_analysis_job(jump.func_addr, jump)
CFGBase._indirect_jump_resolved(self, jump, jump.addr, resolved_by, targets)
def _indirect_jump_unresolved(self, jump):
"""
Called when we cannot resolve an indirect jump.
:param IndirectJump jump: The unresolved indirect jump.
:return: None
"""
        # add an edge from this node to the UnresolvableJumpTarget or UnresolvableCallTarget node,
        # depending on its jump kind
src_node = self._nodes[jump.addr]
if jump.jumpkind == 'Ijk_Boring':
unresolvable_target_addr = self._unresolvable_jump_target_addr
simprocedure_name = 'UnresolvableJumpTarget'
elif jump.jumpkind == 'Ijk_Call':
unresolvable_target_addr = self._unresolvable_call_target_addr
simprocedure_name = 'UnresolvableCallTarget'
else:
raise AngrCFGError('It should be impossible')
dst_node = CFGNode(unresolvable_target_addr, 0, self.model,
function_address=unresolvable_target_addr,
simprocedure_name=simprocedure_name,
block_id=unresolvable_target_addr,
)
# add the dst_node to self._nodes
if unresolvable_target_addr not in self._nodes:
self._nodes[unresolvable_target_addr] = dst_node
self._nodes_by_addr[unresolvable_target_addr].append(dst_node)
self._graph_add_edge(dst_node, src_node, jump.jumpkind, jump.ins_addr, jump.stmt_idx)
# mark it as a jumpout site for that function
self._function_add_transition_edge(unresolvable_target_addr, src_node, jump.func_addr,
to_outside=True,
dst_func_addr=unresolvable_target_addr,
ins_addr=jump.ins_addr,
stmt_idx=jump.stmt_idx,
)
self._deregister_analysis_job(jump.func_addr, jump)
CFGBase._indirect_jump_unresolved(self, jump)
# Exception handling
def _preprocess_exception_handlings(self):
self._exception_handling_by_endaddr.clear()
bin_count = 0
for obj in self.project.loader.all_objects:
if isinstance(obj, cle.MetaELF) and hasattr(obj, "exception_handlings") and obj.exception_handlings:
bin_count += 1
for exc in obj.exception_handlings:
if exc.handler_addr is not None and self._inside_regions(exc.handler_addr):
if (exc.start_addr + exc.size) in self._exception_handling_by_endaddr:
l.warning("Multiple exception handlings ending at %#x. Please report it to GitHub.",
exc.start_addr + exc.size)
continue
self._exception_handling_by_endaddr[exc.start_addr + exc.size] = exc
l.info("Loaded %d exception handlings from %d binaries.",
len(self._exception_handling_by_endaddr),
bin_count,
)
# Removers
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
        align the succeeding function. CFGFast does a linear sweep, which might create duplicated blocks for
        function epilogues where one block starts before the garbage bytes and the other starts after the garbage
        bytes. This method enumerates all blocks and removes overlapping blocks if one of them is aligned to 0x10
        and the other contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(self.graph.nodes(), key=lambda n: n.addr if n is not None else 0)
all_plt_stub_addrs = set(itertools.chain.from_iterable(obj.reverse_plt.keys() for obj in self.project.loader.all_objects if isinstance(obj, cle.MetaELF)))
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
        # leading instruction is a single-byte or multi-byte nop, make sure there is another CFGNode that starts after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if a.addr in self.functions and a.addr not in all_plt_stub_addrs and \
not self._addr_hooked_or_syscall(a.addr):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data['jumpkind'] == 'Ijk_Call' for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
try:
block = self._lift(a.addr, size=0x10 - (a.addr % 0x10))
except SimTranslationError:
continue
nop_length = None
if self._is_noop_block(self.project.arch, block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns)
if nop_length is None or nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and \
not (next_node_addr in self._nodes or next_node_addr in nodes_to_append):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(next_node_addr, next_node_size, self.model,
function_address=next_node_addr,
instruction_addrs=[i for i in a.instruction_addrs
if next_node_addr <= i
< next_node_addr + next_node_size
],
thumb=a.thumb,
byte_string=None if a.byte_string is None else a.byte_string[nop_length:],
block_id=next_node_addr,
)
self.graph.add_node(next_node)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(addr=next_node_addr, size=next_node_size,
base_state=self._base_state)
self.functions._add_node(next_node_addr, snippet)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(sorted_nodes + list(nodes_to_append.values()), key=lambda n: n.addr if n is not None else 0)
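        # Second pass: walk the nodes in address order and resolve each pair of overlapping nodes, either by
        # shrinking the earlier node, dropping the later node when nothing jumps to it, or re-scanning a
        # misdecoded tail.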
removed_nodes = set()
        a = None  # always holds the most recent non-removed node
for i in range(len(sorted_nodes)): # pylint:disable=consider-using-enumerate
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and \
(a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(a.addr, b.addr - a.addr, backup_state=self._base_state)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all([ self._is_noop_insn(insn) for insn in block.capstone.insns ]):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len([ _ for _, _, data in self.graph.in_edges([b], data=True) ])
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = b.addr + b.size - new_b_addr # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
dummy_job = CFGJob(new_b_addr, a.function_address, None)
self._scan_block(dummy_job)
continue
# for other cases, we'll let them be for now
a = b # update a
def _remove_node(self, node):
"""
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
"""
self.graph.remove_node(node)
if node.addr in self._nodes:
del self._nodes[node.addr]
# We wanna remove the function as well
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if node.addr in self.kb.functions.callgraph:
self.kb.functions.callgraph.remove_node(node.addr)
def _shrink_node(self, node, new_size, remove_function=True):
"""
Shrink the size of a node in CFG.
:param CFGNode node: The CFGNode to shrink
:param int new_size: The new size of the basic block
:param bool remove_function: If there is a function starting at `node`, should we remove that function or not.
:return: None
"""
# Generate the new node
new_node = CFGNode(node.addr, new_size, self.model,
function_address=None if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs
if node.addr <= i < node.addr + new_size
],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[:new_size],
block_id=node.addr,
)
old_in_edges = self.graph.in_edges(node, data=True)
for src, _, data in old_in_edges:
self.graph.add_edge(src, new_node, **data)
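        # The bytes cut off from the original node become (or join) a successor node that starts right after the
        # shrunk node; the old outgoing edges are then attached to that successor.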
successor_node_addr = node.addr + new_size
if successor_node_addr in self._nodes:
successor = self._nodes[successor_node_addr]
else:
successor_size = node.size - new_size
successor = CFGNode(successor_node_addr, successor_size, self.model,
function_address=successor_node_addr if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs if i >= node.addr + new_size],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[new_size:]
)
self.graph.add_edge(new_node, successor, jumpkind='Ijk_Boring')
# if the node B already has resolved targets, we will skip all unresolvable successors when adding old out edges
# from node A to node B.
# this matters in cases where node B is resolved as a special indirect jump entry (like a PLT stub), but (node
# A + node B) wasn't properly resolved.
unresolvable_target_addrs = (self._unresolvable_jump_target_addr, self._unresolvable_call_target_addr)
has_resolved_targets = any([ node_.addr not in unresolvable_target_addrs
for node_ in self.graph.successors(successor) ]
)
old_out_edges = self.graph.out_edges(node, data=True)
for _, dst, data in old_out_edges:
if (has_resolved_targets and dst.addr not in unresolvable_target_addrs) or \
not has_resolved_targets:
self.graph.add_edge(successor, dst, **data)
# remove the old node from indices
if node.addr in self._nodes and self._nodes[node.addr] is node:
del self._nodes[node.addr]
if node.addr in self._nodes_by_addr and node in self._nodes_by_addr[node.addr]:
self._nodes_by_addr[node.addr].remove(node)
# remove the old node form the graph
self.graph.remove_node(node)
# add the new node to indices
self._nodes[new_node.addr] = new_node
self._nodes_by_addr[new_node.addr].append(new_node)
# the function starting at this point is probably totally incorrect
        # hopefully a future call to `make_functions()` will correct everything
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if not remove_function:
# add functions back
self._function_add_node(node, node.addr)
successor_node = self.model.get_any_node(successor_node_addr)
if successor_node and successor_node.function_address == node.addr:
            # if there are absolutely no predecessors of successor_node, we'd like to add it as a new function
# so that it will not be left behind
if not list(self.graph.predecessors(successor_node)):
self._function_add_node(successor_node, successor_node_addr)
#if node.addr in self.kb.functions.callgraph:
# self.kb.functions.callgraph.remove_node(node.addr)
def _analyze_all_function_features(self, all_funcs_completed=False):
"""
Iteratively analyze all changed functions, update their returning attribute, until a fix-point is reached (i.e.
no new returning/not-returning functions are found).
:return: None
"""
while True:
new_changes = self._iteratively_analyze_function_features(all_funcs_completed=all_funcs_completed)
new_returning_functions = new_changes['functions_return']
new_not_returning_functions = new_changes['functions_do_not_return']
if not new_returning_functions and not new_not_returning_functions:
break
for returning_function in new_returning_functions:
self._pending_jobs.add_returning_function(returning_function.addr)
if returning_function.addr in self._function_returns:
for fr in self._function_returns[returning_function.addr]:
# Confirm them all
if not self.kb.functions.contains_addr(fr.caller_func_addr):
# FIXME: A potential bug might arise here. After post processing (phase 2), if the function
# specified by fr.caller_func_addr has been merged to another function during phase 2, we
# will simply skip this FunctionReturn here. It might lead to unconfirmed fake_ret edges
# in the newly merged function. Fix this bug in the future when it becomes an issue.
continue
if self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
self._updated_nonreturning_functions.add(fr.caller_func_addr)
return_to_node = self._nodes.get(fr.return_to, None)
if return_to_node is None:
return_to_snippet = self._to_snippet(addr=fr.return_to, base_state=self._base_state)
else:
return_to_snippet = self._to_snippet(cfg_node=self._nodes[fr.return_to])
self.kb.functions._add_return_from_call(fr.caller_func_addr, fr.callee_func_addr,
return_to_snippet)
del self._function_returns[returning_function.addr]
for nonreturning_function in new_not_returning_functions:
self._pending_jobs.add_nonreturning_function(nonreturning_function.addr)
if nonreturning_function.addr in self._function_returns:
for fr in self._function_returns[nonreturning_function.addr]:
# Remove all pending FakeRet edges
if self.kb.functions.contains_addr(fr.caller_func_addr) and \
self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
self._updated_nonreturning_functions.add(fr.caller_func_addr)
del self._function_returns[nonreturning_function.addr]
def _pop_pending_job(self, returning=True):
return self._pending_jobs.pop_job(returning=returning)
def _clean_pending_exits(self):
self._pending_jobs.cleanup()
#
# Graph utils
#
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
        Add an edge between two nodes, or add a single node if it is an entry point (i.e., src_node is None)
        :param CFGNode cfg_node: node which is jumped to
        :param CFGNode src_node: node which is jumped from; None if entry point
        :param str src_jumpkind: what type of jump the edge takes
        :param int src_ins_addr: address of the source instruction
        :param int or str src_stmt_idx: source statement ID
:return: None
"""
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx)
@staticmethod
def _get_return_endpoints(func):
all_endpoints = func.endpoints_with_type
return all_endpoints.get('return', [])
def _get_jumpout_targets(self, func):
jumpout_targets = set()
callgraph_outedges = self.functions.callgraph.out_edges(func.addr, data=True)
# find the ones whose type is transition
for _, dst, data in callgraph_outedges:
if data.get('type', None) == 'transition':
jumpout_targets.add(dst)
return jumpout_targets
def _get_return_sources(self, func):
# We will create a return edge for each returning point of this function
# Get all endpoints
all_endpoints = func.endpoints_with_type
# However, we do not want to create return edge if the endpoint is not a returning endpoint.
# For example, a PLT stub on x86/x64 always jump to the real library function, so we should create a return
# edge from that library function to the call site, instead of creating a return edge from the PLT stub to
# the call site.
if all_endpoints['transition']:
# it has jump outs
# it is, for example, a PLT stub
# we take the endpoints of the function it calls. this is not always correct, but it can handle many
# cases.
jumpout_targets = self._get_jumpout_targets(func)
jumpout_target_endpoints = set()
for jumpout_func_addr in jumpout_targets:
if jumpout_func_addr in self.functions:
jumpout_target_endpoints |= set(self._get_return_endpoints(self.functions[jumpout_func_addr]))
endpoints = jumpout_target_endpoints
else:
endpoints = set()
# then we take all return endpoints of the current function
endpoints |= all_endpoints.get('return', set())
return endpoints
def _get_tail_caller(self, tailnode, seen):
"""
        Recursively search predecessors for the actual callers of a tail-call node that we will
        return to.
:return: list of callers for a possible tailnode
"""
if tailnode.addr in seen:
return []
seen.add(tailnode.addr)
callers = self.model.get_predecessors(tailnode, jumpkind='Ijk_Call')
direct_jumpers = self.model.get_predecessors(tailnode, jumpkind='Ijk_Boring')
jump_callers = []
for jn in direct_jumpers:
jf = self.model.get_any_node(jn.function_address)
if jf is not None:
jump_callers.extend(self._get_tail_caller(jf, seen))
callers.extend(jump_callers)
return callers
def _make_return_edges(self):
"""
For each returning function, create return edges in self.graph.
:return: None
"""
for func_addr, func in self.functions.items():
if func.returning is False:
continue
# get the node on CFG
if func.startpoint is None:
l.warning('Function %#x does not have a startpoint (yet).', func_addr)
continue
startpoint = self.model.get_any_node(func.startpoint.addr)
if startpoint is None:
# weird...
l.warning('No CFGNode is found for function %#x in _make_return_edges().', func_addr)
continue
endpoints = self._get_return_sources(func)
# get all callers
callers = self.model.get_predecessors(startpoint, jumpkind='Ijk_Call')
# handle callers for tailcall optimizations if flag is enabled
if self._detect_tail_calls and startpoint.addr in self._tail_calls:
l.debug("Handling return address for tail call for func %x", func_addr)
seen = set()
tail_callers = self._get_tail_caller(startpoint, seen)
callers.extend(tail_callers)
# for each caller, since they all end with a call instruction, get the immediate successor
return_targets = itertools.chain.from_iterable(
self.model.get_successors(caller, excluding_fakeret=False, jumpkind='Ijk_FakeRet') for caller in callers
)
return_targets = set(return_targets)
for ep in endpoints:
src = self.model.get_any_node(ep.addr)
for rt in return_targets:
if not src.instruction_addrs:
ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(src.instruction_addrs) > 1:
ins_addr = src.instruction_addrs[-2]
else:
l.error('At %s: expecting more than one instruction. Only got one.', src)
ins_addr = None
else:
ins_addr = src.instruction_addrs[-1]
self._graph_add_edge(rt, src, 'Ijk_Ret', ins_addr, DEFAULT_STATEMENT)
#
# Function utils
#
def _function_add_node(self, cfg_node, function_addr):
"""
Adds node to function manager, converting address to CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None
"""
snippet = self._to_snippet(cfg_node=cfg_node)
self.kb.functions._add_node(function_addr, snippet)
def _function_add_transition_edge(self, dst_addr, src_node, src_func_addr, to_outside=False, dst_func_addr=None,
stmt_idx=None, ins_addr=None, is_exception=False):
"""
        Add a transition edge to the function transition map.
:param int dst_addr: Address that the control flow transits to.
:param CFGNode src_node: The source node that the control flow transits from.
:param int src_func_addr: Function address.
:return: True if the edge is correctly added. False if any exception occurred (for example, the target address
does not exist)
:rtype: bool
"""
try:
target_node = self._nodes.get(dst_addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=dst_addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
# Add this basic block into the function manager
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
if not to_outside:
self.kb.functions._add_transition_to(src_func_addr, src_snippet, target_snippet, stmt_idx=stmt_idx,
ins_addr=ins_addr, is_exception=is_exception
)
else:
self.kb.functions._add_outside_transition_to(src_func_addr, src_snippet, target_snippet,
to_function_addr=dst_func_addr,
stmt_idx=stmt_idx, ins_addr=ins_addr,
is_exception=is_exception,
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_call_edge(self, addr, src_node, function_addr, syscall=False, stmt_idx=None, ins_addr=None):
"""
Add a call edge to the function transition map.
:param int addr: Address that is being called (callee).
:param CFGNode src_node: The source CFG node (caller).
        :param int function_addr: Function address.
:param bool syscall: If this is a call to a syscall or not.
:param int or str stmt_idx: Statement ID of this call.
:param int or None ins_addr: Instruction address of this call.
:return: True if the edge is added. False if any exception occurred.
:rtype: bool
"""
try:
if src_node is None:
self.kb.functions._add_node(function_addr, addr, syscall=syscall)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
return_to_outside = False
ret_snippet = None
self.kb.functions._add_call_to(function_addr, src_snippet, addr, ret_snippet, syscall=syscall,
stmt_idx=stmt_idx, ins_addr=ins_addr,
return_to_outside=return_to_outside,
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_fakeret_edge(self, addr, src_node, src_func_addr, confirmed=None):
"""
        Generate CodeNodes for target and source. If there is no source node, add a node for the
        function; otherwise, create a fake return edge in the function manager.
:param int addr: target address
:param angr.analyses.CFGNode src_node: source node
:param int src_func_addr: address of function
:param confirmed: used as attribute on eventual digraph
:return: None
"""
target_node = self._nodes.get(addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
self.kb.functions._add_fakeret_to(src_func_addr, src_snippet, target_snippet, confirmed=confirmed)
def _function_add_return_site(self, addr, function_addr):
"""
        Generate a CodeNode for the target address, and register it with the function manager
        as a return site of the function.
:param int addr: target address
:param int function_addr: address of function
:return: None
"""
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
self.kb.functions._add_return_from(function_addr, target)
def _function_add_return_edge(self, return_from_addr, return_to_addr, function_addr):
"""
        Generate a CodeNode for return_to_addr, and add a return edge for the function to the
        function manager.
:param int return_from_addr: target address
:param int return_to_addr: target address
:param int function_addr: address of function
:return: None
"""
return_to_node = self._nodes.get(return_to_addr, None)
if return_to_node is None:
return_to_snippet = self._to_snippet(addr=return_to_addr, base_state=self._base_state)
to_outside = False
else:
return_to_snippet = self._to_snippet(cfg_node=return_to_node)
to_outside = return_to_node.function_address != function_addr
self.kb.functions._add_return_from_call(function_addr, return_from_addr, return_to_snippet,
to_outside=to_outside)
#
# Architecture-specific methods
#
def _arm_track_lr_on_stack(self, addr, irsb, function):
"""
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
If it does, we calculate the offset of that store, and record the offset in function.info.
For instance, here is the disassembly of a THUMB mode function:
000007E4 STR.W LR, [SP,#var_4]!
000007E8 MOV R2, R1
000007EA SUB SP, SP, #0xC
000007EC MOVS R1, #0
...
00000800 ADD SP, SP, #0xC
00000802 LDR.W PC, [SP+4+var_4],#4
The very last basic block has a jumpkind of Ijk_Boring, which is because VEX cannot do such complicated analysis
to determine the real jumpkind.
        As we can see, instruction 7e4h stores LR at [sp-4], and at the end of this function, instruction 802h loads LR
from [sp], then increments sp by 4. We execute the first instruction, and track the following things:
- if the value from register LR is stored onto the stack.
- the difference between the offset of the LR store on stack, and the SP after the store.
If at the end of the function, the LR is read out from the stack at the exact same stack offset, we will change
the jumpkind of the final IRSB to Ijk_Ret.
This method can be enabled by setting "ret_jumpkind_heuristics", which is an architecture-specific option on
ARM, to True.
:param int addr: Address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if irsb.statements is None:
return
if 'lr_saved_on_stack' in function.info:
return
        # if the first instruction stores LR onto the stack, we record it in the Function object.
lr_offset = self.project.arch.registers['lr'][0]
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
initial_lr = 0xabcdef
tmps = {}
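        # Perform lightweight constant propagation over the VEX temporaries, using sentinel values for the initial
        # SP and LR, so we can tell at which stack offset LR ends up being stored.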
# pylint:disable=too-many-nested-blocks
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
if stmt.addr + stmt.delta != addr:
break
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get):
if data.offset == sp_offset:
tmps[stmt.tmp] = initial_sp
elif data.offset == lr_offset:
tmps[stmt.tmp] = initial_lr
elif isinstance(data, pyvex.IRExpr.Binop):
if data.op == 'Iop_Sub32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] - arg1.con.value
elif isinstance(stmt, (pyvex.IRStmt.Store, pyvex.IRStmt.StoreG)):
data = stmt.data
storing_lr = False
if isinstance(data, pyvex.IRExpr.RdTmp):
if data.tmp in tmps:
val = tmps[data.tmp]
if val == initial_lr:
# we are storing LR to somewhere
storing_lr = True
if storing_lr:
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
if stmt.addr.tmp in tmps:
storing_addr = tmps[stmt.addr.tmp]
function.info['lr_saved_on_stack'] = True
function.info['lr_on_stack_offset'] = storing_addr - initial_sp
break
if 'lr_saved_on_stack' not in function.info:
function.info['lr_saved_on_stack'] = False
def _arm_track_read_lr_from_stack(self, irsb, function): # pylint:disable=unused-argument
"""
At the end of a basic block, simulate the very last instruction to see if the return address is read from the
stack and written in PC. If so, the jumpkind of this IRSB will be set to Ijk_Ret. For detailed explanations,
please see the documentation of _arm_track_lr_on_stack().
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if 'lr_saved_on_stack' not in function.info or not function.info['lr_saved_on_stack']:
return
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
last_sp = None
tmps = {}
tmp_irsb = self._lift(irsb.instruction_addresses[-1]).vex
# pylint:disable=too-many-nested-blocks
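        # Re-lift only the final instruction and propagate constants through its temporaries to determine whether
        # the next-PC value is loaded from the stack slot where LR was saved.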
for stmt in tmp_irsb.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get) and data.offset == sp_offset:
# t0 = GET:I32(sp)
tmps[stmt.tmp] = initial_sp
elif isinstance(data, pyvex.IRExpr.Binop):
# only support Add
if data.op == 'Iop_Add32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] + arg1.con.value
elif isinstance(data, pyvex.IRExpr.Load):
if isinstance(data.addr, pyvex.IRExpr.RdTmp):
if data.addr.tmp in tmps:
tmps[stmt.tmp] = ('load', tmps[data.addr.tmp])
elif isinstance(stmt, pyvex.IRStmt.Put):
if stmt.offset == sp_offset and isinstance(stmt.data, pyvex.IRExpr.RdTmp):
if stmt.data.tmp in tmps:
# loading things into sp
last_sp = tmps[stmt.data.tmp]
if last_sp is not None and isinstance(tmp_irsb.next, pyvex.IRExpr.RdTmp):
val = tmps.get(tmp_irsb.next.tmp, None)
# val being None means there are statements that we do not handle
if isinstance(val, tuple) and val[0] == 'load':
# the value comes from memory
memory_addr = val[1]
if isinstance(last_sp, int):
lr_on_stack_offset = memory_addr - last_sp
else:
lr_on_stack_offset = memory_addr - last_sp[1]
if lr_on_stack_offset == function.info['lr_on_stack_offset']:
# the jumpkind should be Ret instead of boring
irsb.jumpkind = 'Ijk_Ret'
#
# Other methods
#
def _generate_cfgnode(self, cfg_job, current_function_addr):
"""
Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
"""
addr = cfg_job.addr
try:
if addr in self._nodes:
cfg_node = self._nodes[addr]
irsb = cfg_node.irsb
if cfg_node.function_address != current_function_addr:
# the node has been assigned to another function before.
# we should update the function address.
current_function_addr = cfg_node.function_address
return addr, current_function_addr, cfg_node, irsb
is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64')
if is_arm_arch(self.project.arch):
real_addr = addr & (~1)
else:
real_addr = addr
# extra check for ARM
if is_arm_arch(self.project.arch) and self._seg_list.occupied_by_sort(addr) == "code":
existing_node = self.get_any_node(addr, anyaddr=True)
if existing_node is not None and (addr & 1) != (existing_node.addr & 1):
# we are trying to break an existing ARM node with a THUMB node, or vice versa
# this is probably because our current node is unexpected
return None, None, None, None
distance = VEX_IRSB_MAX_SIZE
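            # `distance` caps how many bytes we lift for this block; it is tightened below so the block does not
            # cross an exception-handling region, a section end, the next known function, or a non-code region.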
            # if there is exception handling code, check the distance between `addr` and the closest ending address
if self._exception_handling_by_endaddr:
next_end = next(self._exception_handling_by_endaddr.irange(minimum=real_addr), None)
if next_end is not None:
distance = min(distance, next_end - real_addr)
# if possible, check the distance between `addr` and the end of this section
obj = self.project.loader.find_object_containing(addr, membership_check=False)
if obj:
# is there a section?
has_executable_section = self._object_has_executable_sections(obj)
section = obj.find_section_containing(addr)
if has_executable_section and section is None:
# the basic block should not exist here...
return None, None, None, None
if section is not None:
if not section.is_executable:
# the section is not executable...
return None, None, None, None
distance_ = section.vaddr + section.memsize - real_addr
distance = min(distance_, VEX_IRSB_MAX_SIZE)
# TODO: handle segment information as well
# also check the distance between `addr` and the closest function.
# we don't want to have a basic block that spans across function boundaries
next_func = self.functions.ceiling_func(addr + 1)
if next_func is not None:
distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr
if distance_to_func != 0:
if distance is None:
distance = distance_to_func
else:
distance = min(distance, distance_to_func)
# in the end, check the distance between `addr` and the closest occupied region in segment list
next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance)
if next_noncode_addr is not None:
distance_to_noncode_addr = next_noncode_addr - real_addr
distance = min(distance, distance_to_noncode_addr)
# Let's try to create the pyvex IRSB directly, since it's much faster
nodecode = False
irsb = None
irsb_string = None
lifted_block = None
try:
lifted_block = self._lift(addr, size=distance, collect_data_refs=True, strict_block_end=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \
is_arm_arch(self.project.arch) and \
self._arch_options.switch_mode_on_nodecode:
# maybe the current mode is wrong?
nodecode = False
if addr % 2 == 0:
addr_0 = addr + 1
else:
addr_0 = addr - 1
if addr_0 in self._nodes:
# it has been analyzed before
cfg_node = self._nodes[addr_0]
irsb = cfg_node.irsb
return addr_0, cfg_node.function_address, cfg_node, irsb
try:
lifted_block = self._lift(addr_0, size=distance, collect_data_refs=True, strict_block_end=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'):
                    # it is decodable
if current_function_addr == addr:
current_function_addr = addr_0
addr = addr_0
if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode':
# decoding error
# is the current location already occupied and marked as non-code?
# it happens in cases like the following:
#
# BL a_nonreturning_func (but we don't know it does not return)
# alignment (mov r8, r8)
# data_ref_0:
# DCD "type found!"
#
occupied_sort = self._seg_list.occupied_by_sort(real_addr)
if occupied_sort and occupied_sort != "code":
# no wonder we cannot decode it
return None, None, None, None
# we still occupy that location since it cannot be decoded anyways
if irsb is None:
irsb_size = 0
else:
irsb_size = irsb.size
                # special handling for ud0, ud1, and ud2 on x86 and x86-64
if irsb_string[-2:] == b'\x0f\x0b' and self.project.arch.name == 'AMD64':
                    # VEX supports ud2 and makes it part of the block size, but only on AMD64.
valid_ins = True
nodecode_size = 0
elif lifted_block is not None \
and is_x86_x64_arch \
and len(lifted_block.bytes) - irsb_size > 2 \
and lifted_block.bytes[irsb_size : irsb_size + 2] in {
b'\x0f\xff', # ud0
b'\x0f\xb9', # ud1
b'\x0f\x0b', # ud2
}:
                    # ud0, ud1, and ud2 are actually valid instructions.
                    valid_ins = True
                    # VEX did not lift this ud0/ud1/ud2 instruction as part of the block, so it does not count towards the block size.
nodecode_size = 2
else:
valid_ins = False
nodecode_size = 1
self._seg_list.occupy(addr, irsb_size, 'code')
self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode')
if not valid_ins:
l.error("Decoding error occurred at address %#x of function %#x.",
addr + irsb_size,
current_function_addr
)
return None, None, None, None
is_thumb = False
# Occupy the block in segment list
if irsb.size > 0:
if is_arm_arch(self.project.arch) and addr % 2 == 1:
# thumb mode
                    is_thumb = True
self._seg_list.occupy(real_addr, irsb.size, "code")
# Create a CFG node, and add it to the graph
cfg_node = CFGNode(addr, irsb.size, self.model,
function_address=current_function_addr,
block_id=addr,
irsb=irsb,
thumb=is_thumb,
byte_string=irsb_string,
)
if self._cfb is not None:
self._cfb.add_obj(addr, lifted_block)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
return addr, current_function_addr, cfg_node, irsb
except (SimMemoryError, SimEngineError):
return None, None, None, None
def _process_block_arch_specific(self, addr: int, irsb: pyvex.IRSB, func_addr: int,
caller_gp: Optional[int]=None) -> None: # pylint: disable=unused-argument
"""
        Perform architecture-specific fixes on the block that was just lifted.
        For ARM ('ARMEL', 'ARMHF'), track whether the link register is saved to and later restored
        from the stack (see _arm_track_lr_on_stack), so that the final indirect jump of the function
        can be recognized as a return.
        For MIPS ('MIPS32', 'MIPS64'), simulate a few instructions near the function entry with the
        global pointer set to 0xffffffff; if the resulting $gp value is concrete, record it in the
        function's info (in the function manager) so that later analyses can use a concrete global
        pointer.
:param addr: irsb address
:param irsb: irsb
:param func_addr: function address
:param caller_gp: The gp register value that the caller function has. MIPS-specific.
"""
if is_arm_arch(self.project.arch):
if self._arch_options.ret_jumpkind_heuristics:
if addr == func_addr:
self._arm_track_lr_on_stack(addr, irsb, self.functions[func_addr])
elif 'lr_saved_on_stack' in self.functions[func_addr].info and \
self.functions[func_addr].info['lr_saved_on_stack'] and \
irsb.jumpkind == 'Ijk_Boring' and \
irsb.next is not None and \
isinstance(irsb.next, pyvex.IRExpr.RdTmp):
# do a bunch of checks to avoid unnecessary simulation from happening
self._arm_track_read_lr_from_stack(irsb, self.functions[func_addr])
elif self.project.arch.name in {"MIPS32", "MIPS64"}:
func = self.kb.functions.function(func_addr)
if 'gp' not in func.info and addr >= func_addr and addr - func_addr < 15 * 4:
gp_value = self._mips_determine_function_gp(addr, irsb, func_addr)
if gp_value is not None and self._gp_value is None:
self._gp_value = gp_value
if gp_value is None:
gp_value = caller_gp # fallback
if gp_value is None:
gp_value = self._gp_value # fallback to a previously found value
func.info['gp'] = gp_value
def _mips_determine_function_gp(self, addr: int, irsb: pyvex.IRSB, func_addr: int) -> Optional[int]:
# check if gp is being written to
last_gp_setting_insn_id = None
insn_ctr = 0
if not irsb.statements:
# Get an IRSB with statements
irsb = self.project.factory.block(irsb.addr, size=irsb.size, opt_level=1, cross_insn_opt=False).vex
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
insn_ctr += 1
if insn_ctr >= 10:
break
elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == self.project.arch.registers['gp'][0]:
last_gp_setting_insn_id = insn_ctr
if last_gp_setting_insn_id is None:
return None
# Prudently search for $gp values
state = self.project.factory.blank_state(addr=addr, mode="fastpath", remove_options=o.refs,
add_options={o.NO_CROSS_INSN_OPT},
)
state.regs._t9 = func_addr
state.regs._gp = 0xffffffff
try:
succ = self.project.factory.successors(state, num_inst=last_gp_setting_insn_id + 1)
except SimIRSBNoDecodeError:
# if last_gp_setting_insn_id is the last instruction, a SimIRSBNoDecodeError will be raised since
# there is no instruction left in the current block
return None
if not succ.flat_successors:
return None
state = succ.flat_successors[0]
gp = state.regs._gp
if not gp.symbolic and state.solver.is_false(gp == 0xffffffff):
return gp._model_concrete.value
return None
def _find_thunks(self):
if self.project.arch.name not in self.SPECIAL_THUNKS:
return {}
result = {}
for code, meaning in self.SPECIAL_THUNKS[self.project.arch.name].items():
for addr in self.project.loader.memory.find(code):
if self._addr_in_exec_memory_regions(addr):
result[addr] = meaning
return result
def _lift(self, addr, *args, opt_level=1, cross_insn_opt=False, **kwargs): # pylint:disable=arguments-differ
kwargs['extra_stop_points'] = set(self._known_thunks)
if self._use_patches:
# let's see if there is a patch at this location
all_patches = self.kb.patches.get_all_patches(addr, VEX_IRSB_MAX_SIZE)
if all_patches:
# Use bytes from patches instead
offset = addr
byte_string = b""
for p in all_patches:
if offset < p.addr:
byte_string += self._fast_memory_load_bytes(offset, p.addr - offset)
offset = p.addr
assert p.addr <= offset < p.addr + len(p)
byte_string += p.new_bytes[offset - p.addr: min(VEX_IRSB_MAX_SIZE - (offset-addr), p.addr + len(p) - offset)]
offset = p.addr + len(p)
kwargs['byte_string'] = byte_string
return super(CFGFast, self)._lift(addr, *args, opt_level=opt_level, cross_insn_opt=cross_insn_opt, **kwargs)
#
# Public methods
#
def copy(self):
n = CFGFast.__new__(CFGFast)
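        # start from a bare instance and copy attribute references over, then re-copy the mutable
        # members below that must not be shared between the two CFG objects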
for attr, value in self.__dict__.items():
if attr.startswith('__') and attr.endswith('__'):
continue
setattr(n, attr, value)
n._exec_mem_regions = self._exec_mem_regions[::]
n._seg_list = self._seg_list.copy()
n._function_addresses_from_symbols = self._function_addresses_from_symbols.copy()
n._model = self._model.copy()
return n
def output(self):
s = "%s" % self._graph.edges(data=True)
return s
@deprecated(replacement="angr.analyses.CFB")
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst
from angr.analyses import AnalysesHub
AnalysesHub.register_default('CFGFast', CFGFast)
| 43.729375
| 162
| 0.577851
|
c0887dfd30009a5c85bae8707571a8f5a25464be
| 170
|
py
|
Python
|
scraper.py
|
hackcouver/python-workshop
|
2cac35e87b877345141ac00953ca876fde019e69
|
[
"MIT"
] | null | null | null |
scraper.py
|
hackcouver/python-workshop
|
2cac35e87b877345141ac00953ca876fde019e69
|
[
"MIT"
] | null | null | null |
scraper.py
|
hackcouver/python-workshop
|
2cac35e87b877345141ac00953ca876fde019e69
|
[
"MIT"
] | 1
|
2020-06-13T23:58:12.000Z
|
2020-06-13T23:58:12.000Z
|
import twint
c = twint.Config()
c.Username = "realDonaldTrump" # switch to whichever Twitter account you would like
c.Output = "data/trump.txt"
s = twint.run.Search(c)
| 24.285714
| 84
| 0.735294
|
efef888fb64360962d6e90e0b67ea3a37e6824ea
| 1,384
|
py
|
Python
|
python/tink/core/_tink_error.py
|
hazaelsan/tink
|
9be3f690611bfab85284624de46ad6f36119d8bb
|
[
"Apache-2.0"
] | 1
|
2019-01-08T16:38:47.000Z
|
2019-01-08T16:38:47.000Z
|
python/tink/core/_tink_error.py
|
bhaskatripathi/tink
|
841802758ae6fadb6bcb60e9053fb4c169549f29
|
[
"Apache-2.0"
] | 1
|
2020-08-18T16:42:11.000Z
|
2020-08-25T16:13:11.000Z
|
python/tink/core/_tink_error.py
|
bhaskatripathi/tink
|
841802758ae6fadb6bcb60e9053fb4c169549f29
|
[
"Apache-2.0"
] | 1
|
2020-11-30T06:38:02.000Z
|
2020-11-30T06:38:02.000Z
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines basic exceptions in Tink."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tink.cc.pybind import tink_bindings
KNOWN_STATUS_NOT_OK_TYPES = (tink_bindings.StatusNotOk,)
def register_status_not_ok_type(status_not_ok_type):
global KNOWN_STATUS_NOT_OK_TYPES
if status_not_ok_type not in KNOWN_STATUS_NOT_OK_TYPES:
assert issubclass(status_not_ok_type, Exception)
KNOWN_STATUS_NOT_OK_TYPES += (status_not_ok_type,)
def use_tink_errors(func):
"""Transforms StatusNotOk errors into TinkErrors."""
def wrapper(*args):
try:
return func(*args)
except KNOWN_STATUS_NOT_OK_TYPES as e:
raise TinkError(e)
return wrapper
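# Illustrative (hypothetical) usage: decorating a function that calls into the C++ bindings, e.g.
#   @use_tink_errors
#   def encrypt(...): ...
# re-raises any registered StatusNotOk exception raised inside it as a TinkError.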
class TinkError(Exception):
"""Common exception in Tink."""
| 30.755556
| 74
| 0.774566
|
dbd3110622015b851f69746366c2dbd6783d2cff
| 7,027
|
py
|
Python
|
aae/auto_pose/eval/ae_multi_reconst.py
|
shbe-aau/multi-pose-estimation
|
0425ed9dcc7969f0281cb435615abc33c640e157
|
[
"MIT"
] | 4
|
2021-12-28T09:25:06.000Z
|
2022-01-13T12:55:44.000Z
|
aae/auto_pose/eval/ae_multi_reconst.py
|
shbe-aau/multi-view-pose-estimation
|
22cea6cd09684fe655fb2214bc14856f589048e1
|
[
"MIT"
] | null | null | null |
aae/auto_pose/eval/ae_multi_reconst.py
|
shbe-aau/multi-view-pose-estimation
|
22cea6cd09684fe655fb2214bc14856f589048e1
|
[
"MIT"
] | 1
|
2022-01-13T13:00:15.000Z
|
2022-01-13T13:00:15.000Z
|
# -*- coding: utf-8 -*-
import os
import configparser
import argparse
import numpy as np
import signal
import shutil
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import progressbar
import tensorflow as tf
from auto_pose.ae import ae_factory as factory
from auto_pose.ae import utils as u
from auto_pose.eval import eval_plots
def main():
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    if workspace_path is None:
print('Please define a workspace path:\n')
print('export AE_WORKSPACE_PATH=/path/to/workspace\n')
exit(-1)
    gentle_stop = np.array((1,), dtype=bool)
gentle_stop[0] = False
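    # install a SIGINT handler so that Ctrl-C only requests a graceful stop of the loop below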
def on_ctrl_c(signal, frame):
gentle_stop[0] = True
signal.signal(signal.SIGINT, on_ctrl_c)
parser = argparse.ArgumentParser()
parser.add_argument("experiment_name")
parser.add_argument("-d", action='store_true', default=False)
parser.add_argument("-gen", action='store_true', default=False)
parser.add_argument("-vis_emb", action='store_true', default=False)
parser.add_argument('--at_step', default=None, type=int, required=False)
arguments = parser.parse_args()
full_name = arguments.experiment_name.split('/')
experiment_name = full_name.pop()
experiment_group = full_name.pop() if len(full_name) > 0 else ''
debug_mode = arguments.d
generate_data = arguments.gen
at_step = arguments.at_step
cfg_file_path = u.get_config_file_path(workspace_path, experiment_name, experiment_group)
log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)
checkpoint_file = u.get_checkpoint_basefilename(log_dir)
ckpt_dir = u.get_checkpoint_dir(log_dir)
train_fig_dir = u.get_train_fig_dir(log_dir)
dataset_path = u.get_dataset_path(workspace_path)
if not os.path.exists(cfg_file_path):
print('Could not find config file:\n')
print('{}\n'.format(cfg_file_path))
exit(-1)
args = configparser.ConfigParser()
args.read(cfg_file_path)
num_iter = args.getint('Training', 'NUM_ITER') if not debug_mode else np.iinfo(np.int32).max
save_interval = args.getint('Training', 'SAVE_INTERVAL')
num_gpus = 1
model_type = args.get('Dataset', 'MODEL')
with tf.variable_scope(experiment_name, reuse=tf.AUTO_REUSE):
dataset = factory.build_dataset(dataset_path, args)
multi_queue = factory.build_multi_queue(dataset, args)
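        # split the 24 object views (and their decoders) evenly across the available GPUs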
dev_splits = np.array_split(np.arange(24), num_gpus)
iterator = multi_queue.create_iterator(dataset_path, args)
all_object_views = tf.concat([inp[0] for inp in multi_queue.next_element],0)
bs = multi_queue._batch_size
encoding_splits = []
for dev in range(num_gpus):
with tf.device('/device:GPU:%s' % dev):
encoder = factory.build_encoder(all_object_views[dev_splits[dev][0]*bs:(dev_splits[dev][-1]+1)*bs], args, is_training=False)
encoding_splits.append(tf.split(encoder.z, len(dev_splits[dev]),0))
with tf.variable_scope(experiment_name):
decoders = []
for dev in range(num_gpus):
with tf.device('/device:GPU:%s' % dev):
for j,i in enumerate(dev_splits[dev]):
decoders.append(factory.build_decoder(multi_queue.next_element[i], encoding_splits[dev][j], args, is_training=False, idx=i))
ae = factory.build_ae(encoder, decoders, args)
codebook = factory.build_codebook(encoder, dataset, args)
train_op = factory.build_train_op(ae, args)
saver = tf.train.Saver(save_relative_paths=True)
dataset.load_bg_images(dataset_path)
multi_queue.create_tfrecord_training_images(dataset_path, args)
widgets = ['Training: ', progressbar.Percentage(),
' ', progressbar.Bar(),
' ', progressbar.Counter(), ' / %s' % num_iter,
' ', progressbar.ETA(), ' ']
bar = progressbar.ProgressBar(maxval=num_iter,widgets=widgets)
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction = 0.9)
config = tf.ConfigProto(gpu_options=gpu_options,log_device_placement=True,allow_soft_placement=True)
with tf.Session(config=config) as sess:
sess.run(multi_queue.bg_img_init.initializer)
sess.run(iterator.initializer)
chkpt = tf.train.get_checkpoint_state(ckpt_dir)
if chkpt and chkpt.model_checkpoint_path:
if at_step is None:
checkpoint_file_basename = u.get_checkpoint_basefilename(log_dir,latest=args.getint('Training', 'NUM_ITER'))
else:
checkpoint_file_basename = u.get_checkpoint_basefilename(log_dir,latest=at_step)
print('loading ', checkpoint_file_basename)
saver.restore(sess, checkpoint_file_basename)
else:
if encoder._pre_trained_model != 'False':
encoder.saver.restore(sess, encoder._pre_trained_model)
all_vars = set([var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)])
var_list = all_vars.symmetric_difference([v[1] for v in list(encoder.fil_var_list.items())])
sess.run(tf.variables_initializer(var_list))
print(sess.run(tf.report_uninitialized_variables()))
else:
sess.run(tf.global_variables_initializer())
if not debug_mode:
print('Training with %s model' % args.get('Dataset','MODEL'), os.path.basename(args.get('Paths','MODEL_PATH')))
bar.start()
while True:
this,_,reconstr_train,enc_z = sess.run([multi_queue.next_element,multi_queue.next_bg_element,[decoder.x for decoder in decoders], encoder.z])
this_x = np.concatenate([el[0] for el in this])
this_y = np.concatenate([el[2] for el in this])
print(this_x.shape)
reconstr_train = np.concatenate(reconstr_train)
print(this_x.shape)
cv2.imshow('sample batch', np.hstack(( u.tiles(this_x, 4, 6), u.tiles(reconstr_train, 4,6),u.tiles(this_y, 4, 6))) )
k = cv2.waitKey(0)
idx = np.random.randint(0,24)
this_y = np.repeat(this_y[idx:idx+1, :, :], 24, axis=0)
reconstr_train = sess.run([decoder.x for decoder in decoders],feed_dict={encoder._input:this_y})
reconstr_train = np.array(reconstr_train)
print(reconstr_train.shape)
reconstr_train = reconstr_train.squeeze()
cv2.imshow('sample batch 2', np.hstack((u.tiles(this_y, 4, 6), u.tiles(reconstr_train, 4, 6))))
k = cv2.waitKey(0)
if k == 27:
break
if gentle_stop[0]:
break
if not debug_mode:
bar.finish()
if not gentle_stop[0] and not debug_mode:
print('To create the embedding run:\n')
print('ae_embed {}\n'.format(full_name))
if __name__ == '__main__':
main()
| 39.926136
| 154
| 0.655045
|
b28d9f511993ae23dee8a12ad4023cc11cac137a
| 6,475
|
py
|
Python
|
model/net.py
|
Tianweidadada/RelationExtract-Pytorch
|
61ab88bae8dafb36c8efc03167df4ec599ca9e66
|
[
"MIT"
] | 29
|
2019-10-31T07:54:39.000Z
|
2022-01-21T02:41:25.000Z
|
model/net.py
|
Tianweidadada/RelationExtract-Pytorch
|
61ab88bae8dafb36c8efc03167df4ec599ca9e66
|
[
"MIT"
] | 3
|
2020-12-19T14:24:27.000Z
|
2021-09-22T12:39:10.000Z
|
model/net.py
|
Tianweidadada/RelationExtract-Pytorch
|
61ab88bae8dafb36c8efc03167df4ec599ca9e66
|
[
"MIT"
] | 12
|
2020-10-13T09:22:59.000Z
|
2022-01-03T07:14:58.000Z
|
"""Define the neural network, loss function"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, data_loader, params):
super(CNN, self).__init__()
# loading embedding vectors of dataset
embedding_vectors = data_loader.get_loaded_embedding_vectors()
# word and position embedding layer
self.word_embedding = nn.Embedding.from_pretrained(embeddings=embedding_vectors, freeze=False)
self.pos1_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.pos2_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.max_len = params.max_len
# dropout layer
self.dropout = nn.Dropout(params.dropout_ratio)
feature_dim = params.word_emb_dim + params.pos_emb_dim * 2
# encode sentence level features via cnn
self.covns = nn.ModuleList([nn.Sequential(nn.Conv1d(in_channels=feature_dim,
out_channels=params.filter_num,
kernel_size=k),nn.Tanh(),nn.MaxPool1d(kernel_size=self.max_len-k+1)) for k in params.filters])
filter_dim = params.filter_num * len(params.filters)
labels_num = len(data_loader.label2idx)
# output layer
self.linear = nn.Linear(filter_dim, labels_num)
self.loss = nn.CrossEntropyLoss()
if params.gpu >= 0:
self.cuda(device=params.gpu)
def forward(self, x):
batch_sents = x['sents']
batch_pos1s = x['pos1s']
batch_pos2s = x['pos2s']
word_embs = self.word_embedding(batch_sents)
pos1_embs = self.pos1_embedding(batch_pos1s)
pos2_embs = self.pos2_embedding(batch_pos2s)
input_feature = torch.cat([word_embs, pos1_embs, pos2_embs], dim=2) # batch_size x seq_len x feature_dim
input_feature = input_feature.permute(0,2,1) #(batch_size,feature_dim,seq_len)
input_feature = self.dropout(input_feature)
out = [conv(input_feature) for conv in self.covns] #(batch_size,filter_num,1)
"""
        Concatenate along the second dimension: e.g. tensors of shape 5*2*1 and 5*3*1 become 5*5*1
        (the trailing dimension of 1 comes from the max pooling).
        Each window size produces filter_num features, and these features are concatenated together.
"""
out = torch.cat(out,dim=1)
out = self.dropout(out)
out = out.view(-1,out.size(1)) #(batch_size, (filter_num*window_num))
x = self.dropout(out)
x = self.linear(x)
return x
class BiLSTM_Att(nn.Module):
def __init__(self,data_loader,params):
super(BiLSTM_Att, self).__init__()
embedding_vectors = data_loader.get_loaded_embedding_vectors()
self.out_size = len(data_loader.label2idx)
self.hidden_dim = params.hidden_dim
self.batch_size = params.batch_size
self.feature_dim = params.word_emb_dim + params.pos_emb_dim * 2
self.lstm = nn.LSTM(self.feature_dim,self.hidden_dim//2,bidirectional=True)
self.word_embedding = nn.Embedding.from_pretrained(embedding_vectors,freeze=False)
self.pos1_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.pos2_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.att_weight = nn.Parameter(torch.randn((self.batch_size, 1, self.hidden_dim)))
self.dropout_emb = nn.Dropout(p=0.3)
self.dropout_lstm = nn.Dropout(p=0.3)
self.dropout_att = nn.Dropout(p=0.5)
self.dense = nn.Linear(self.hidden_dim,self.out_size)
self.device = None
self.loss = nn.CrossEntropyLoss()
if params.gpu >= 0:
self.device = self.cuda(device=params.gpu)
def begin_state(self):
state = (
torch.zeros(2, self.batch_size, self.hidden_dim // 2),
torch.zeros(2, self.batch_size, self.hidden_dim // 2))
        if self.device:
            # `state` is a tuple of tensors and has no .to(); move each tensor to the module's device
            device = next(self.parameters()).device
            return tuple(s.to(device) for s in state)
        return state
'''
H: (batch_size,hidden_dim,seq_len)
att_weight: (batch_size,1,hidden_dim)
'''
def attention(self, H):
M = torch.tanh(H)
a = torch.bmm(self.att_weight, M)
a = F.softmax(a, dim=2) # (batch_size,1,seq_len)
a = a.transpose(1, 2) # after a: (batch_size,seq_len,1)
return torch.bmm(H, a) # (batch_size,hidden_dim,1)
def forward(self, X):
batch_sents = X['sents']
batch_pos1s = X['pos1s']
batch_pos2s = X['pos2s']
word_embs = self.word_embedding(batch_sents)
pos1_embs = self.pos1_embedding(batch_pos1s)
pos2_embs = self.pos2_embedding(batch_pos2s)
input_feature = torch.cat([word_embs, pos1_embs, pos2_embs], dim=2).transpose(0,1)
# (seq_len,batch_size,vector_size)
embeds = self.dropout_emb(input_feature)
        lstm_out, state = self.lstm(embeds, self.begin_state())  # lstm_out: (seq_len, batch_size, hidden_dim)
lstm_out = lstm_out.permute(1, 2, 0) # (batch_size,hidden_dim,seq_len)
lstm_out = self.dropout_lstm(lstm_out)
att_out = torch.tanh(self.attention(lstm_out)) # (batch_size,hidden_dim,1)
att_out = self.dropout_att(att_out)
        out = self.dense(att_out.view(self.batch_size, self.hidden_dim))  # fully connected layer: W*h + b
return out
class BiLSTM_MaxPooling(nn.Module):
def __init__(self,data_loader,params):
super(BiLSTM_MaxPooling, self).__init__()
embedding_vectors = data_loader.get_loaded_embedding_vectors()
self.out_size = len(data_loader.label2idx)
self.hidden_dim = params.hidden_dim
self.batch_size = params.batch_size
self.feature_dim = params.word_emb_dim + params.pos_emb_dim * 2
self.lstm = nn.LSTM(self.feature_dim,self.hidden_dim//2,bidirectional=True)
self.word_embedding = nn.Embedding.from_pretrained(embedding_vectors,freeze=False)
self.pos1_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.pos2_embedding = nn.Embedding(params.pos_dis_limit * 2 + 3, params.pos_emb_dim)
self.att_weight = nn.Parameter(torch.randn((self.batch_size, 1, self.hidden_dim)))
self.dense = nn.Linear(self.hidden_dim,self.out_size)
self.device = None
self.loss = nn.CrossEntropyLoss()
if params.gpu >= 0:
self.device = self.cuda(device=params.gpu)
def begin_state(self):
state = (
torch.zeros(2, self.batch_size, self.hidden_dim // 2),
torch.zeros(2, self.batch_size, self.hidden_dim // 2))
        if self.device:
            # `state` is a tuple of tensors and has no .to(); move each tensor to the module's device
            device = next(self.parameters()).device
            return tuple(s.to(device) for s in state)
        return state
def forward(self, X):
batch_sents = X['sents']
batch_pos1s = X['pos1s']
batch_pos2s = X['pos2s']
word_embs = self.word_embedding(batch_sents)
pos1_embs = self.pos1_embedding(batch_pos1s)
pos2_embs = self.pos2_embedding(batch_pos2s)
input_feature = torch.cat([word_embs, pos1_embs, pos2_embs], dim=2).transpose(0,1)
        lstm_out, state = self.lstm(input_feature, self.begin_state())  # lstm_out: (seq_len, batch_size, hidden_dim)
out,_ = torch.max(lstm_out,dim=0) # (1,batch_size,hidden_dim)
        out = self.dense(out.squeeze(0))  # fully connected layer: W*h + b
return out
| 35.382514
| 110
| 0.736988
|
0ab944737199a0f2f8a923affc731a753dac9b98
| 431
|
py
|
Python
|
malib/algorithm/dqn/__init__.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | 258
|
2021-05-10T11:17:45.000Z
|
2022-03-30T13:41:09.000Z
|
malib/algorithm/dqn/__init__.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | 28
|
2021-05-13T06:50:04.000Z
|
2022-03-30T14:19:15.000Z
|
malib/algorithm/dqn/__init__.py
|
ReinholdM/play_football_with_human
|
9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c
|
[
"MIT"
] | 34
|
2021-05-31T16:17:49.000Z
|
2022-03-26T06:59:59.000Z
|
from .policy import DQN
from .trainer import DQNTrainer
from .loss import DQNLoss
NAME = "DQN"
LOSS = DQNLoss
TRAINER = DQNTrainer
POLICY = DQN
# custom_config
CONFIG = {
"training": {"tau": 0.01},
"policy": {
"gamma": 0.98,
"eps_min": 1e-2,
"eps_max": 1.0,
"eps_decay": 2000,
"dueling": False,
"use_cuda": False,
},
}
__all__ = ["NAME", "LOSS", "TRAINER", "POLICY"]
| 17.24
| 47
| 0.563805
|
7a74cda68faf5584260544b3af415aaf92fcb224
| 46
|
py
|
Python
|
auxjad/spanners/__init__.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 6
|
2020-05-18T09:28:29.000Z
|
2021-12-22T00:40:54.000Z
|
auxjad/spanners/__init__.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T20:29:38.000Z
|
2021-04-22T19:44:54.000Z
|
auxjad/spanners/__init__.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T18:54:46.000Z
|
2021-04-21T18:54:46.000Z
|
"""
spanners
========
Auxjad's spanners.
"""
| 6.571429
| 18
| 0.5
|
838aca80aedc5f21f25fc6d3dce7e9e268d9b416
| 1,521
|
py
|
Python
|
Code/GCcalculation.py
|
baby-phage/GC-Plotter
|
b8495b392a485944f495363639dad5874544c427
|
[
"MIT"
] | 1
|
2021-11-11T03:04:10.000Z
|
2021-11-11T03:04:10.000Z
|
Code/GCcalculation.py
|
baby-phage/GC-Plotter
|
b8495b392a485944f495363639dad5874544c427
|
[
"MIT"
] | null | null | null |
Code/GCcalculation.py
|
baby-phage/GC-Plotter
|
b8495b392a485944f495363639dad5874544c427
|
[
"MIT"
] | null | null | null |
# Functions for GC% calculation
def Multiple_Fasta_Parser(filepath: str) -> (list, list):
"""Parses given FASTA file and returns a DNA sequence list and their corresponding trimmed down FASTA ID list."""
# Import regex
import re
with open(filepath, "r") as file:
DNA_seq_list = []
FASTA_ID_list = []
DNA_seq = ""
for fileline in file.readlines():
fileline = fileline.strip("\n")
if fileline.startswith(">"):
FASTA_ID_list.append(fileline)
if DNA_seq != "":
DNA_seq_list.append(DNA_seq)
DNA_seq = ""
else:
DNA_seq += fileline
else:
DNA_seq_list.append(DNA_seq)
# Trimming FASTA ID to obtain only the unique identifier
FASTA_ID_trimmed_list = []
for FASTA_ID in FASTA_ID_list:
for match in re.findall(r"^>[A-Za-z\d.:\-_]+", FASTA_ID):
FASTA_ID_trimmed_list.append(match)
return DNA_seq_list, FASTA_ID_trimmed_list
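# Illustrative example (hypothetical file contents): a FASTA file containing
#   >seq1 some description
#   ATGC
# would be parsed into (["ATGC"], [">seq1"]).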
def GC_Calc(DNA_seq):
"""Returns GC % of a DNA sequence rounded up to 3 decimal places."""
DNA_len = len(DNA_seq)
GC_count = 0
for nucleotide in DNA_seq:
if nucleotide == "G" or nucleotide == "C":
GC_count += 1
try:
GC_percent = round((GC_count / DNA_len) * 100, 3)
except ZeroDivisionError:
GC_percent = 0.0
return GC_percent
| 27.654545
| 118
| 0.558185
|
3d1b624a40f09e64f8ddf4f3f213bfbf6184d57f
| 30,491
|
py
|
Python
|
preprocessing/feature_extraction.py
|
Artisan-Lab/SMTimer
|
8e0bbb854afd360dcc61d6b098c4ae8931bae14c
|
[
"MIT"
] | 5
|
2021-04-25T08:33:15.000Z
|
2022-03-10T02:20:34.000Z
|
preprocessing/feature_extraction.py
|
Artisan-Lab/SMTimer
|
8e0bbb854afd360dcc61d6b098c4ae8931bae14c
|
[
"MIT"
] | 1
|
2021-12-12T08:34:09.000Z
|
2021-12-12T10:21:32.000Z
|
preprocessing/feature_extraction.py
|
Artisan-Lab/SMTimer
|
8e0bbb854afd360dcc61d6b098c4ae8931bae14c
|
[
"MIT"
] | null | null | null |
import json
import math
import sys
# import torch
import traceback
from pysmt.smtlib.parser import SmtLibParser
from six.moves import cStringIO
from pysmt.operators import __OP_STR__
from .Operators import op, none_op, bv_constant, bool_constant, reserved_word
sys.setrecursionlimit(1000000)
from collections import defaultdict
import pysmt
from preprocessing.Tree import varTree as Tree
import re
import numpy as np
# SMT script file information, program name, solving time with symbolic tools, solving time with different solvers
class Script_Info:
def __init__(self, string, is_json=False):
self.script = None
self.filename = None
self.solving_time = None
# a dict of solving time with different solvers as dict key, some solving time of each to avoid variety
self.solving_time_dic = None
self.is_json = is_json
self.load(string)
def load(self, string):
if self.is_json:
try:
string = json.loads(string)
self.is_json = True
except:
self.is_json = False
self.get_attr(string, None)
def get_attr(self, str, input):
if self.is_json:
try:
if 'smt_script' in str.keys():
self.script = str["smt_script"]
else:
self.script = str["script"]
self.solving_time = str['time']
self.filename = str['filename'].split("/")[-1]
if 'solving_time_dic' in str.keys():
self.solving_time_dic = str['solving_time_dic']
else:
self.solving_time_dic = {"z3":str['double_check_time']}
except:
pass
else:
data_list = str.split("\n")
try:
if data_list[0].startswith("filename"):
self.filename = data_list[0].split("/")[-1]
data_list = data_list[1:]
# else:
# self.filename = input.split("/")[-1]
if data_list[-1] == "":
data_list = data_list[:-1]
if "time:" in data_list[-1] or "Elapsed" in data_list[-1]:
solving_time = data_list[-1].split(" ")[-1]
if solving_time[-1] == 's':
solving_time = solving_time[:-1]
self.solving_time = float(solving_time)
self.solving_time_dic = None
data_list = data_list[:-1]
self.script = "\n".join(data_list)
except:
self.script = str
# main preprocessing: parse the SMT scripts to produce an abstract tree or a feature vector for later prediction
class feature_extractor:
def __init__(self, script_info, time_selection="original", limit=100):
self.feature_list = []
self.logic_tree = None
self.val_list = []
self.val_dic = {}
self.used = defaultdict(bool)
self.mid_val = {}
self.origin_time = None
self.adjust_time = None
self.cut_num = 0
self.feature = np.array([0] * (len(op) + 4))
self.script_info = script_info
self.time_selection = time_selection
self.type = []
self.constant = []
self.feature_number_limit = limit
self.treeforassert = False
    # calculate the solving-time label (averaged over the valid recorded times); 0 when no data is available
def cal_training_label(self):
solver_list = ["msat", "cvc4", "yices", "btor", "z3"]
if isinstance(self.script_info.solving_time_dic, dict):
if self.time_selection in solver_list:
time_list = self.script_info.solving_time_dic[self.time_selection]
else:
time_list = list(self.script_info.solving_time_dic.values())[0]
else:
time_list = self.script_info.solving_time_dic
if time_list:
valid_time_list = []
for x in time_list:
if float(x) > 0:
valid_time_list.append(float(x))
# self.adjust_time = max(time_list)
if len(valid_time_list) == 0:
self.adjust_time = 0
else:
self.adjust_time = sum(valid_time_list) / len(valid_time_list)
else:
self.adjust_time = 0
if self.script_info.solving_time:
self.origin_time = float(self.script_info.solving_time)
else:
self.origin_time = 0
def script_to_feature(self):
data = self.script_info.script
self.cal_training_label()
assertions = self.handle_variable_defination(data)
# parse define-fun with pysmt parser
# to do: handle more reserved word parser in SMT-LIB
# if define_fun:
# last_reserved_word = None
# left_count = 0
# data_list = data.split("\n")
# for i in range(len(data_list)):
# if "declare-fun" in data_list[i]:
# define_list.append(data_list[i])
# left_count, finish, last_reserved_word = finished(data_list[i], left_count)
# elif "define-fun" in data_list[i]:
# define_list.append(data_list[i])
# left_count, finish, last_reserved_word = finished(data_list[i], left_count)
# elif last_reserved_word !=
# self.construct_define(define_list)
try:
# parse assertion stack into abstract trees
self.assertions_to_feature_list(assertions)
# merging sub tree: bottom_up_merging or accumulation
self.accumulation()
# self.bottom_up_merging()
# truncate tree by depth. default 60
self.cut_length()
# collecting tree structure information
self.feature[-4] = self.logic_tree.node
self.feature[-2] = self.logic_tree.depth
except TimeoutError:
raise TimeoutError
except (KeyError,IndexError) as e:
self.logic_tree = vartree('unknown', None, None, None)
# raise e
# record variables, other type defined with reserve word "declare-fun", "declare-sort", ...,
# for later variable replacement
# also allow the define after assertion has been added
def handle_variable_defination(self, data):
last_reserved_word = None
left_count = 0
# replace variable with general symbol
data_list = data.split("\n")
define_fun = False
sl = data.split("(assert")
asserts = ["(assert" + x for x in sl[1:]]
asserts_str = "".join(asserts)
need_assert = False
if "declare-fun" in asserts_str or "define-fun" in asserts_str:
need_assert = True
asserts = []
define_list = []
assert_str = ""
for i in range(len(data_list)):
if "declare-fun" in data_list[i] or "declare-sort" in data_list[i] or "define-sort" in data_list[i]:
try:
var_name = data_list[i].split(" ", maxsplit=1)[1]
var_name = var_name.split(" (", maxsplit=1)[0]
var_name = var_name.rstrip(" ")
except:
continue
if "declare-fun" in data_list[i]:
self.val_list.append(var_name)
self.val_dic[var_name] = "var" + str(len(self.val_list))
elif "declare-sort" in data_list[i]:
self.constant.append(var_name)
elif "define-sort" in data_list[i]:
self.type.append(var_name)
define_list.append(data_list[i])
left_count, last_reserved_word = finished(data_list[i], left_count)
elif "assert" in data_list[i]:
if need_assert:
assert_str = assert_str + data_list[i]
left_count, last_reserved_word = finished(data_list[i], left_count)
if not left_count:
asserts.append(assert_str)
assert_str = ""
elif "declare" in data_list[i] or "define" in data_list[i]:
define_fun = True
define_list.append(data_list[i])
left_count, last_reserved_word = finished(data_list[i], left_count)
elif last_reserved_word == "assert":
if need_assert:
assert_str = assert_str + data_list[i]
left_count, word = finished(data_list[i], left_count)
if not left_count:
last_reserved_word = word
asserts.append(assert_str)
assert_str = ""
elif last_reserved_word != None:
define_list.append(data_list[i])
left_count, word = finished(data_list[i], left_count)
if not left_count:
last_reserved_word = word
# else:
# print(last_reserved_word)
if define_fun:
self.construct_define(define_list)
self.feature[-1] = len(self.val_list)
asserts = [sl[0]] + ["(assert" + x for x in sl[1:]]
return asserts
def get_variable(self, data):
data_list = data.split("\n")
for i in range(len(data_list)):
if "declare-fun" in data_list[i]:
var_name = data_list[i].split(" ", maxsplit=1)[1]
var_name = var_name.split(" (", maxsplit=1)[0]
var_name = var_name.rstrip(" ")
self.val_list.append(var_name)
self.val_dic[var_name] = "var" + str(len(self.val_list))
self.feature[-1] += 1
elif "assert" in data_list[i]:
break
sl = data.split("(assert")
asserts = [sl[0]] + ["(assert" + x for x in sl[1:]]
return asserts
def construct_define(self, define_list):
define_str = "\n".join(define_list)
try:
smt_parser = SmtLibParser()
script = smt_parser.get_script(cStringIO(define_str))
except (KeyError,IndexError, pysmt.exceptions.PysmtTypeError):
return
try:
assert_list = script.commands
for assertion in assert_list:
if assertion.name == "define-fun":
new_tree = self.fnode_to_tree(assertion.args[3])
self.mid_val[assertion.args[0]] = new_tree
self.used[assertion.args[0]] = False
except (KeyError,IndexError):
return
def fnode_to_tree(self, fnode):
transtable = list(__OP_STR__.values())
# print(fnode)
if fnode.is_symbol():
if fnode.symbol_name() in self.val_list:
root = vartree(self.val_dic[fnode.symbol_name()])
else:
root = vartree("constant")
elif fnode.is_constant():
root = vartree("constant")
elif fnode.is_term():
if fnode.is_and() and fnode.arg(1).is_true():
root = self.fnode_to_tree(fnode.arg(0))
else:
subnode_list = []
for subnode in fnode.args():
subnode_list.append(self.fnode_to_tree(subnode))
subnode_list.extend([None, None, None])
root = vartree(transtable[fnode.node_type()], subnode_list[0], subnode_list[1], subnode_list[2])
else:
root = vartree("unknown")
return root
def cut_length(self):
root = self.logic_tree
if self.treeforassert:
self.depth = 40
else:
self.depth = 60
self._cut(root, 0)
def _cut(self, root, depth):
if root:
if depth > self.depth:
self.cut_num += 1
return self.generate_replace(root)
if hasattr(root, "feature"):
del root.feature
root.left = self._cut(root.left, depth + 1)
root.mid = self._cut(root.mid, depth + 1)
root.right = self._cut(root.right, depth + 1)
return root
def generate_replace(self, root):
try:
newroot = vartree(np.log(root.feature + 1).tolist(), None, None, None)
except (AttributeError, ValueError):
var_list = list(root.var) + ['constant', None, None]
for i in [0, 1, 2]:
if var_list[i] != None:
var_list[i] = vartree(var_list[i])
root.left = var_list[0]
root.mid = var_list[1]
root.right = var_list[2]
newroot = vartree('compressed_op', var_list[0], var_list[1], var_list[2])
return newroot
def bottom_up_merging(self):
if len(self.feature_list) and not isinstance(self.feature_list[0], Tree):
self.feature_list = list(map(lambda x:vartree(np.log(np.array(x) + 1).tolist()), self.feature_list))
tl = self.feature_list
while len(tl) != 1:
new_tl = []
if len(tl) % 3 != 0:
tl.append(None)
if len(tl) % 3 != 0:
tl.append(None)
for i in range(0, len(tl), 3):
new_tl.append(vartree("and", tl[i], tl[i + 1], tl[i + 2]))
tl = new_tl
self.logic_tree = tl[0]
def accumulation(self):
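        # fold the per-assertion trees left-to-right into a single tree, joining up to two of them
        # at a time under an "and" node (oversized subtrees are first replaced by a compressed summary)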
if len(self.feature_list) and not isinstance(self.feature_list[0], Tree):
self.feature_list = list(map(lambda x:vartree(np.log(np.array(x) + 1).tolist()), self.feature_list))
for ind, feature in enumerate(self.feature_list):
if feature.node > 500:
# print("cut large tree")
self.feature_list[ind] = self.generate_replace(self.feature_list[ind])
tl = self.feature_list[1:]
try:
root = self.feature_list[0]
except IndexError:
return
# print(self.script)
while len(tl) != 0:
if len(tl) == 1:
root = vartree("and", root, tl[0])
else:
root = vartree("and", root, tl[0], tl[1])
tl = tl[2:]
self.logic_tree = root
def assertions_to_feature_list(self, assertions):
limit = self.feature_number_limit
assertions[-1] = assertions[-1].replace("(check-sat)", "")
assertions[-1] = assertions[-1].replace("(exit)", "")
if len(assertions) > limit:
assertions[-limit] = "\n".join(assertions[:-limit + 1])
assertions = assertions[-limit:]
# assertion
for assertion in assertions:
val = list(map(lambda x:math.log(x+1),self.count_feature(assertion)))
root = vartree(val)
self.feature_list.append(root)
# if not self.parse_smt_comp(assertion):
# return
# data_lines = assertion.split("\n")
# # one line
# for data_line in data_lines:
# if data_line == "(check-sat)" or data_line == "":
# continue
# if "time:" in data_line:
# break
# else:
# self.parse_angr_smt(data_line)
    # parse a wider range of SMT scripts (mainly QF_ABV, QF_URA); second version, superseded by the pysmt parser.
    # If pysmt parsing fails, this can be used instead.
def parse_smt_comp(self, assertion):
data_list = assertion.split(" ")
current_ind = 0
data_len = len(data_list)
stack = []
swap_stack = ["define"]
try:
while current_ind < data_len:
current = data_list[current_ind].replace("\n", "")
current = current.strip(")")
current = current.strip("(")
if data_list[current_ind].startswith("("):
stack.extend(["("] * data_list[current_ind].count("("))
if current == "assert":
current_ind += 1
continue
elif current == "":
current_ind += 1
continue
elif current in ["let", "forall", "exists"]:
swap_stack,stack = stack, swap_stack
# stack = ["define"]
elif current == "_":
if data_list[current_ind + 1] in none_op:
stack[-1] = data_list[current_ind + 1]
elif data_list[current_ind + 1].startswith("bv"):
stack[-1] = vartree("constant")
else:
raise SyntaxError("unknown single op", data_list[current_ind + 1])
if ")" in data_list[current_ind + 2]:
current_ind += 2
else:
current_ind += 3
data_list[current_ind] = data_list[current_ind].replace(")", "", 1)
elif current in op:
stack.append(current)
self.feature[op.index(current)] += 1
elif current in self.mid_val or current[1:-1] in self.mid_val:
# if stack[0] == "define":
# pa_count = stack.count("(")
# current_ind += 1
# while pa_count != 0:
# pa_count += data_list[current_ind].count("(") - data_list[current_ind].count(")")
# current_ind += 1
# swap_stack, stack = stack, swap_stack
# swap_stack = ["define"]
# continue
if current[1:-1] in self.mid_val:
current = current[1:-1]
stack.append(self.mid_val[current])
# if self.used[current] == False:
# stack.append(self.mid_val[current])
# self.used[current] = True
# else:
# if self.mid_val[current].node > 10:
# stack.append(self.generate_replace(self.mid_val[current]))
# else:
# stack.append(copy(self.mid_val[current]))
if stack[-2] == "(" and isinstance(stack[-1], Tree):
left = 0
while (left >= 0 and current_ind < data_len - 1):
current_ind += 1
current = data_list[current_ind]
left = left + current.count("(") - current.count(")")
# nested string trigger replace error
# elif current.startswith("var"):
# stack.append(vartree(current))
elif current in self.type:
pass
elif current in self.constant:
stack.append(vartree("constant"))
elif current in self.val_list:
var_n = self.val_dic[current]
stack.append(vartree(var_n))
elif re.match("bv[0-9]+", current) or current in ["true", "false"] or current[0] == '"' or is_number(current):
stack.append(vartree("constant"))
self.feature[-3] += 1
elif current.isalpha():
pass
print("unknown symbol", current, data_list)
else:
stack.append(vartree("var"))
res = data_list[current_ind].count(")")
while (res != 0 and "(" in stack):
stack_rev = stack[::-1]
i = stack_rev.index("(")
tree_val = stack[-i:]
if len(tree_val) > 4:
pop_list = []
for ind, tr in enumerate(tree_val):
if ind != 0 and tr.val == "constant":
pop_list.append(ind)
tree_val = [tree_val[x] for x in range(len(tree_val)) if x not in pop_list] + [vartree("constant")] * 3
else:
tree_val = tree_val + [None] * 3
if isinstance(tree_val[0], Tree):
self.mid_val["val"] = tree_val[0]
else:
self.mid_val["val"] = vartree(tree_val[0], tree_val[1], tree_val[2], tree_val[3])
stack = stack[:-i - 1]
res -= 1
stack.append(self.mid_val["val"])
if stack[0] == "define" and len(stack) == 5:
if not isinstance(stack[3], Tree):
self.mid_val[stack[3]] = stack[4]
stack[4].set_name(stack[3])
self.used[stack[3]] = False
stack = stack[:2]
if res >= 2:
stack[0] = "define-done"
break
else:
res = 0
if current_ind + 1 == data_len and stack.count("(") + 1 == len(stack):
stack[0] = stack[-1]
break
current_ind += 1
if stack[0] == "define-done":
stack = swap_stack
swap_stack = ["define"]
if current_ind == data_len:
self.feature_list.append(stack[0])
except TimeoutError:
raise TimeoutError
except Exception as e:
traceback.print_exc()
# print(assertion)
# raise e
return False
return True
    # count the operators and variables of one SMT assertion: str -> [int]*150
def count_feature(self, assertion):
for var_name in self.val_list:
if " " in var_name:
assertion = assertion.replace(var_name, self.val_dic[var_name])
assertion = assertion.replace("(", " ")
assertion = assertion.replace(")", " ")
assertion = assertion.replace("\n", " ")
from collections import Counter
counts = Counter(assertion.split(" "))
feature = [0] * 150
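        # 150-slot count vector (as used below): indices into `op` count operator occurrences,
        # index 111 + min(var_id, 20) counts each variable var1..var20+, and index 21 is bumped
        # for let-bound intermediate names (?x / $x)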
for d in counts:
if d in op:
feature[op.index(d)] = counts[d]
elif d in self.val_list:
ind = min(int(self.val_dic[d][3:]), 20)
feature[111 + ind] = counts[d]
elif d[:3] == "var":
try:
ind = min(int(d[3:]), 20)
feature[111 + ind] = counts[d]
except (KeyError,IndexError,ValueError):
pass
elif d.startswith("?x") or d.startswith("$x"):
feature[21] += 1
for i in range(len(self.feature) - 4):
self.feature[i] += feature[i]
# root = vartree(feature)
# self.feature_list.append(vartree(feature))
return feature
    # parse angr-generated SMT scripts (mainly QF_BV); first version, superseded by the pysmt parser
def parse_angr_smt(self, data_line):
for var_name in self.val_list:
if " " in var_name:
data_line = data_line.replace(var_name, self.val_dic[var_name])
data_list = data_line.split(" ")
stack = []
name = None
try:
if "let" not in data_line:
name = "midval"
for da in data_list:
if name and da.startswith("("):
for i in range(da.count("(")):
stack.append("(")
d = da.strip("(")
d = d.strip(")")
if d in ['', '_', "let", "assert"]:
continue
elif d == "true" or d == "false":
stack.append(vartree(bool_constant))
elif d in op:
stack.append(d)
self.feature[op.index(d)] += 1
elif d.startswith("?x") or d.startswith("$x"):
if name:
# stack.append(self.mid_val[d])
if self.used[d] == False:
stack.append(self.mid_val[d])
self.used[d] = True
else:
stack.append(self.generate_replace(self.mid_val[d]))
# if self.mid_val[d].node > 20:
# stack.append(self.generate_replace(self.mid_val[d]))
# else:
# stack.append(copy(self.mid_val[d]))
else:
name = d
if d in self.mid_val:
return
# nested string trigger replace error
# elif d.startswith("var"):
# stack.append(vartree(d))
elif d in self.val_list or d[:3] == "var":
var_n = self.val_dic[d]
stack.append(vartree(var_n))
elif re.match("bv[0-9]+", d):
stack.append(vartree(bv_constant))
self.feature[-3] += 1
elif is_number(d):
pass
elif d.isalpha():
pass
print("unknown symbol", d, data_line)
else:
pass
print("unknown term", data_line, d)
res = da.count(")")
if len(stack) >= 2 and stack[-2] in none_op and isinstance(stack[-1], Tree):
single_tree = vartree(stack[-2], stack[-1])
stack = stack[:-2]
stack.append(single_tree)
while (res != 0 and "(" in stack):
if len(stack) >= 2 and stack[-2] in none_op and isinstance(stack[-1], Tree):
single_tree = vartree(stack[-2], stack[-1])
stack = stack[:-2]
stack.append(single_tree)
stack_rev = stack[::-1]
i = stack_rev.index("(")
if len(stack[-i:]) == 1 or stack[-i] in none_op:
self.mid_val["val"] = stack[-i:][0]
else:
tree_val = stack[-i:] + [None] * 3
self.mid_val["val"] = vartree(tree_val[0], tree_val[1], tree_val[2], tree_val[3])
stack = stack[:-i - 1]
res -= 1
stack.append(self.mid_val["val"])
if len(stack) != 0:
stack = stack + [None] * 3
if "let" in data_line and isinstance(stack[0], Tree):
self.mid_val[name] = stack[0]
stack[0].set_name(name)
self.used[name] = False
# print("let", stack[1])
else:
if isinstance(stack[0], Tree):
self.feature_list.append(stack[0])
# print("assert", self.feature_list[-1])
except (KeyError,IndexError) as e:
# traceback.print_exc()
if isinstance(e, TimeoutError):
raise TimeoutError
with open("parse_error.txt", "w") as f:
f.write(data_line + "\n")
data_line = data_line.replace("(", "")
data_line = data_line.replace(")", "")
data_list = data_line.split(" ")
stack = []
name = None
if "let" not in data_line:
name = "midval"
for d in data_list:
if d.startswith("?x") or d.startswith("$x"):
if name:
if self.used[d] == False:
stack.append(self.mid_val[d])
self.used[d] = True
else:
stack.append(self.generate_replace(self.mid_val[d]))
# if self.mid_val[d].node > 20:
# stack.append(self.generate_replace(self.mid_val[d]))
# else:
# stack.append(copy(self.mid_val[d]))
else:
name = d
elif re.match("bv[0-9]+", d):
stack.append(vartree(bv_constant))
elif d == "true" or d == "false":
stack.append(vartree(bool_constant))
elif d.startswith("var"):
stack.append(vartree(d))
stack = stack + [None] * 3
if "let" in data_line:
tree = vartree("unknown", stack[0], stack[1], stack[2])
tree.set_name(name)
self.mid_val[name] = tree
self.used[name] = False
else:
self.feature_list.append(vartree("unknown", stack[0], stack[1], stack[2]))
def copy(tree):
ret = None
if tree:
ret = Tree(tree.val)
ret.left = copy(tree.left)
ret.mid = copy(tree.mid)
ret.right = copy(tree.right)
ret.var = tree.var
ret.depth = tree.depth
ret.compress_depth = tree.compress_depth
return ret
def vartree(val,left= None,mid= None,right= None):
if left == None and isinstance(val, Tree):
return val
if isinstance(val, list):
ret = Tree("")
ret.val = val
return ret
try:
ret = Tree(val, left, mid, right)
ret.cal()
except TypeError:
ret = Tree("unknown", None, None, None)
return ret
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
if re.match("b\#[0-9]+", s):
return True
return False
def finished(string, left_count):
word = None
for i in reserved_word:
if i in string:
word = i
left_count = 0
count = left_count + string.count("(")
count = count - string.count(")")
if not count:
word = None
return count, word
| 42.056552
| 127
| 0.490866
|
114e7948dbe67e0a7db7d258f76f824c3cbb2f9f
| 1,166
|
py
|
Python
|
test_unittest.py
|
danilovex/dojo-python-fizzbuzz
|
1f5647443c03b2a0078e912f9394a410d55f08c1
|
[
"MIT"
] | null | null | null |
test_unittest.py
|
danilovex/dojo-python-fizzbuzz
|
1f5647443c03b2a0078e912f9394a410d55f08c1
|
[
"MIT"
] | null | null | null |
test_unittest.py
|
danilovex/dojo-python-fizzbuzz
|
1f5647443c03b2a0078e912f9394a410d55f08c1
|
[
"MIT"
] | null | null | null |
import service # The code to test
import unittest # The test framework
class Test_TestIncrementDecrement(unittest.TestCase):
def test_deve_retornar_fizz_numeros_divisiveis_por3(self):
self.assertEqual(service.eh_divisivel_por3(15), True)
def test_deve_retornar_true_numeros_divisiveis_por5(self):
self.assertEqual(service.eh_divisivel_por5(5), True)
def test_deve_retornar_true_numeros_divisiveis_por3e5(self):
self.assertEqual(service.eh_divisivel_por3e5(15), True)
def test_3_deve_retornar_fizz(self):
self.assertEqual(service.substituir(3), 'FIZZ')
def test_5_deve_retornar_buzz(self):
self.assertEqual(service.substituir(5), 'BUZZ')
def test_15_deve_retornar_fizzbuzz(self):
self.assertEqual(service.substituir(15), 'FIZZBUZZ')
def test_4_deve_retornar_4(self):
self.assertEqual(service.substituir(4), '4')
def test_contem_31_retornar_fizz(self):
self.assertEqual(service.substituir(31),'FIZZ')
def test_contem_51_retornar_fizzbuzz(self):
self.assertEqual(service.substituir(51),'FIZZBUZZ')
if __name__ == '__main__':
unittest.main()
| 34.294118
| 64
| 0.744425
|
33ed3d7bcd38b90c7a8f6692bb324ff0068e750e
| 1,979
|
py
|
Python
|
python/ql/test/query-tests/Functions/general/protocols.py
|
robertbrignull/ql
|
2ecef33c9d2c9a66b5359b68437c3229fcf54964
|
[
"MIT"
] | 26
|
2020-06-30T03:07:19.000Z
|
2022-03-31T03:57:23.000Z
|
python/ql/test/query-tests/Functions/general/protocols.py
|
robertbrignull/ql
|
2ecef33c9d2c9a66b5359b68437c3229fcf54964
|
[
"MIT"
] | 2
|
2020-06-30T06:00:59.000Z
|
2021-04-21T19:53:33.000Z
|
python/ql/test/query-tests/Functions/general/protocols.py
|
robertbrignull/ql
|
2ecef33c9d2c9a66b5359b68437c3229fcf54964
|
[
"MIT"
] | 10
|
2021-03-24T13:09:08.000Z
|
2022-02-10T07:39:30.000Z
|
class Iterator:
#Support both 2 and 3 protocol
def __next__(self):
pass
def next(self):
pass
def __iter__(self):
return self
class X(object):
def __iter__(self):
return object()
class IteratorMissingNext:
def __iter__(self):
return self
class IterableMissingNext:
def __iter__(self):
return IteratorMissingNext()
class IteratorMissingIter:
def next(self):
pass
def __next__(self):
pass
class IterableMissingIter:
def __iter__(self):
return IteratorMissingIter()
class IterableWithGenerator:
# returning a generator from __iter__ in an iterable is ok
def __iter__(self):
i = 0
while True:
yield i
i += 1
#Iterator not returning self
class AlmostIterator(object):
def __next__(self):
pass
def next(self):
pass
def __iter__(self):
return X.Xiter(X())
class AlmostIterable(object):
def __iter__(self):
return AlmostIterator()
#Overly complex __del__ method
class MegaDel(object):
def __del__(self):
a = self.x + self.y
if a:
print(a)
if sys._getframe().f_lineno > 100:
print("Hello")
sum = 0
for a in range(100):
sum += a
print(sum)
class MiniDel(object):
def close(self):
pass
def __del__(self):
self.close()
class IncorrectSpecialMethods(object):
def __add__(self, other):
raise NotImplementedError()
def __getitem__(self, index):
raise ZeroDivisionError()
def __getattr__(self):
raise ZeroDivisionError()
def f(self):
pass
class MissingMethods(object):
__repr__ = f # This should be OK
__add__ = f # But not this
__set__ = f # or this
#OK Special method
class OK(object):
def __call__(self):
yield 0
raise StopIteration
| 16.630252
| 62
| 0.590702
|
c98af38e2fdce25e8b8e68467d698dc02b66bac5
| 2,451
|
py
|
Python
|
tfce_toolbox/tfce_computation.py
|
celine-alameda/cluster-analysis
|
0168bb8238f2e368f92d5fddbfe9330fd3465438
|
[
"BSD-3-Clause"
] | null | null | null |
tfce_toolbox/tfce_computation.py
|
celine-alameda/cluster-analysis
|
0168bb8238f2e368f92d5fddbfe9330fd3465438
|
[
"BSD-3-Clause"
] | null | null | null |
tfce_toolbox/tfce_computation.py
|
celine-alameda/cluster-analysis
|
0168bb8238f2e368f92d5fddbfe9330fd3465438
|
[
"BSD-3-Clause"
] | null | null | null |
import concurrent
import math
# values as per doi:10.1016/j.neuroimage.2008.03.061
dh = 0.1
extend_weight = 0.5
height_weight = 2
def tfces_from_distributions_st(distributions: list):
tfces = []
for distribution in distributions:
tfce = tfce_from_distribution(distribution)
tfces.append(tfce)
return tfces
def tfces_from_distributions_mt(distributions: list, n_workers):
tfces = []
with concurrent.futures.ProcessPoolExecutor(max_workers=n_workers) as executor:
future_to_url = {executor.submit(tfce_from_distribution, distrib): distrib for distrib in
distributions}
for future in concurrent.futures.as_completed(future_to_url):
try:
tfce = future.result()
tfces.append(tfce)
except Exception as exc:
print('Exception: {}'.format(exc))
return tfces
def tfce_from_distribution(distribution: list):
"""Given a distribution (1D list of values), computes the Threshold-Free Cluster Enhancement"""
tfce_values = []
for i in range(len(distribution)):
# floor to 0.1
# notations are similar to those in the paper
if distribution[i] == 0:
tfce_values.append(0)
continue
signum = distribution[i] / abs(distribution[i])
h_p = math.floor(abs(distribution[i]) / dh) * dh
height = dh
tfce = 0
while height <= h_p:
# extent is how many samples have values of at least h
# reach forward
extend = 1 # at least this sample
index = i + 1
while index < len(distribution):
signum_at_index = distribution[index] / abs(distribution[index])
                if abs(distribution[index]) < height or signum_at_index != signum:
break
extend += 1
index += 1
# reach backward
index = i - 1
while index >= 0:
signum_at_index = distribution[index] / abs(distribution[index])
                if abs(distribution[index]) < height or signum_at_index != signum:
break
extend += 1
index -= 1
tfce = tfce + math.pow(extend, extend_weight) * math.pow(height, height_weight)
height += dh
tfce = tfce * signum
tfce_values.append(tfce)
return tfce_values
| 35.521739
| 99
| 0.587923
|
5f41b53106d4226c2773b4d0f47db1d50245abee
| 15,268
|
py
|
Python
|
src/mrack/providers/beaker.py
|
Tiboris/mrack
|
c3a91ad9c36113ad14d4ebbab8985616a0a1359c
|
[
"Apache-2.0"
] | 2
|
2021-05-26T15:57:13.000Z
|
2021-08-21T02:14:01.000Z
|
src/mrack/providers/beaker.py
|
Tiboris/mrack
|
c3a91ad9c36113ad14d4ebbab8985616a0a1359c
|
[
"Apache-2.0"
] | 81
|
2020-10-02T08:30:56.000Z
|
2022-03-31T11:47:41.000Z
|
src/mrack/providers/beaker.py
|
Tiboris/mrack
|
c3a91ad9c36113ad14d4ebbab8985616a0a1359c
|
[
"Apache-2.0"
] | 7
|
2020-10-02T08:13:57.000Z
|
2022-03-31T11:22:53.000Z
|
# Copyright 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beaker Provider interface."""
import asyncio
import logging
import os
import socket
import xml.etree.ElementTree as eTree
from copy import deepcopy
from datetime import datetime, timedelta
from xml.dom.minidom import Document as xml_doc
from xmlrpc.client import Fault
from bkr.client import BeakerJob, BeakerRecipe, BeakerRecipeSet
from bkr.common.hub import HubProxy
from bkr.common.pyconfig import PyConfigParser
from mrack.context import global_context
from mrack.errors import ProvisioningError, ValidationError
from mrack.host import (
STATUS_ACTIVE,
STATUS_DELETED,
STATUS_ERROR,
STATUS_OTHER,
STATUS_PROVISIONING,
)
from mrack.providers.provider import STRATEGY_ABORT, Provider
from mrack.utils import add_dict_to_node
logger = logging.getLogger(__name__)
PROVISIONER_KEY = "beaker"
def parse_bkr_exc_str(exc_str):
"""Parse exception string and return more readable string for mrack error."""
# we expect exception string to look like following:
# '<class \'bkr.common.bexceptions.BX\'>:No distro tree matches Recipe:
# <distroRequires>
# <and>
# <distro_name op="like" value="Fedora-33%"/>
# </and>
# </distroRequires>'
if (
":" not in exc_str.faultString
and "bkr.common.bexceptions" not in exc_str.faultString
):
# we got string we do not expect so just use the traceback
return str(exc_str)
# because of expected format we split by ":" and use last 2 values from list
# in above example it would be
# [
# '\tNo distro tree matches Recipe',
# '\t<distroRequires><and><distro_name op="like" value="Fedora-33%"/> ...
# ]
fault = [f"\t{f.strip()}" for f in exc_str.faultString.split(":")[-2:]]
return "\n".join(fault)
class BeakerProvider(Provider):
"""Beaker Provider."""
def __init__(self):
"""Object initialization."""
self._name = PROVISIONER_KEY
self.dsp_name = "Beaker"
self.conf = PyConfigParser()
self.poll_sleep = 45 # seconds
self.pubkey = None
self.max_retry = 1 # for retry strategy
self.status_map = {
"Reserved": STATUS_ACTIVE,
"New": STATUS_PROVISIONING,
"Scheduled": STATUS_PROVISIONING,
"Queued": STATUS_PROVISIONING,
"Processed": STATUS_PROVISIONING,
"Waiting": STATUS_PROVISIONING,
"Installing": STATUS_PROVISIONING,
"Running": STATUS_PROVISIONING,
"Cancelled": STATUS_DELETED,
"Aborted": STATUS_ERROR,
"Completed": STATUS_OTHER,
"MRACK_REACHED_TIMEOUT": STATUS_ERROR,
"MRACK_RESULT_NOT_PASSED": STATUS_ERROR,
}
async def init(
self,
distros,
timeout,
reserve_duration,
pubkey,
strategy=STRATEGY_ABORT,
max_retry=1,
):
"""Initialize provider with data from Beaker configuration."""
logger.info(f"{self.dsp_name}: Initializing provider")
self.strategy = strategy
self.max_retry = max_retry
self.distros = distros
self.timeout = timeout
self.reserve_duration = reserve_duration
self.pubkey = pubkey
login_start = datetime.now()
default_config = os.path.expanduser(
os.environ.get("BEAKER_CONF", "/etc/beaker/client.conf") # TODO use provc
) # get the beaker config for initialization of hub
self.conf.load_from_file(default_config)
self.hub = HubProxy(logger=logger, conf=self.conf)
login_end = datetime.now()
login_duration = login_end - login_start
logger.info(f"{self.dsp_name}: Init duration {login_duration}")
async def validate_hosts(self, reqs):
"""Validate that host requirements are well specified."""
for req in reqs:
req_dstr = req.get("distro")
if not req.get("meta_distro") and req_dstr not in self.distros:
raise ValidationError(
f"{self.dsp_name} provider does not support "
f"'{req_dstr}' distro in provisioning config"
)
return
async def prepare_provisioning(self, reqs):
"""Prepare provisioning."""
return bool(reqs)
async def can_provision(self, hosts):
"""Check that hosts can be provisioned."""
return True
def _allow_ssh_key(self, pubkey):
with open(os.path.expanduser(pubkey), "r") as key_file:
key_content = key_file.read()
return [
"""%%post
mkdir -p /root/.ssh
cat >>/root/.ssh/authorized_keys << "__EOF__"
%s__EOF__
restorecon -R /root/.ssh
chmod go-w /root /root/.ssh /root/.ssh/authorized_keys
%%end"""
% "".join(key_content)
]
def _req_to_bkr_job(self, req): # pylint: disable=too-many-locals
"""Transform requirement to beaker job xml."""
specs = deepcopy(req) # work with own copy, do not modify the input
# Job attributes:
specs.update({"retention_tag": "audit"})
specs.update({"product": "[internal]"})
specs.update({"whiteboard": "This job has been created using mrack."})
# RecipeSet attributes
specs.update({"priority": "Normal"})
# Add allowed keys
specs.update({"ks_append": self._allow_ssh_key(self.pubkey)})
# Use ks_meta
specs.update({"ks_meta": "harness='restraint-rhts beakerlib-redhat'"})
# Recipe task definition
specs.update(
            {  # we use a dummy task because beaker requires a task in the recipe
"tasks": [{"name": "/distribution/dummy", "role": "STANDALONE"}]
}
)
# Create recipe with the specifications
recipe = BeakerRecipe(**specs)
recipe.addBaseRequires(**specs)
# Specify the architecture
arch_node = xml_doc().createElement("distro_arch")
arch_node.setAttribute("op", "=")
arch_node.setAttribute("value", specs["arch"])
recipe.addDistroRequires(arch_node)
host_requires = global_context.PROV_CONFIG[PROVISIONER_KEY].get(
"hostRequires",
specs.get(f"mrack_{PROVISIONER_KEY}", {}).get("hostRequires", {}),
)
if host_requires: # suppose to be dict like {"or": [dict()], "and": [dict()]}
for operand, operand_value in host_requires.items():
if operand.startswith("_"):
recipe.node.getElementsByTagName("hostRequires")[0].setAttribute(
operand[1:],
operand_value,
)
continue
# known operands are ["and", "or"]
req_node = xml_doc().createElement(operand)
for dct in operand_value:
req_node = add_dict_to_node(req_node, dct)
recipe.node.getElementsByTagName("hostRequires")[0].appendChild(
req_node
)
# Specify the custom xml distro_tag node with values from provisioning config
distro_tags = global_context.PROV_CONFIG["beaker"].get("distro_tags")
if distro_tags:
for tag in distro_tags.get(specs["distro"], []):
tag_node = xml_doc().createElement("distro_tag")
tag_node.setAttribute("op", "=")
tag_node.setAttribute("value", tag)
recipe.addDistroRequires(tag_node)
# Add ReserveSys element to reserve system after provisioning
recipe.addReservesys(duration=str(self.reserve_duration))
for task in specs["tasks"]:
recipe.addTask(task=task["name"], role=task["role"])
# Create RecipeSet and add our Recipe to it.
recipe_set = BeakerRecipeSet(**specs)
recipe_set.addRecipe(recipe)
# Create job instance and inject created RecipeSet to it
job = BeakerJob(**specs)
job.addRecipeSet(recipe_set)
return job
async def create_server(self, req):
"""Issue creation of a server.
req - dict of server requirements
The req object can contain following additional attributes:
* 'name': name for the VM
* 'distro': beaker distribution to use
* 'arch': architecture to request from beaker
* 'variant': variant of the system
Returns:
A tuple containing, respectively, a string (<created beaker job id>)
and a dict (<requirements for VM>)
:rtype: (str, dict)
"""
logger.info(f"{self.dsp_name}: Creating server")
job = self._req_to_bkr_job(req) # Generate the job
try:
job_id = self.hub.jobs.upload(job.toxml()) # schedule beaker job
except Fault as bkr_fault:
# use the name as id for the logging purposes
req["host_id"] = req["name"]
raise ProvisioningError(
parse_bkr_exc_str(bkr_fault),
req,
) from bkr_fault
return (job_id, req)
def prov_result_to_host_data(self, prov_result, req):
"""Transform provisioning result to needed host data."""
try:
ip_address = socket.gethostbyname(prov_result["system"])
except (TypeError, socket.gaierror):
ip_address = None
result = {
"id": prov_result["JobID"],
"name": prov_result.get("mrack_req").get("name"),
"addresses": [ip_address],
"status": prov_result["status"],
"fault": None,
"os": prov_result.get("mrack_req").get("os"),
"group": prov_result.get("mrack_req").get("group"),
}
if prov_result["result"] != "Pass":
result.update(
{
"fault": prov_result["result"],
"status": "MRACK_RESULT_NOT_PASSED",
}
)
return result
def _get_recipe_info(self, beaker_id):
"""Get info about the recipe for beaker job id."""
bkr_job_xml = self.hub.taskactions.to_xml(beaker_id).encode("utf8")
resources = []
for recipe in eTree.fromstring(bkr_job_xml).iter("recipe"):
resources.append(
{
"system": recipe.get("system"),
"status": recipe.get("status"),
"result": recipe.get("result"),
"rid": recipe.get("id"),
"id": recipe.get("job_id"),
}
)
return resources[0] if len(resources) == 1 else []
async def wait_till_provisioned(self, resource):
"""Wait for Beaker provisioning result."""
beaker_id, req = resource
resource = {}
prev_status = ""
job_url = ""
# let us use timeout variable which is in minutes to define
# maximum time to wait for beaker recipe to provide VM
timeout_time = datetime.now() + timedelta(minutes=self.timeout)
while datetime.now() < timeout_time:
resource = self._get_recipe_info(beaker_id)
status = resource["status"]
job_url = (
f"{self.hub._hub_url}" # pylint: disable=protected-access
f"/jobs/{resource['id']}"
)
if prev_status != status:
logger.info(
f"{self.dsp_name}: Job {job_url} "
f"has changed status ({prev_status} -> {status})"
)
prev_status = status
else:
logger.info(
f"{self.dsp_name}: Job {job_url} has not changed status "
f"({status}), waiting another {self.poll_sleep:.1f}s"
)
if self.status_map.get(status) == STATUS_PROVISIONING:
await asyncio.sleep(self.poll_sleep)
elif self.status_map.get(status) == STATUS_ACTIVE:
break
elif self.status_map.get(status) in [STATUS_ERROR, STATUS_DELETED]:
logger.warning(
f"{self.dsp_name}: Job {job_url} has errored with status "
f"{status} and result {resource['result']}"
)
resource.update({"result": f"Job {job_url} failed to provision"})
break
else:
logger.error(
f"{self.dsp_name}: Job {job_url} has switched to unexpected "
f"status {status} with result {resource['result']}"
)
resource.update({"result": f"Job {job_url} failed to provision"})
break
else:
# In this case we failed to provision host in time:
# we need to create failed host object for mrack
# to delete the resource by cancelling the beaker job.
logger.error(
f"{self.dsp_name}: Job {job_url} failed to provide resource in"
f" the timeout of {self.timeout} minutes"
)
resource.update(
{
"status": "MRACK_REACHED_TIMEOUT",
"result": f"Job {job_url} reached timeout",
}
)
resource.update(
{
"JobID": beaker_id,
"mrack_req": req,
}
)
return resource, req
async def delete_host(self, host_id):
"""Delete provisioned hosts based on input from provision_hosts."""
        # host_id should start with 'J:'; this way we know the job has been
        # scheduled and a proper response from the beaker hub has been returned.
        # Otherwise (in case of a hub error or an invalid host definition) the
        # provider uses the hostname from the metadata of the VM which failed
        # to validate the requirements for the provider.
if not host_id.startswith("J:"):
logger.warning(
f"{self.dsp_name}: Job for host '{host_id}' does not exist yet"
)
return True
logger.info(
f"{self.dsp_name}: Deleting host by cancelling Job "
f"{self.hub._hub_url}" # pylint: disable=protected-access
f"/jobs/{host_id.split(':')[1]}"
)
return self.hub.taskactions.stop(
host_id, "cancel", "Job has been stopped by mrack."
)
def to_host(self, provisioning_result, req, username="root"):
"""Transform provisioning result into Host object."""
return super().to_host(provisioning_result, req, username)
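# --- Illustrative sketch (not part of mrack) -----------------------------------
# BeakerProvider._get_recipe_info() pulls the recipe attributes out of the job
# XML returned by the hub. The XML below is a heavily trimmed, made-up stand-in
# for a real Beaker job document, just to show which attributes are picked up;
# the helper is defined for illustration only and never called by mrack.
def _recipe_info_example():
    job_xml = (
        '<job id="123"><recipeSet>'
        '<recipe id="456" job_id="123" system="host.example.com"'
        ' status="Reserved" result="Pass"/>'
        "</recipeSet></job>"
    )
    recipe = next(iter(eTree.fromstring(job_xml).iter("recipe")))
    return {
        "system": recipe.get("system"),
        "status": recipe.get("status"),
        "result": recipe.get("result"),
        "rid": recipe.get("id"),
        "id": recipe.get("job_id"),
    }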
| 36.439141
| 86
| 0.584228
|
1cd4d6519784f39b941570a118d4ced5254228aa
| 1,044
|
py
|
Python
|
tests/test_fitfuncs.py
|
david-hoffman/dphtools
|
f9f19ef5b0a00169562947c78a41c1f02e222a6a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fitfuncs.py
|
david-hoffman/dphtools
|
f9f19ef5b0a00169562947c78a41c1f02e222a6a
|
[
"Apache-2.0"
] | 4
|
2021-02-18T18:15:39.000Z
|
2022-02-09T06:49:58.000Z
|
tests/test_fitfuncs.py
|
david-hoffman/dphtools
|
f9f19ef5b0a00169562947c78a41c1f02e222a6a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_fitfuncs.py
"""
Testing for fitfuncs.
Copyright (c) 2021, David Hoffman
"""
import unittest
import numpy as np
from dphtools.utils.fitfuncs import exponent, exponent_fit
from numpy.testing import assert_allclose
class TestExponentFit(unittest.TestCase):
"""Test exponent fit.
This is not even close to testing edge cases.
"""
def setUp(self):
"""Set up."""
self.x = np.linspace(0, 10)
self.params = (10, 3, 5)
self.data = exponent(self.x, *self.params)
self.data_noisy = np.random.randn(self.x.size)
def test_positive(self):
"""Test a decaying signal."""
popt, pcov = exponent_fit(self.data, self.x)
assert_allclose(popt, self.params, rtol=1e-3)
def test_negative(self):
"""Test a rising signal."""
popt, pcov = exponent_fit(-self.data, self.x)
amp, k, offset = self.params
new_params = -amp, k, -offset
assert_allclose(popt, new_params, rtol=1e-3)
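# --- Illustrative note (not from dphtools) --------------------------------------
# The tests above only rely on exponent() being a three-parameter decay; a
# plausible form is  amp * exp(-x / k) + offset  (the exact parameterization
# inside dphtools.utils.fitfuncs is not reproduced here). The guard below builds
# the same kind of synthetic data as the fixture, purely for illustration.
if __name__ == "__main__":
    x = np.linspace(0, 10)
    amp, k, offset = 10, 3, 5
    data = amp * np.exp(-x / k) + offset  # assumed model form
    print(data[:5])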
| 24.857143
| 58
| 0.630268
|
a03095ee947027261254579a36557325dec1d234
| 4,688
|
py
|
Python
|
model-optimizer/extensions/front/tf/pooling_ext_test.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 2
|
2021-04-19T06:08:35.000Z
|
2021-08-25T02:43:43.000Z
|
model-optimizer/extensions/front/tf/pooling_ext_test.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 6
|
2022-01-11T18:56:22.000Z
|
2022-02-21T13:20:20.000Z
|
model-optimizer/extensions/front/tf/pooling_ext_test.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 3
|
2021-02-05T17:11:17.000Z
|
2021-04-19T08:33:31.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.front.tf.pooling_ext import AvgPoolFrontExtractor, MaxPoolFrontExtractor
from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
class PoolingExtractorTest(BaseExtractorsTestingClass):
@classmethod
def setUpClass(cls):
cls.strides = [1, 2, 3, 4]
cls.ksize = [1, 3, 3, 1]
cls.patcher = 'mo.ops.pooling.Pooling.infer'
def test_pool_defaults(self):
pb = PB({'attr': {
'data_format': PB({
's': b"NHWC"
}),
'strides': PB({
'list': PB({
"i": self.strides
})
}),
'ksize': PB({
'list': PB({"i": self.ksize})
}),
'padding': PB({
's': b'VALID'
})
}})
self.expected = {
'pad': None, # will be inferred when input shape is known
'pad_spatial_shape': None,
'type': 'Pooling',
'exclude_pad': 'true',
}
node = PB({'pb': pb})
AvgPoolFrontExtractor.extract(node)
self.res = node
self.res["infer"](None)
self.call_args = self.infer_mock.call_args
self.expected_call_args = (None, None)
self.compare()
def test_avg_pool_nhwc(self):
pb = PB({'attr': {
'data_format': PB({
's': b"NHWC"
}),
'strides': PB({
'list': PB({"i": self.strides})
}),
'ksize': PB({
'list': PB({"i": self.ksize})
}),
'padding': PB({
's': b'VALID'
})
}})
self.expected = {
'window': np.array(self.ksize, dtype=np.int8),
'spatial_dims': [1, 2],
'stride': np.array(self.strides, dtype=np.int8),
'pool_method': "avg",
}
node = PB({'pb': pb})
AvgPoolFrontExtractor.extract(node)
self.res = node
self.res["infer"](None)
self.call_args = self.infer_mock.call_args
self.expected_call_args = (None, "avg")
self.compare()
def test_avg_pool_nchw(self):
pb = PB({'attr': {
'data_format': PB({
's': b"NCHW"
}),
'strides': PB({
'list': PB({
"i": self.strides
})
}),
'ksize': PB({
'list': PB({
"i": self.ksize
})
}),
'padding': PB({
's': b'VALID'
})
}})
self.expected = {
'window': np.array(self.ksize, dtype=np.int8),
'spatial_dims': [2, 3],
'stride': np.array(self.strides, dtype=np.int8),
'pool_method': "avg",
}
node = PB({'pb': pb})
AvgPoolFrontExtractor.extract(node)
self.res = node
self.res["infer"](None)
self.call_args = self.infer_mock.call_args
self.expected_call_args = (None, "avg")
self.compare()
def test_max_pool_nhwc(self):
pb = PB({'attr': {
'data_format': PB({
's': b"NHWC"
}),
'strides': PB({
'list': PB({
"i": self.strides
})
}),
'ksize': PB({
'list': PB({
"i": self.ksize
})
}),
'padding': PB({
's': b'VALID'
})
}})
self.expected = {
'window': np.array(self.ksize, dtype=np.int8),
'spatial_dims': [1, 2],
'stride': np.array(self.strides, dtype=np.int64),
'pool_method': "max",
}
node = PB({'pb': pb})
MaxPoolFrontExtractor.extract(node)
self.res = node
self.res["infer"](None)
self.call_args = self.infer_mock.call_args
self.expected_call_args = (None, "max")
self.compare()
| 30.245161
| 88
| 0.473763
|
855c33562f8b643d52b4221e8a9d697358e43676
| 7,014
|
py
|
Python
|
cni_challenge/evaluation/classification_metrics.py
|
sz144/pl-cni_challenge
|
eded4050329f3a54d5a67d91e2680a6b0dadc471
|
[
"MIT"
] | 3
|
2019-06-07T21:46:33.000Z
|
2019-07-04T19:24:14.000Z
|
cni_challenge/evaluation/classification_metrics.py
|
sz144/pl-cni_challenge
|
eded4050329f3a54d5a67d91e2680a6b0dadc471
|
[
"MIT"
] | null | null | null |
cni_challenge/evaluation/classification_metrics.py
|
sz144/pl-cni_challenge
|
eded4050329f3a54d5a67d91e2680a6b0dadc471
|
[
"MIT"
] | 11
|
2019-06-30T20:14:02.000Z
|
2021-05-19T19:01:37.000Z
|
#!/shared/python3shared/bin/python3
"""
:Summary:
Given the lack of consensus on which metric is most suitable for determining the most appropriate classifier, we use an inclusive approach.
This includes multiple measures which are commonly used in classification tasks, such as accuracy and AUC, allowing for a more intuitive
interpretation of the results, in addition to measures such as Geometric-mean and optimized precision
(cf. M Hossin and MN Sulaiman. A review on evaluation metrics for data classification evaluations. International Journal of Data Mining & Knowledge Management Process, 5(2):1,2015.) .
:Description:
We utilize accuracy, error rate, sensitivity, specificity, precision, recall, F-Measure, Geometric-mean,
AUC, optimized precision
(cf. Hossin and Sulaiman. A review on evaluation metrics for data classification evaluations. International Journal of Data Mining & Knowledge Management Process, 5(2):1,2015).
It compares estimated classification (est) and "ground truth" (gt)
:Requires:
:TODO:
:AUTHOR: MDS
:ORGANIZATION: MGH/HMS
:CONTACT: software@markus-schirmer.com
:SINCE: 2018-11-12
:VERSION: 0.1
"""
#=============================================
# Metadata
#=============================================
__author__ = 'mds'
__contact__ = 'software@markus-schirmer.com'
__copyright__ = ''
__license__ = ''
__date__ = '2019-04'
__version__ = '0.1'
#=============================================
# Import statements
#=============================================
import sys
import os
import numpy as np
import sklearn.metrics as skm
import getopt
import csv
import pdb
#=============================================
# Helper functions
#=============================================
TP = np.inf
FP = np.inf
TN = np.inf
FN = np.inf
num_p = np.inf
num_n = np.inf
def help():
print("usage: classification_metrics.py -p <prediction_file> -g <groundtruth_file> -o <outputfile>")
sys.exit()
def get_confusion_matrix(est, gt):
global TP, FP, TN, FN, num_p, num_n
    # use the builtin float(); np.float was removed in NumPy 1.24+
    TP = float(np.sum(np.logical_and((est==1), (gt==1)).astype(int)))
    TN = float(np.sum(np.logical_and((est==0), (gt==0)).astype(int)))
    FP = float(np.sum(np.logical_and((est==1), (gt==0)).astype(int)))
    FN = float(np.sum(np.logical_and((gt==1), (est==0)).astype(int)))
def get_tpr():
# sensitivity / recall / hit rate/ true positive rate
if (TP+FN) == 0:
return np.nan
return TP/(TP+FN)
def get_tnr():
# specificity / selectivity / true negative rate
if (TN+FP) == 0:
return np.nan
return TN/(TN+FP)
def get_ppv():
# precision / positive predictive value
if (TP+FP) == 0:
return np.nan
return TP/(TP+FP)
def get_npv():
# negative predictive value
if (TN + FN) == 0:
return np.nan
return TN/(TN + FN)
def get_fnr():
# false negative rate
if (FN+TP) == 0 :
return np.nan
return FN/(FN+TP)
def get_fpr():
# false positive rate
if (FP+TN) == 0:
return np.nan
return FP/(FP+TN)
def get_fdr():
if (FP+TP) == 0 :
return np.nan
# false discovery rate
return FP/(FP+TP)
def get_for():
# false omission rate
if (FN+TN)==0:
return np.nan
return FN/(FN+TN)
def get_accuracy():
# accuracy
if (TP+TN+FP+FN) == 0:
return np.nan
return (TP+TN)/(TP+TN+FP+FN)
def get_f1_score():
# harmonic mean of recall and precision
if (get_tpr() == 0) or (get_ppv() == 0):
return np.nan
return 1./((1./get_tpr() + 1./get_ppv())/2.)
def get_geom_mean():
# geometric mean of recall and precision
return np.sqrt(get_tpr() * get_ppv())
def get_mcc():
# matthews correlation coefficient
if np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)) == 0:
return np.nan
return (TP*TN - FP * FN)/np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
def get_bm():
# informedness / bookmaker informedness
return (get_tpr() + get_tnr() - 1)
def get_markedness():
return (get_ppv() + get_npv() - 1)
def get_plr():
# positive likelihood ratio
if get_fpr()==0:
return np.nan
return get_tpr()/get_fpr()
def get_nlr():
# negative likelihood ratio
if get_tnr() == 0:
return np.nan
return get_fnr()/get_tnr()
def get_dor():
# diagnostic odds ratio
if get_nlr()==0:
return np.nan
return get_plr()/get_nlr()
def get_AUC(est, gt):
fpr, tpr, thresholds = skm.roc_curve(gt, est)
return skm.auc(fpr, tpr)
def get_OP():
sn = get_tpr()
sp = get_tnr()
P = sn* (TP+FN) + sp * (TN+FP)
return P - np.abs(sp-sn)/(sp + sn)
def get_metrics(est, gt):
# set up global variables
get_confusion_matrix(est, gt)
# initialize output
results = []
names = []
names.append('Sensitivity')
results.append(get_tpr())
names.append('Specificity')
results.append(get_tnr())
names.append('Precision')
results.append(get_ppv())
names.append('Negative_predictive_value')
results.append(get_npv())
names.append('False_negative_rate')
results.append(get_fnr())
names.append('False_positive_rate')
results.append(get_fpr())
names.append('False_discovery_rate')
results.append(get_fdr())
names.append('False_omission_rate')
results.append(get_for())
names.append('Accuracy')
results.append(get_accuracy())
names.append('F1_score')
results.append(get_f1_score())
names.append('Geom_mean')
results.append(get_geom_mean())
names.append('Matthews_CC')
results.append(get_mcc())
names.append('Informedness')
results.append(get_bm())
names.append('Markedness')
results.append(get_markedness())
# names.append('Positive_likelihood_ratio')
# results.append(get_plr())
# names.append('Negative_likelihood_ratio')
# results.append(get_nlr())
# names.append('Diagnostic_odds_ratio')
# results.append(get_dor())
names.append('Optimized_precision')
results.append(get_OP())
names.append('AUC')
results.append(get_AUC(est,gt))
return results, names
def evaluate_prediction(est, gt):
# calculate metrics
results, names = get_metrics(est, gt)
return results, names
def read_file(filename):
data = []
with open(filename, 'r') as fid:
reader = csv.reader(fid)
for row in reader:
            data.append(int(row[0]))  # builtin int(); np.int was removed from NumPy
return np.asarray(data)
#=============================================
# Main method
#=============================================
def main(argv):
prediction_file = None
groundtruth_file = None
output_file = None
try:
opts, args = getopt.getopt(argv[1:],"hp:g:o:",["prediction=","groundtruth=","output="])
except getopt.GetoptError:
help()
for opt, arg in opts:
if opt == '-h':
help()
elif opt in ("-p", "--prediction"):
prediction_file = arg
elif opt in ("-g", "--groundtruth"):
groundtruth_file = arg
elif opt in ('-o', '--output'):
output_file = arg
if (prediction_file is None) or (groundtruth_file is None) or (output_file is None):
help()
# read input
est = read_file(prediction_file)
gt = read_file(groundtruth_file)
# calculate metrics
results, names = evaluate_prediction(est, gt)
# save output
with open(output_file, 'w') as fid:
writer = csv.writer(fid)
for ii in range(len(results)):
writer.writerow([names[ii],results[ii]])
if __name__ == "__main__":
main(sys.argv)
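#=============================================
# Illustrative sketch (not part of the CNI evaluation pipeline)
#=============================================
# A quick way to exercise get_metrics() without CSV files: build small binary
# prediction / ground-truth arrays and print every metric. The arrays below are
# made up purely for demonstration; the function is never called by this script.
def _demo_metrics():
    est = np.array([1, 0, 1, 1, 0, 0, 1, 0])
    gt = np.array([1, 0, 0, 1, 0, 1, 1, 0])
    results, names = get_metrics(est, gt)
    for name, value in zip(names, results):
        print("{}: {:.3f}".format(name, value))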
| 23.458194
| 184
| 0.660393
|
5b634996f2ec8af1ee6661602098aa61e692ad2b
| 668
|
py
|
Python
|
examples/simple_spider/python_documentation_spider.py
|
AirSpiders/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | 21
|
2020-03-20T09:01:24.000Z
|
2021-06-30T02:00:56.000Z
|
examples/simple_spider/python_documentation_spider.py
|
LRENZ/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | null | null | null |
examples/simple_spider/python_documentation_spider.py
|
LRENZ/AirSpider
|
a56e4b1c640e19113b2b078c9a8e7f3a02b2f721
|
[
"Apache-2.0"
] | 6
|
2020-03-30T09:24:22.000Z
|
2020-10-30T16:45:02.000Z
|
import asyncio
import sys
from airspider import Item, TextField, AttrField
class PythonDocumentationItem(Item):
title = TextField(css_select="title")
tutorial_link = AttrField(xpath_select="//a[text()='Tutorial']", attr="href")
async def field_extraction():
url = "https://docs.python.org/3/"
item = await PythonDocumentationItem.get_item(url=url)
print(item.title)
print(item.tutorial_link)
if __name__ == "__main__":
    if sys.version_info[:2] >= (3, 7):
        # asyncio.run is available from Python 3.7 onwards
asyncio.run(field_extraction())
else:
loop = asyncio.new_event_loop()
loop.run_until_complete(field_extraction())
| 25.692308
| 81
| 0.688623
|
27a1074d9b34002da448303d72bdbd5d894b542c
| 783
|
py
|
Python
|
tests/components/pages/form.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 15
|
2019-12-19T11:57:30.000Z
|
2021-11-15T23:34:41.000Z
|
tests/components/pages/form.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 196
|
2019-09-21T15:10:14.000Z
|
2022-03-31T11:07:48.000Z
|
tests/components/pages/form.py
|
jbampton/dazzler
|
4018f6cbcb55a9f482cb5c5cbf6a06b063c15e21
|
[
"MIT"
] | 7
|
2019-10-30T19:38:15.000Z
|
2021-12-01T04:54:16.000Z
|
"""
Page form of dazzler
Created 2019-07-14
"""
from aiohttp import web
from dazzler.components import core
from dazzler.system import Page, RouteMethod
page = Page(
__name__,
core.Container([
core.Form(
fields=[
{
'label': 'Field 1',
'name': 'field1',
'type': 'text'
},
],
action='/submit-form',
method='post',
identity='form'
)
])
)
@page.route('/submit-form', method=RouteMethod.POST, prefix=False)
async def submit(request: web.Request):
data = await request.post()
return web.Response(
body=f'<div id="output">{data.get("field1")}</div>',
content_type='text/html'
)
| 21.75
| 66
| 0.51341
|
115d286b326f5209df02f17b28f66dabc9439ecf
| 722
|
py
|
Python
|
src/predict.py
|
BiswajeetNayak/BankruptcyPrediction
|
92ae5523210a27eb8ac1869118991d7be648d3aa
|
[
"MIT"
] | null | null | null |
src/predict.py
|
BiswajeetNayak/BankruptcyPrediction
|
92ae5523210a27eb8ac1869118991d7be648d3aa
|
[
"MIT"
] | null | null | null |
src/predict.py
|
BiswajeetNayak/BankruptcyPrediction
|
92ae5523210a27eb8ac1869118991d7be648d3aa
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, IsolationForest
from sklearn.linear_model import LogisticRegression
from models import *
def i_f_predict(model, X):
pred = model.predict(X)
return pred
def clf_predict(model, X):
pred = model.predict(X)
pred_proba = model.predict_proba(X)
return pred, pred_proba
def save_output(pred, proba, df, model_name):
    # build the frame column-wise; keep the probability of the positive class
    # (predict_proba returns one column per class, assuming binary classification)
    pred_df = pd.DataFrame({'prediction_class': pred,
                            'prediction_probability': proba[:, 1]})
pred_df = pd.concat([pred_df, df['class']], axis=1)
pred_df.rename(columns={'class': 'actual_class'}, inplace=True)
pred_df.to_csv(f'{config.output_file_path}predictions_{model_name}.csv')
return None
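# --- Illustrative usage sketch ---------------------------------------------------
# How these helpers are typically wired together. `trained_clf`, `X_test` and
# `test_df` are placeholders (a fitted scikit-learn classifier, its feature
# matrix and the dataframe holding the 'class' column); none of these names
# exist in this repository, they are assumptions for the example only.
def _predict_example(trained_clf, X_test, test_df):
    pred, proba = clf_predict(trained_clf, X_test)
    save_output(pred, proba, test_df, model_name="example_clf")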
| 30.083333
| 97
| 0.739612
|
aabeeda2dff1300e9659c6a0083e6ffd105f5e5e
| 5,415
|
py
|
Python
|
electrumx/server/controller.py
|
UbuntuEvangelist/electrumx
|
3577dbc31cb9f661682dd7587a9a4ac400305f38
|
[
"CNRI-Python"
] | null | null | null |
electrumx/server/controller.py
|
UbuntuEvangelist/electrumx
|
3577dbc31cb9f661682dd7587a9a4ac400305f38
|
[
"CNRI-Python"
] | null | null | null |
electrumx/server/controller.py
|
UbuntuEvangelist/electrumx
|
3577dbc31cb9f661682dd7587a9a4ac400305f38
|
[
"CNRI-Python"
] | null | null | null |
# Copyright (c) 2016-2021, Neil Booth
#
# All rights reserved.
#
# This file is licensed under the Open BSV License version 3, see LICENCE for details.
from asyncio import Event
from aiorpcx import _version as aiorpcx_version, TaskGroup
import electrumx
import electrumx.server.block_processor as block_proc
from electrumx.lib.server_base import ServerBase
from electrumx.lib.util import version_string
from electrumx.server.daemon import Daemon
from electrumx.server.db import DB
from electrumx.server.mempool import MemPool, MemPoolAPI
from electrumx.server.session import SessionManager
class Notifications(object):
# hashX notifications come from two sources: new blocks and
# mempool refreshes.
#
# A user with a pending transaction is notified after the block it
# gets in is processed. Block processing can take an extended
# time, and the prefetcher might poll the daemon after the mempool
# code in any case. In such cases the transaction will not be in
# the mempool after the mempool refresh. We want to avoid
# notifying clients twice - for the mempool refresh and when the
# block is done. This object handles that logic by deferring
# notifications appropriately.
def __init__(self):
self._touched_mp = {}
self._touched_bp = {}
self._highest_block = -1
async def _maybe_notify(self):
tmp, tbp = self._touched_mp, self._touched_bp
common = set(tmp).intersection(tbp)
if common:
height = max(common)
elif tmp and max(tmp) == self._highest_block:
height = self._highest_block
else:
# Either we are processing a block and waiting for it to
# come in, or we have not yet had a mempool update for the
# new block height
return
touched = tmp.pop(height)
for old in [h for h in tmp if h <= height]:
del tmp[old]
for old in [h for h in tbp if h <= height]:
touched.update(tbp.pop(old))
await self.notify(height, touched)
async def notify(self, height, touched):
pass
async def start(self, height, notify_func):
self._highest_block = height
self.notify = notify_func
await self.notify(height, set())
async def on_mempool(self, touched, height):
self._touched_mp[height] = touched
await self._maybe_notify()
async def on_block(self, touched, height):
self._touched_bp[height] = touched
self._highest_block = height
await self._maybe_notify()
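# --- Illustrative sketch (not part of electrumx) --------------------------------
# A minimal demonstration of the deferral logic described above: a mempool
# refresh for a new height is held back until the block at that height has also
# been processed, so clients get a single combined notification. The helper is
# never called by the server; run it manually (e.g. with asyncio.run) to
# inspect the result.
async def _notifications_example():
    seen = []

    async def collect(height, touched):
        # stand-in for the session manager's notify callback
        seen.append((height, set(touched)))

    n = Notifications()
    await n.start(100, collect)             # -> (100, set()) initial notification
    await n.on_mempool({'hashX1'}, 101)     # deferred: block 101 not processed yet
    await n.on_block({'hashX2'}, 101)       # -> (101, {'hashX1', 'hashX2'})
    return seen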
class Controller(ServerBase):
    '''Manages server initialisation and shutdown.
Servers are started once the mempool is synced after the block
processor first catches up with the daemon.
'''
async def serve(self, shutdown_event):
'''Start the RPC server and wait for the mempool to synchronize. Then
start serving external clients.
'''
if not (0, 22) <= aiorpcx_version < (0, 23):
raise RuntimeError('aiorpcX version 0.22.x is required')
env = self.env
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
self.logger.info(f'software version: {electrumx.version}')
self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
self.logger.info(f'event loop policy: {env.loop_policy}')
self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')
notifications = Notifications()
async with Daemon(env.coin, env.daemon_url) as daemon:
db = DB(env)
bp = block_proc.BlockProcessor(env, db, daemon, notifications)
# Set notifications up to implement the MemPoolAPI
def get_db_height():
return db.state.height
notifications.height = daemon.height
notifications.db_height = get_db_height
notifications.cached_height = daemon.cached_height
notifications.mempool_hashes = daemon.mempool_hashes
notifications.raw_transactions = daemon.getrawtransactions
notifications.lookup_utxos = db.lookup_utxos
MemPoolAPI.register(Notifications)
mempool = MemPool(env.coin, notifications)
session_mgr = SessionManager(env, db, bp, daemon, mempool,
shutdown_event)
# Test daemon authentication, and also ensure it has a cached
# height. Do this before entering the task group.
await daemon.height()
caught_up_event = Event()
mempool_event = Event()
async def wait_for_catchup():
await caught_up_event.wait()
await group.spawn(db.populate_header_merkle_cache())
await group.spawn(mempool.keep_synchronized(mempool_event))
async with TaskGroup() as group:
await group.spawn(session_mgr.serve(notifications, mempool_event))
await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
await group.spawn(bp.check_cache_size_loop())
await group.spawn(wait_for_catchup())
async for task in group:
if not task.cancelled():
task.result()
| 38.956835
| 86
| 0.651339
|
085c8959a975de56a879fc10c7d9d8dee1947bff
| 17,289
|
py
|
Python
|
code/huxt_analysis.py
|
University-of-Reading-Space-Science/GeoModelUncertainty
|
f5d26cb9c5888d23c1f96ed085dbda57e568af25
|
[
"MIT"
] | null | null | null |
code/huxt_analysis.py
|
University-of-Reading-Space-Science/GeoModelUncertainty
|
f5d26cb9c5888d23c1f96ed085dbda57e568af25
|
[
"MIT"
] | null | null | null |
code/huxt_analysis.py
|
University-of-Reading-Space-Science/GeoModelUncertainty
|
f5d26cb9c5888d23c1f96ed085dbda57e568af25
|
[
"MIT"
] | 1
|
2021-07-12T11:46:26.000Z
|
2021-07-12T11:46:26.000Z
|
import numpy as np
import astropy.units as u
from astropy.time import Time
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
import moviepy.editor as mpy
import pandas as pd
from moviepy.video.io.bindings import mplfig_to_npimage
import huxt as H
mpl.rc("axes", labelsize=16)
mpl.rc("ytick", labelsize=16)
mpl.rc("xtick", labelsize=16)
mpl.rc("legend", fontsize=16)
@u.quantity_input(time=u.day)
def plot(model, time, save=False, tag=''):
"""
Make a contour plot on polar axis of the solar wind solution at a specific time.
:param model: An instance of the HUXt class with a completed solution.
:param time: Time to look up closet model time to (with an astropy.unit of time).
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return fig: Figure handle.
:return ax: Axes handle.
"""
if (time < model.time_out.min()) | (time > (model.time_out.max())):
print("Error, input time outside span of model times. Defaulting to closest time")
id_t = np.argmin(np.abs(model.time_out - time))
# Get plotting data
lon_arr, dlon, nlon = H.longitude_grid()
lon, rad = np.meshgrid(lon_arr.value, model.r.value)
mymap = mpl.cm.viridis
v_sub = model.v_grid.value[id_t, :, :].copy()
plotvmin = 200
plotvmax = 810
dv = 10
ylab = "Solar Wind Speed (km/s)"
# Insert into full array
if lon_arr.size != model.lon.size:
v = np.zeros((model.nr, nlon)) * np.NaN
if model.lon.size != 1:
for i, lo in enumerate(model.lon):
id_match = np.argwhere(lon_arr == lo)[0][0]
v[:, id_match] = v_sub[:, i]
else:
print('Warning: Trying to contour single radial solution will fail.')
else:
v = v_sub
# Pad out to fill the full 2pi of contouring
pad = lon[:, 0].reshape((lon.shape[0], 1)) + model.twopi
lon = np.concatenate((lon, pad), axis=1)
pad = rad[:, 0].reshape((rad.shape[0], 1))
rad = np.concatenate((rad, pad), axis=1)
pad = v[:, 0].reshape((v.shape[0], 1))
v = np.concatenate((v, pad), axis=1)
mymap.set_over('lightgrey')
mymap.set_under([0, 0, 0])
levels = np.arange(plotvmin, plotvmax + dv, dv)
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw={"projection": "polar"})
cnt = ax.contourf(lon, rad, v, levels=levels, cmap=mymap, extend='both')
# Add on CME boundaries
cme_colors = ['r', 'c', 'm', 'y', 'deeppink', 'darkorange']
for j, cme in enumerate(model.cmes):
cid = np.mod(j, len(cme_colors))
cme_lons = cme.coords[id_t]['lon']
cme_r = cme.coords[id_t]['r'].to(u.solRad)
if np.any(np.isfinite(cme_r)):
# Pad out to close the profile.
cme_lons = np.append(cme_lons, cme_lons[0])
cme_r = np.append(cme_r, cme_r[0])
ax.plot(cme_lons, cme_r, '-', color=cme_colors[cid], linewidth=3)
# Add on observers
for body, style in zip(['EARTH', 'VENUS', 'MERCURY', 'STA', 'STB'], ['co', 'mo', 'ko', 'rs', 'y^']):
obs = model.get_observer(body)
deltalon = 0.0*u.rad
if model.frame == 'sidereal':
earth_pos = model.get_observer('EARTH')
deltalon = earth_pos.lon_hae[id_t] - earth_pos.lon_hae[0]
obslon = H._zerototwopi_(obs.lon[id_t] + deltalon)
ax.plot(obslon, obs.r[id_t], style, markersize=16, label=body)
# Add on a legend.
fig.legend(ncol=5, loc='lower center', frameon=False, handletextpad=0.2, columnspacing=1.0)
ax.set_ylim(0, model.r.value.max())
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.patch.set_facecolor('slategrey')
fig.subplots_adjust(left=0.05, bottom=0.16, right=0.95, top=0.99)
# Add color bar
pos = ax.get_position()
dw = 0.005
dh = 0.045
left = pos.x0 + dw
bottom = pos.y0 - dh
wid = pos.width - 2 * dw
cbaxes = fig.add_axes([left, bottom, wid, 0.03])
cbar1 = fig.colorbar(cnt, cax=cbaxes, orientation='horizontal')
cbar1.set_label(ylab)
cbar1.set_ticks(np.arange(plotvmin, plotvmax, dv*10))
# Add label
label = "Time: {:3.2f} days".format(model.time_out[id_t].to(u.day).value)
fig.text(0.675, pos.y0, label, fontsize=16)
label = "HUXt2D"
fig.text(0.175, pos.y0, label, fontsize=16)
if save:
cr_num = np.int32(model.cr_num.value)
filename = "HUXt_CR{:03d}_{}_frame_{:03d}.png".format(cr_num, tag, id_t)
filepath = os.path.join(model._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
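# --- Illustrative usage sketch (assumes a solved HUXt model) --------------------
# The helper below is not part of huxt_analysis; it only shows how plot() is
# typically driven. `model` must be an instance of huxt.HUXt on which solve()
# has already been run; the exact constructor arguments depend on the HUXt
# version in use, so they are not reproduced here.
def _plot_example(model):
    # render the solution closest to 2 days into the run
    fig, ax = plot(model, 2 * u.day, save=False, tag='example')
    return fig, ax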
def animate(model, tag):
"""
Animate the model solution, and save as an MP4.
:param model: An instance of the HUXt class with a completed solution.
:param tag: String to append to the filename of the animation.
"""
# Set the duration of the movie
# Scaled so a 5 day simulation with dt_scale=4 is a 10 second movie.
duration = model.simtime.value * (10 / 432000)
def make_frame(t):
"""
Produce the frame required by MoviePy.VideoClip.
:param t: time through the movie
"""
# Get the time index closest to this fraction of movie duration
i = np.int32((model.nt_out - 1) * t / duration)
fig, ax = plot(model, model.time_out[i])
frame = mplfig_to_npimage(fig)
plt.close('all')
return frame
cr_num = np.int32(model.cr_num.value)
filename = "HUXt_CR{:03d}_{}_movie.mp4".format(cr_num, tag)
filepath = os.path.join(model._figure_dir_, filename)
animation = mpy.VideoClip(make_frame, duration=duration)
animation.write_videofile(filepath, fps=24, codec='libx264')
return
def plot_radial(model, time, lon, save=False, tag=''):
"""
Plot the radial solar wind profile at model time closest to specified time.
:param model: An instance of the HUXt class with a completed solution.
:param time: Time (in seconds) to find the closest model time step to.
:param lon: The model longitude of the selected radial to plot.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return: fig: Figure handle
:return: ax: Axes handle
"""
if (time < model.time_out.min()) | (time > (model.time_out.max())):
print("Error, input time outside span of model times. Defaulting to closest time")
id_t = np.argmin(np.abs(model.time_out - time))
time = model.time_out[id_t]
if model.lon.size != 1:
if (lon < model.lon.min()) | (lon > (model.lon.max())):
print("Error, input lon outside range of model longitudes. Defaulting to closest longitude")
id_lon = np.argmin(np.abs(model.lon - lon))
lon = model.lon[id_lon]
fig, ax = plt.subplots(figsize=(14, 7))
# Get plotting data
id_t = np.argmin(np.abs(model.time_out - time))
time_out = model.time_out[id_t].to(u.day).value
if model.lon.size == 1:
id_lon = 0
lon_out = model.lon.value
else:
id_lon = np.argmin(np.abs(model.lon - lon))
lon_out = model.lon[id_lon].to(u.deg).value
ylab = 'Solar Wind Speed (km/s)'
ax.plot(model.r, model.v_grid[id_t, :, id_lon], 'k-')
ymin = 200
ymax = 1000
# Plot the CME points on if needed
cme_colors = ['r', 'c', 'm', 'y', 'deeppink', 'darkorange']
for c, cme in enumerate(model.cmes):
cc = np.mod(c, len(cme_colors))
lon_cme = cme.coords[id_t]['lon']
r_cme = cme.coords[id_t]['r'].to(u.solRad)
id_front = cme.coords[id_t]['front_id'] == 1.0
id_back = cme.coords[id_t]['front_id'] == 0.0
r_front = r_cme[id_front]
lon_front = lon_cme[id_front]
r_back = r_cme[id_back]
lon_back = lon_cme[id_back]
id_cme_lon = np.argmin(np.abs(lon_front - lon))
r_front = r_front[id_cme_lon]
id_cme_lon = np.argmin(np.abs(lon_back - lon))
r_back = r_back[id_cme_lon]
id_cme = (model.r >= r_back) & (model.r <= r_front)
label = "CME {:02d}".format(c)
ax.plot(model.r[id_cme], model.v_grid[id_t, id_cme, id_lon], '.', color=cme_colors[cc], label=label)
ax.set_ylim(ymin, ymax)
ax.set_ylabel(ylab)
ax.set_xlim(model.r.value.min(), model.r.value.max())
ax.set_xlabel('Radial distance ($R_{sun}$)')
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95)
# Add label
time_label = " Time: {:3.2f} days".format(time_out)
lon_label = " Lon: {:3.2f}$^\circ$".format(lon_out)
label = "HUXt" + time_label + lon_label
ax.set_title(label, fontsize=20)
if save:
cr_num = np.int32(model.cr_num.value)
lon_tag = "{}deg".format(lon.to(u.deg).value)
filename = "HUXt_CR{:03d}_{}_radial_profile_lon_{}_frame_{:03d}.png".format(cr_num, tag, lon_tag, id_t)
filepath = os.path.join(model._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def plot_timeseries(model, radius, lon, save=False, tag=''):
"""
Plot the solar wind model timeseries at model radius and longitude closest to those specified.
:param model: An instance of the HUXt class with a completed solution.
:param radius: Radius to find the closest model radius to.
:param lon: Longitude to find the closest model longitude to.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return: fig: Figure handle
:return: ax: Axes handle
"""
if (radius < model.r.min()) | (radius > (model.r.max())):
print("Error, specified radius outside of model radial grid")
if model.lon.size != 1:
if (lon < model.lon.min()) | (lon > (model.lon.max())):
print("Error, input lon outside range of model longitudes. Defaulting to closest longitude")
id_lon = np.argmin(np.abs(model.lon - lon))
lon = model.lon[id_lon]
fig, ax = plt.subplots(figsize=(14, 7))
# Get plotting data
id_r = np.argmin(np.abs(model.r - radius))
r_out = model.r[id_r].value
if model.lon.size == 1:
id_lon = 0
lon_out = model.lon.value
else:
id_lon = np.argmin(np.abs(model.lon - lon))
lon_out = model.lon[id_lon].value
t_day = model.time_out.to(u.day)
ax.plot(t_day, model.v_grid[:, id_r, id_lon], 'k-')
ylab = 'Solar Wind Speed (km/s)'
ymin = 200
ymax = 1000
ax.set_ylim(ymin, ymax)
ax.set_ylabel(ylab)
ax.set_xlim(t_day.value.min(), t_day.value.max())
ax.set_xlabel('Time (days)')
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95)
# Add label
radius_label = " Radius: {:3.2f}".format(r_out) + "$R_{sun}$ "
lon_label = " Longitude: {:3.2f}".format(lon_out) + "$^\circ$"
label = "HUXt" + radius_label + lon_label
ax.set_title(label, fontsize=20)
#ax.legend(loc=1)
if save:
cr_num = np.int32(model.cr_num.value)
r_tag = np.int32(r_out)
lon_tag = np.int32(lon_out)
template_string = "HUXt1D_CR{:03d}_{}_time_series_radius_{:03d}_lon_{:03d}.png"
filename = template_string.format(cr_num, tag, r_tag, lon_tag)
filepath = os.path.join(model._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def get_earth_timeseries(model):
"""
Compute the solar wind time series at Earth. Returns a pandas dataframe with the
solar wind speed time series at Earth interpolated from the model solution using the
Earth ephemeris. Nearest neighbour interpolation in r, linear interpolation in longitude.
"""
earth_pos = model.get_observer('Earth')
#adjust the HEEQ coordinates if the sidereal frame has been used
if model.frame == 'sidereal':
deltalon = earth_pos.lon_hae - earth_pos.lon_hae[0]
lonheeq = H._zerototwopi_(earth_pos.lon.value + deltalon.value)
elif model.frame == 'synodic':
lonheeq = earth_pos.lon.value
if model.nlon == 1:
print('Single longitude simulated. Extracting time series at Earth r')
time = np.ones((model.nt_out))*np.nan
model_time = np.ones((model.nt_out))*np.nan
lon = np.ones((model.nt_out))*np.nan
rad = np.ones((model.nt_out))*np.nan
speed = np.ones((model.nt_out))*np.nan
for t in range(model.nt_out):
model_time[t] = model.time_out[t].value
time[t] = (model.time_init + model.time_out[t]).jd
#find the nearest R coord
id_r = np.argmin(np.abs(model.r.value - earth_pos.r[t].value))
rad[t] = model.r[id_r].value
lon[t] = lonheeq[t]
#then interpolate the values in longitude
if model.nlon == 1:
speed[t] = model.v_grid[t, id_r, 0].value
else:
speed[t] = np.interp(lonheeq[t], model.lon.value, model.v_grid[t, id_r, :].value, period=2*np.pi)
time = Time(time, format='jd')
#print(time, rad, lon, speed)
earth_time_series = pd.DataFrame(data={'time':time.datetime, 'model_time':model_time, 'r':rad, 'lon':lon, 'vsw':speed})
return earth_time_series
@u.quantity_input(time=u.day)
def plot_3d_meridional(model3d, time, lon=np.NaN*u.deg, save=False, tag=''):
"""
Make a contour plot on polar axis of the solar wind solution at a specific time.
:param model: An instance of the HUXt class with a completed solution.
:param time: Time to look up closet model time to (with an astropy.unit of time).
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return fig: Figure handle.
:return ax: Axes handle.
"""
#get the metadata from one of the individual HUXt elements
model=model3d.HUXtlat[0]
if (time < model.time_out.min()) | (time > (model.time_out.max())):
print("Error, input time outside span of model times. Defaulting to closest time")
id_t = np.argmin(np.abs(model.time_out - time))
time_out = model.time_out[id_t].to(u.day).value
#get the requested longitude
if model.lon.size == 1:
id_lon = 0
lon_out = model.lon.value
else:
id_lon = np.argmin(np.abs(model.lon - lon))
lon_out = model.lon[id_lon].to(u.deg).value
#loop over latitudes and extract the radial profiles
mercut=np.ones((len(model.r),model3d.nlat))
ymax=0.0
for n in range(0,model3d.nlat):
model=model3d.HUXtlat[n]
ymin=200; ymax=810; dv=19;
ylab='Solar Wind Speed (km/s)'
mercut[:,n]=model.v_grid[id_t, :, id_lon]
mymap = mpl.cm.viridis
mymap.set_over('lightgrey')
mymap.set_under([0, 0, 0])
levels = np.arange(ymin, ymax + dv, dv)
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw={"projection": "polar"})
cnt = ax.contourf(model3d.lat.to(u.rad), model.r, mercut, levels=levels, cmap=mymap, extend='both')
ax.set_ylim(0, model.r.value.max())
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.patch.set_facecolor('slategrey')
fig.subplots_adjust(left=0.05, bottom=0.16, right=0.95, top=0.99)
# Add color bar
pos = ax.get_position()
dw = 0.005
dh = 0.045
left = pos.x0 + dw
bottom = pos.y0 - dh
wid = pos.width - 2 * dw
cbaxes = fig.add_axes([left, bottom, wid, 0.03])
cbar1 = fig.colorbar(cnt, cax=cbaxes, orientation='horizontal')
cbar1.set_label(ylab)
cbar1.set_ticks(np.arange(ymin, ymax, dv*20))
# Add label
label = "Time: {:3.2f} days".format(time_out)
fig.text(0.675, pos.y0, label, fontsize=16)
label = "HUXt2D"
fig.text(0.175, pos.y0, label, fontsize=16)
if save:
cr_num = np.int32(model.cr_num.value)
filename = "HUXt_CR{:03d}_{}_frame_{:03d}.png".format(cr_num, tag, id_t)
filepath = os.path.join(model._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def animate_3d(model3d, lon=np.NaN*u.deg, tag=''):
"""
Animate the model solution, and save as an MP4.
:param field: String, either 'cme', or 'ambient', specifying which solution to animate.
:param tag: String to append to the filename of the animation.
"""
# Set the duration of the movie
# Scaled so a 5 day simulation with dt_scale=4 is a 10 second movie.
model=model3d.HUXtlat[0]
duration = model.simtime.value * (10 / 432000)
def make_frame_3d(t):
"""
Produce the frame required by MoviePy.VideoClip.
:param t: time through the movie
"""
# Get the time index closest to this fraction of movie duration
i = np.int32((model.nt_out - 1) * t / duration)
fig, ax = plot_3d_meridional(model3d, model.time_out[i], lon)
frame = mplfig_to_npimage(fig)
plt.close('all')
return frame
cr_num = np.int32(model.cr_num.value)
filename = "HUXt_CR{:03d}_{}_movie.mp4".format(cr_num, tag)
filepath = os.path.join(model._figure_dir_, filename)
animation = mpy.VideoClip(make_frame_3d, duration=duration)
animation.write_videofile(filepath, fps=24, codec='libx264')
return
| 37.100858
| 123
| 0.629186
|
ec61118776f4d1f2d21206c749a90f450dc07789
| 1,942
|
py
|
Python
|
masci_tools/io/cif2inp_ase.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | 15
|
2018-11-07T10:04:46.000Z
|
2021-11-08T20:51:08.000Z
|
masci_tools/io/cif2inp_ase.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | 120
|
2020-02-04T15:37:42.000Z
|
2022-03-17T10:49:40.000Z
|
masci_tools/io/cif2inp_ase.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | 11
|
2018-10-18T08:09:07.000Z
|
2022-02-22T15:45:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# #
###############################################################################
"""
Independent utility script to convert cif file formats to input for the inpgen code
Usage: `python cif2inp_ase.py <filename.cif>`
"""
import sys
import ase.io
import numpy as np
cifFilename = sys.argv[1]
structure = ase.io.read(cifFilename)
structureFormula = structure.get_chemical_formula()
inpFilename = 'inp_' + structureFormula
Binv = np.linalg.inv(structure.cell)
frac_coordinates = structure.arrays['positions'].dot(Binv)
with open(inpFilename, 'w+') as f:
natoms = len(structure.arrays['numbers'])
f.write(structureFormula + '\r\n')
f.write('&input film=F /\r\n')
for i in range(3):
f.write(' '.join(map('{:.12f}'.format, structure.cell[i])) + '\r\n')
f.write('1.8897 !lattice const scaled as(1.0*bohr)\r\n1.0000 1.0000 1.0000 !scaling\r\n\r\n')
f.write(str(natoms) + '\r\n')
for i in range(natoms):
f.write(
str(structure.arrays['numbers'][i]) + ' ' + ' '.join(map('{:.12f}'.format, frac_coordinates[i])) + '\r\n')
f.write('\r\n')
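# --- Illustrative sketch (not part of the converter) ----------------------------
# The conversion to fractional coordinates above is just frac = cart . inv(cell).
# The helper below shows the same operation on a made-up 10 Angstrom cubic cell;
# it is defined for illustration only and never called by this script.
def _frac_coords_example():
    cell = np.eye(3) * 10.0               # cubic cell, 10 Angstrom edges
    cart = np.array([[5.0, 5.0, 5.0]])    # one atom at the cell centre
    frac = cart.dot(np.linalg.inv(cell))  # -> [[0.5, 0.5, 0.5]]
    return frac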
| 44.136364
| 118
| 0.49897
|
e02f3c2f3c34c30db4d9234a53c21efd2bb3f061
| 12,909
|
py
|
Python
|
lib/matplotlib/testing/compare.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2017-04-11T08:55:30.000Z
|
2022-03-25T04:31:26.000Z
|
lib/matplotlib/testing/compare.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2021-05-10T17:57:41.000Z
|
2021-07-26T16:23:09.000Z
|
lib/matplotlib/testing/compare.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | 14
|
2015-10-05T04:15:46.000Z
|
2020-06-11T18:06:02.000Z
|
"""
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
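# --- Illustrative sketch (not part of matplotlib) --------------------------------
# compare_float returns None when the values agree within tolerance and a
# formatted "Expected/Actual/..." message otherwise. The helper below exercises
# both branches and is never called by the test machinery.
def _compare_float_example():
    ok = compare_float(1.0, 1.0005, relTol=1e-3)   # within tolerance -> None
    bad = compare_float(1.0, 1.5, absTol=0.1)      # outside tolerance -> message
    return ok, bad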
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
    on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
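# Illustrative usage sketch (added commentary, not part of the original module): a
# cached conversion call. The path below is hypothetical, and caching only applies
# when the extension has a registered converter (pdf/eps/svg, depending on which
# external tools were detected above).
def _example_cached_convert():
    """Sketch: convert a PDF baseline to PNG, reusing the hash-keyed cache."""
    png_name = convert('baseline_images/figure.pdf', cache=True)
    # A later call with an identical input file copies the result stored under
    # matplotlib._get_cachedir() + '/test_cache/<md5>.png' instead of re-converting.
    return png_name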
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
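# Worked example (illustrative, not part of the original module): for two 2x2
# single-channel "images" whose pixels differ by [0, 0, 3, 3], the histogram of
# absolute differences gives sum_of_squares = 2 * 3**2 = 18, so
# rms = sqrt(18 / 4) ~= 2.1213.
def _example_calculate_rms():
    """Sketch: RMS of a small, hand-checkable pixel difference."""
    expected = np.array([[10, 20], [30, 40]], dtype=np.int16)
    actual = np.array([[10, 20], [33, 43]], dtype=np.int16)
    return calculate_rms(expected, actual)  # ~2.1213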
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
    Examples
    --------
    img1 = "./baseline/plot.png"
    img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
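# Illustrative usage sketch (not part of the original module): the two return modes
# of compare_images. The paths are hypothetical. With in_decorator=True the caller
# receives a dict (rms, expected, actual, diff, tol) to build its own message;
# otherwise a preformatted multi-line string is returned, or None when the images
# match within the tolerance.
def _example_compare_images():
    """Sketch: compare a generated plot against its baseline image."""
    err = compare_images('baseline/plot.png', 'output/plot.png', tol=0.001,
                         in_decorator=True)
    if err is not None:
        raise AssertionError('images differ, RMS %(rms)s (see %(diff)s)' % err)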
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
| 34.060686 | 79 | 0.629173 |
c2142dbaff612a5a47a56fdb9c4a5adc22e34cb9 | 9,386 | py | Python |
ir_to_COCO/cal_ojb_num.py | WJ-Lai/CenterNet-CentralNet | d28a8c2438244782ccdd6805e555558b2c01ff46 | ["MIT"] | 6 | 2019-12-24T07:13:18.000Z | 2021-06-12T17:06:36.000Z |
ir_to_COCO/cal_ojb_num.py | WJ-Lai/CenterNet-CentralNet | d28a8c2438244782ccdd6805e555558b2c01ff46 | ["MIT"] | null | null | null |
ir_to_COCO/cal_ojb_num.py | WJ-Lai/CenterNet-CentralNet | d28a8c2438244782ccdd6805e555558b2c01ff46 | ["MIT"] | 1 | 2021-03-15T02:38:44.000Z | 2021-03-15T02:38:44.000Z |
import os
import random
import shutil
import sys
import json
import glob
import xml.etree.ElementTree as ET
import re
#from https://www.php.cn/python-tutorials-424348.html
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
os.makedirs(path)
# print(path+' ----- folder created')
return True
else:
# print(path+' ----- folder existed')
return False
# folder to make, please enter full path
"""
main code below are from
https://github.com/Tony607/voc2coco
"""
def get(root, name):
vars = root.findall(name)
return vars
def get_and_check(root, name, length):
vars = root.findall(name)
if len(vars) == 0:
raise ValueError("Can not find %s in %s." % (name, root.tag))
if length > 0 and len(vars) != length:
raise ValueError(
"The size of %s is supposed to be %d, but is %d."
% (name, length, len(vars))
)
if length == 1:
vars = vars[0]
return vars
def get_filename(xml_file, voc_images):
filename = xml_file.split('/')[-1]
filename = filename.replace('.xml', '.png')
all_file = os.listdir(voc_images)
if filename in all_file:
image_not_exit = False
else:
image_not_exit = True
return filename, image_not_exit
def get_filename_as_int(filename):
try:
filename = os.path.splitext(os.path.basename(filename))[0]
filename = re.sub("[^0-9]", "", filename)
return int(filename)
except:
raise ValueError("Filename %s is supposed to be an integer." % (filename))
def get_categories(xml_files):
"""Generate category name to id mapping from a list of xml files.
Arguments:
xml_files {list} -- A list of xml file paths.
Returns:
dict -- category name to id mapping.
"""
classes_names = []
for xml_file in xml_files:
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall("object"):
classes_names.append(member[0].text)
classes_names = list(set(classes_names))
classes_names.sort()
return {name: i for i, name in enumerate(classes_names)}
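# Illustrative sketch (not part of the original script): with annotation files whose
# <object><name> tags contain e.g. "car", "person" and "bike", get_categories returns
# {"bike": 0, "car": 1, "person": 2} (names sorted alphabetically, ids assigned by
# position). The folder path below is hypothetical.
def _example_get_categories():
    """Sketch: build the category name-to-id mapping from a folder of VOC XMLs."""
    xml_files = glob.glob('/path/to/Annotations/*.xml')
    return get_categories(xml_files)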
def convert(xml_files, json_file, voc_images):
num_obj = {'bike': 0, 'car': 0, 'car_stop': 0, 'color_cone': 0, 'person': 0}
json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
if PRE_DEFINE_CATEGORIES is not None:
categories = PRE_DEFINE_CATEGORIES
else:
categories = get_categories(xml_files)
bnd_id = START_BOUNDING_BOX_ID
for xml_file in xml_files:
filename, image_not_exit = get_filename(xml_file, voc_images)
if image_not_exit:
continue
try:
tree = ET.parse(xml_file)
except:
continue
root = tree.getroot()
# root = get_and_check(root, sensor, 1)
## The filename must be a number
image_id = get_filename_as_int(filename)
width = image_size[sensor][0]
height = image_size[sensor][1]
image = {
"file_name": filename,
"height": height,
"width": width,
"id": image_id,
}
json_dict["images"].append(image)
## Currently we do not support segmentation.
# segmented = get_and_check(root, 'segmented', 1).text
# assert segmented == '0'
for obj in get(root, "object"):
category = get_and_check(obj, "name", 1).text
# if category=='car_stop':
# category = 'car'
if category not in categories:
# new_id = len(categories)
# categories[category] = new_id
# category = 'color_cone'
continue
num_obj[category] += 1
category_id = categories[category]
bndbox = get_and_check(obj, "bndbox", 1)
xmin = float(get_and_check(bndbox, "xmin", 1).text)
ymin = float(get_and_check(bndbox, "ymin", 1).text)
xmax = float(get_and_check(bndbox, "xmax", 1).text)
ymax = float(get_and_check(bndbox, "ymax", 1).text)
if xmax < xmin or ymax < ymin:
continue
assert xmax > xmin
assert ymax > ymin
o_width = abs(xmax - xmin)
o_height = abs(ymax - ymin)
ann = {
"area": o_width * o_height,
"iscrowd": 0,
"image_id": image_id,
"bbox": [xmin, ymin, o_width, o_height],
"category_id": category_id,
"id": bnd_id,
"ignore": 0,
"segmentation": [],
}
json_dict["annotations"].append(ann)
bnd_id = bnd_id + 1
print(num_obj)
for cate, cid in categories.items():
cat = {"supercategory": "none", "id": cid, "name": cate}
json_dict["categories"].append(cat)
os.makedirs(os.path.dirname(json_file), exist_ok=True)
json_fp = open(json_file, "w")
json_str = json.dumps(json_dict)
json_fp.write(json_str)
json_fp.close()
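# Illustrative usage sketch (not part of the original script): converting one folder
# of VOC XML annotations into a single COCO-style JSON file. The paths are
# hypothetical, and convert() relies on the module-level sensor, image_size,
# START_BOUNDING_BOX_ID and PRE_DEFINE_CATEGORIES values configured further below,
# so it can only be called after those are set.
def _example_convert_to_coco():
    """Sketch: VOC XML -> COCO JSON for one sensor."""
    xml_files = glob.glob('/path/to/xml_train/*.xml')
    convert(xml_files, '/path/to/annotations/train.json', '/path/to/images/rgb')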
def create_dir(dir_name):
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
else:
mkdir(dir_name)
def get_fileID(xml_file):
filename = xml_file.split('/')[-1]
filename = filename.split('.')[0]
return filename
def extract_night(merge_xml_path):
xml_files = os.listdir(merge_xml_path)
night_id = []
for xml_file in xml_files:
try:
tree = ET.parse(os.path.join(merge_xml_path, xml_file))
except:
continue
root = tree.getroot()
time = get_and_check(root, "time", 1).text
if time == 'night':
night_id.append(get_fileID(xml_file))
return night_id
def extract_merged_from_night(merged_ok_path, night_id):
merge_ok_files = os.listdir(merged_ok_path)
merged_night_id = []
for merge_ok_file in merge_ok_files:
fileID = get_fileID(merge_ok_file)
if fileID in night_id:
merged_night_id.append(fileID)
return merged_night_id
def filter_no_images(images_path, merged_night_id):
sensor_types = ['rgb', 'fir', 'mir', 'nir']
merged_night_id = set(merged_night_id)
for sensor in sensor_types:
sensor_path = os.path.join(images_path, sensor)
sensor_imges = os.listdir(os.path.join(images_path, sensor))
for idx, sensor_imge in enumerate(sensor_imges):
sensor_imges[idx] = get_fileID(sensor_imge)
sensor_imges = set(sensor_imges)
merged_night_id = merged_night_id & sensor_imges
return merged_night_id
"""
You only need to set the following three parts:
1. val_files_num : number of validation samples taken from all of your samples
2. test_files_num : number of test samples taken from all of your samples
3. voc_annotations : path to your VOC dataset Annotations
"""
# get merged and check file existence
from ir_to_COCO.extract_night_merge_well import get_img_id
merged_night_id = get_img_id()
val_num_rate = 0.2
test_num_rate = 0.2
sensor_type = ['rgb', 'fir', 'mir', 'nir']
# image_size = {'rgb':[640, 480], 'fir':[640, 480], 'mir':[320, 256], 'nir':[320, 256]}
# if ConvertedImages
image_size = {'rgb': [320, 256], 'fir': [320, 256], 'mir': [320, 256], 'nir': [320, 256]}
START_BOUNDING_BOX_ID = 0
PRE_DEFINE_CATEGORIES = None
# If necessary, pre-define category and its id
# PRE_DEFINE_CATEGORIES = {"bike": 0, "car": 1, 'car_stop': 2, "color_cone": 3, "person": 4}
PRE_DEFINE_CATEGORIES = {"bike": 0, "car": 1, "color_cone": 2, "person": 3}
val_files_num = int(len(merged_night_id)*val_num_rate)
test_files_num = int(len(merged_night_id)*test_num_rate)
# voc_annotations = '/home/vincent/Data/ir_det_dataset/Annotations_Converted/' # remember to modify the path
voc_annotations = '/home/vincent/Data/ir_det_dataset/Annotations_ConvertedSummarized/' # remember to modify the path
split = voc_annotations.split('/')
coco_name = split[-3]
del split[-2]
del split[-1]
del split[0]
# print(split)
main_path = ''
for i in split:
main_path += '/' + i
main_path = main_path + '/'
for sensor in sensor_type:
print(sensor)
coco_path = os.path.join(main_path, coco_name + '_COCO/', sensor)
coco_images = os.path.join(main_path, coco_name + '_COCO', sensor, 'images/')
coco_json_annotations = os.path.join(main_path, coco_name + '_COCO', sensor, 'annotations/')
# xml_val = os.path.join(main_path, coco_name + '_COCO/', sensor, 'xml', 'xml_val/')
# xml_test = os.path.join(main_path, coco_name + '_COCO/', sensor, 'xml/', 'xml_test/')
xml_train = os.path.join(main_path, coco_name + '_COCO/', sensor, 'xml/', 'xml_train/')
voc_images = os.path.join(main_path, 'ConvertedImages', sensor)
dir_path = [coco_path, coco_images, coco_json_annotations, xml_train]
for dir_name in dir_path:
create_dir(dir_name)
# voc annotations copy to coco annotations
for i in os.listdir(voc_annotations):
id = get_fileID(i)
if id in merged_night_id:
img_path = os.path.join(voc_annotations, i)
shutil.copy(img_path, xml_train)
# create json
xml_train_files = glob.glob(os.path.join(xml_train, "*.xml"))
convert(xml_train_files, coco_json_annotations + 'train.json', voc_images)
| 31.92517 | 117 | 0.623269 |
bc0f81cc6df2152342b298bb5e3eac46fc31a396 | 13,096 | py | Python |
quantum/tests/unit/nec/test_pfc_driver.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | ["Apache-2.0"] | null | null | null |
quantum/tests/unit/nec/test_pfc_driver.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | ["Apache-2.0"] | null | null | null |
quantum/tests/unit/nec/test_pfc_driver.py | yamt/neutron | f94126739a48993efaf1d1439dcd3dadb0c69742 | ["Apache-2.0"] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import random
import string
import mox
from quantum import context
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import ofc_client as ofc
from quantum.plugins.nec.db import api as ndb
from quantum.plugins.nec.db import models as nmodels
from quantum.plugins.nec import drivers
from quantum.tests import base
class TestConfig(object):
"""Configuration for this test."""
host = '127.0.0.1'
port = 8888
use_ssl = False
key_file = None
cert_file = None
def _ofc(id):
"""OFC ID converter."""
return "ofc-%s" % id
class PFCDriverTestBase(base.BaseTestCase):
driver = 'quantum.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCDriverTestBase, self).setUp()
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver)(TestConfig)
self.mox.StubOutWithMock(ofc.OFCClient, 'do_request')
self.addCleanup(self.mox.UnsetStubs)
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test."""
tenant_id = uuidutils.generate_uuid()
network_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
port_no=1234, vlan_id=321,
mac="11:22:33:44:55:66")
return tenant_id, network_id, portinfo
def _generate_ofc_tenant_id(self, tenant_id):
fields = tenant_id.split('-')
# Strip 1st character (UUID version) of 3rd field
fields[2] = fields[2][1:]
return ''.join(fields)
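    # Worked example (added annotation, not part of the original tests): for a tenant
    # id such as '0efc2810-89e4-4de7-98e9-3373d49c6dbe', the third field '4de7' loses
    # its leading UUID-version digit and the hyphens are dropped, producing
    # '0efc281089e4de798e93373d49c6dbe' (8 + 4 + 3 + 4 + 12 = 31 characters), which is
    # why the _generate_pfc_id tests further below also truncate ids to 31 characters.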
def get_ofc_description(self, desc):
"""OFC description consists of [A-Za-z0-9_]."""
return desc.replace('-', '_').replace(' ', '_')
def _create_tenant(self, t, ofc_t, post_id=False, post_desc=False):
tenant_path = '/tenants/%s' % ofc_t
path = "/tenants"
description = "desc of %s" % t
body = {}
if post_desc:
ofc_description = self.get_ofc_description(description)
body['description'] = ofc_description
if post_id:
body['id'] = ofc_t
ofc.OFCClient.do_request("POST", path, body=body)
else:
ofc.OFCClient.do_request("POST", path, body=body).\
AndReturn({'id': ofc_t})
self.mox.ReplayAll()
ret = self.driver.create_tenant(description, t)
self.mox.VerifyAll()
self.assertEqual(ret, tenant_path)
def testa_create_tenant(self):
t, n, p = self.get_ofc_item_random_params()
ofc_t = self._generate_ofc_tenant_id(t)
self._create_tenant(t, ofc_t, post_id=True)
def testc_delete_tenant(self):
t, n, p = self.get_ofc_item_random_params()
path = "/tenants/%s" % _ofc(t)
ofc.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_tenant(path)
self.mox.VerifyAll()
def testd_create_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
ofc_description = self.get_ofc_description(description)
tenant_path = "/tenants/%s" % _ofc(t)
post_path = "%s/networks" % tenant_path
body = {'description': ofc_description}
network = {'id': _ofc(n)}
ofc.OFCClient.do_request("POST", post_path, body=body).\
AndReturn(network)
self.mox.ReplayAll()
ret = self.driver.create_network(tenant_path, description, n)
self.mox.VerifyAll()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
self.assertEqual(ret, net_path)
def testf_delete_network(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
ofc.OFCClient.do_request("DELETE", net_path)
self.mox.ReplayAll()
self.driver.delete_network(net_path)
self.mox.VerifyAll()
def testg_create_port(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/tenants/%s/networks/%s" % (_ofc(t), _ofc(n))
post_path = "%s/ports" % net_path
port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
_ofc(p.id))
body = {'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
port = {'id': _ofc(p.id)}
ofc.OFCClient.do_request("POST", post_path, body=body).AndReturn(port)
self.mox.ReplayAll()
ret = self.driver.create_port(net_path, p, p.id)
self.mox.VerifyAll()
self.assertEqual(ret, port_path)
def testh_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
port_path = "/tenants/%s/networks/%s/ports/%s" % (_ofc(t), _ofc(n),
_ofc(p.id))
ofc.OFCClient.do_request("DELETE", port_path)
self.mox.ReplayAll()
self.driver.delete_port(port_path)
self.mox.VerifyAll()
class PFCDriverBaseTest(PFCDriverTestBase):
pass
class PFCV3DriverTest(PFCDriverTestBase):
driver = 'pfc_v3'
def testa_create_tenant(self):
t, n, p = self.get_ofc_item_random_params()
self.mox.ReplayAll()
ret = self.driver.create_tenant('dummy_desc', t)
self.mox.VerifyAll()
ofc_t_path = "/tenants/" + self._generate_ofc_tenant_id(t)
self.assertEqual(ofc_t_path, ret)
def testc_delete_tenant(self):
pass
class PFCV4DriverTest(PFCDriverTestBase):
driver = 'pfc_v4'
class PFCDriverStringTest(base.BaseTestCase):
driver = 'quantum.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCDriverStringTest, self).setUp()
self.driver = drivers.get_driver(self.driver)(TestConfig)
def test_generate_pfc_id_uuid(self):
id_str = uuidutils.generate_uuid()
exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]
ret_str = self.driver._generate_pfc_id(id_str)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_id_uuid_no_hyphen(self):
# Keystone tenant_id style uuid
id_str = uuidutils.generate_uuid()
id_no_hyphen = id_str.replace('-', '')
exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]
ret_str = self.driver._generate_pfc_id(id_no_hyphen)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_id_string(self):
id_str = uuidutils.generate_uuid() + 'x'
exp_str = id_str[:31].replace('-', '_')
ret_str = self.driver._generate_pfc_id(id_str)
self.assertEqual(exp_str, ret_str)
def test_generate_pfc_desc(self):
random_list = [random.choice(string.printable) for x in range(128)]
random_str = ''.join(random_list)
accept_letters = string.letters + string.digits
exp_list = [x if x in accept_letters else '_' for x in random_list]
exp_str = ''.join(exp_list)[:127]
ret_str = self.driver._generate_pfc_description(random_str)
self.assertEqual(exp_str, ret_str)
class PFCIdConvertTest(base.BaseTestCase):
driver = 'quantum.plugins.nec.drivers.pfc.PFCDriverBase'
def setUp(self):
super(PFCIdConvertTest, self).setUp()
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver)(TestConfig)
self.ctx = self.mox.CreateMock(context.Context)
self.ctx.session = "session"
self.mox.StubOutWithMock(ndb, 'get_ofc_id_lookup_both')
self.addCleanup(self.mox.UnsetStubs)
def generate_random_ids(self, count=1):
if count == 1:
return uuidutils.generate_uuid()
else:
return [uuidutils.generate_uuid() for _ in xrange(count)]
def test_convert_tenant_id(self):
ofc_t_id = self.generate_random_ids(1)
print ofc_t_id
ret = self.driver.convert_ofc_tenant_id(self.ctx, ofc_t_id)
self.assertEqual(ret, '/tenants/%s' % ofc_t_id)
def test_convert_tenant_id_noconv(self):
ofc_t_id = '/tenants/%s' % self.generate_random_ids(1)
ret = self.driver.convert_ofc_tenant_id(self.ctx, ofc_t_id)
self.assertEqual(ret, ofc_t_id)
def test_convert_network_id(self):
t_id, ofc_t_id, ofc_n_id = self.generate_random_ids(3)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_id)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id}))
self.mox.VerifyAll()
def test_convert_network_id_with_new_tenant_id(self):
t_id, ofc_t_id, ofc_n_id = self.generate_random_ids(3)
ofc_t_path = '/tenants/%s' % ofc_t_id
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id}))
self.mox.VerifyAll()
def test_convert_network_id_noconv(self):
t_id = 'dummy'
ofc_t_id, ofc_n_id = self.generate_random_ids(2)
ofc_n_id = ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id})
ret = self.driver.convert_ofc_network_id(self.ctx, ofc_n_id, t_id)
self.assertEqual(ret, ofc_n_id)
def test_convert_port_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_id)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_id)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_with_new_tenant_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ofc_t_path = '/tenants/%s' % ofc_t_id
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_id)
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_tenant', t_id).AndReturn(ofc_t_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_with_new_network_id(self):
t_id, n_id = self.generate_random_ids(2)
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ofc_n_path = ('/tenants/%(tenant)s/networks/%(network)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id})
ndb.get_ofc_id_lookup_both(
self.ctx.session, 'ofc_network', n_id).AndReturn(ofc_n_path)
self.mox.ReplayAll()
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
exp = ('/tenants/%(tenant)s/networks/%(network)s/ports/%(port)s' %
{'tenant': ofc_t_id, 'network': ofc_n_id, 'port': ofc_p_id})
self.assertEqual(ret, exp)
self.mox.VerifyAll()
def test_convert_port_id_noconv(self):
t_id = n_id = 'dummy'
ofc_t_id, ofc_n_id, ofc_p_id = self.generate_random_ids(3)
ofc_p_id = ('/tenants/%(tenant)s/networs/%(network)s/ports/%(port)s'
% {'tenant': ofc_t_id, 'network': ofc_n_id,
'port': ofc_p_id})
ret = self.driver.convert_ofc_port_id(self.ctx, ofc_p_id, t_id, n_id)
self.assertEqual(ret, ofc_p_id)
| 36.890141 | 78 | 0.633476 |
f251922e0a52644957fe91c19aa2e8857ed95b5c | 92,083 | py | Python |
powergenome/generators.py | deeglaze/PowerGenome | 669e26c63ff1272a7afcfa34165b592eafa9e142 | ["MIT"] | 59 | 2021-01-09T03:00:11.000Z | 2022-03-31T02:33:55.000Z |
powergenome/generators.py | deeglaze/PowerGenome | 669e26c63ff1272a7afcfa34165b592eafa9e142 | ["MIT"] | 77 | 2021-01-07T19:39:34.000Z | 2022-03-25T01:30:31.000Z |
powergenome/generators.py | deeglaze/PowerGenome | 669e26c63ff1272a7afcfa34165b592eafa9e142 | ["MIT"] | 18 | 2021-01-31T16:43:17.000Z | 2022-02-11T10:43:34.000Z |
import collections
import logging
from numbers import Number
from typing import Dict
import re
import requests
import geopandas as gpd
import numpy as np
import pandas as pd
from pathlib import Path
import pudl
from bs4 import BeautifulSoup
from flatten_dict import flatten
from powergenome.cluster_method import (
cluster_by_owner,
cluster_kmeans,
weighted_ownership_by_unit,
)
from powergenome.eia_opendata import fetch_fuel_prices
from powergenome.external_data import (
make_demand_response_profiles,
demand_response_resource_capacity,
add_resource_max_cap_spur,
)
from powergenome.load_data import (
load_ipm_plant_region_map,
load_ownership_eia860,
load_plants_860,
load_utilities_eia,
)
from powergenome.nrelatb import (
atb_fixed_var_om_existing,
atb_new_generators,
fetch_atb_costs,
fetch_atb_heat_rates,
fetch_atb_offshore_spur_costs,
investment_cost_calculator,
)
from powergenome.params import CLUSTER_BUILDER, DATA_PATHS, IPM_GEOJSON_PATH
from powergenome.price_adjustment import inflation_price_adjustment
from powergenome.resource_clusters import map_eia_technology
from powergenome.util import (
download_save,
map_agg_region_names,
reverse_dict_of_lists,
snake_case_col,
regions_to_keep,
)
from scipy.stats import iqr
from sklearn import cluster, preprocessing
from xlrd import XLRDError
logger = logging.getLogger(__name__)
planned_col_map = {
"Entity ID": "utility_id_eia",
"Entity Name": "utility_name",
"Plant ID": "plant_id_eia",
"Plant Name": "plant_name",
"Sector": "sector_name",
"Plant State": "state",
"Generator ID": "generator_id",
"Unit Code": "unit_code",
"Nameplate Capacity (MW)": "capacity_mw",
"Net Summer Capacity (MW)": "summer_capacity_mw",
"Net Winter Capacity (MW)": "winter_capacity_mw",
"Technology": "technology_description",
"Energy Source Code": "energy_source_code_1",
"Prime Mover Code": "prime_mover_code",
"Planned Operation Month": "planned_operating_month",
"Planned Operation Year": "planned_operating_year",
"Status": "operational_status",
"County": "county",
"Latitude": "latitude",
"Longitude": "longitude",
"Google Map": "google_map",
"Bing Map": "bing_map",
"Balancing Authority Code": "balancing_authority_code",
}
op_status_map = {
"(V) Under construction, more than 50 percent complete": "V",
"(TS) Construction complete, but not yet in commercial operation": "TS",
"(U) Under construction, less than or equal to 50 percent complete": "U",
"(T) Regulatory approvals received. Not under construction": "T",
"(P) Planned for installation, but regulatory approvals not initiated": "P",
"(L) Regulatory approvals pending. Not under construction": "L",
"(OT) Other": "OT",
}
TRANSMISSION_TYPES = ["spur", "offshore_spur", "tx"]
def fill_missing_tech_descriptions(df):
"""
EIA 860 records before 2014 don't have a technology description. If we want to
include any of this data in the historical record (e.g. heat rates or capacity
factors) then they need to be filled in.
Parameters
----------
df : dataframe
A pandas dataframe with columns plant_id_eia, generator_id, and
technology_description.
Returns
-------
dataframe
Same data that came in, but with missing technology_description values filled
in.
"""
start_len = len(df)
df = df.sort_values(by="report_date")
df_list = []
for _, _df in df.groupby(["plant_id_eia", "generator_id"], as_index=False):
_df["technology_description"].fillna(method="bfill", inplace=True)
df_list.append(_df)
results = pd.concat(df_list, ignore_index=True, sort=False)
end_len = len(results)
assert (
start_len == end_len
), "Somehow records were dropped when filling tech_descriptions"
return results
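# Illustrative sketch (not part of the original module): a generator that reports no
# technology_description in an early year is backfilled from its own later reports.
# The ids and dates below are made up.
def _example_fill_missing_tech_descriptions():
    """Sketch: backfill technology_description within each plant/generator pair."""
    df = pd.DataFrame(
        {
            "plant_id_eia": [1, 1],
            "generator_id": ["G1", "G1"],
            "report_date": pd.to_datetime(["2012-01-01", "2017-01-01"]),
            "technology_description": [None, "Conventional Steam Coal"],
        }
    )
    # After the call, both rows carry "Conventional Steam Coal".
    return fill_missing_tech_descriptions(df)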
def group_generators_at_plant(df, by=["plant_id_eia"], agg_fn={"capacity_mw": "sum"}):
"""
Group generators at a plant. This is a flexible function that lets a user group
by the desired attributes (e.g. plant id) and perform aggregated operations on each
group.
This function also might be a bit unnecessary given how simple it is.
Parameters
----------
df : dataframe
Pandas dataframe with information on power plants.
by : list, optional
Columns to use for the groupby, by default ["plant_id_eia"]
agg_fn : dict, optional
Aggregation function to pass to groupby, by default {"capacity_mw": "sum"}
Returns
-------
dataframe
The grouped dataframe with aggregation functions applied.
"""
df_grouped = df.groupby(by, as_index=False).agg(agg_fn)
return df_grouped
def startup_fuel(df, settings):
"""Add startup fuel consumption for generators
Parameters
----------
df : DataFrame
All generator clusters. Must have a column "technology". Can include both EIA
and NRELATB technology names.
settings : dictionary
User-defined settings loaded from a YAML file. Keys in "startup_fuel_use"
must match those in "eia_atb_tech_map".
Returns
-------
DataFrame
Modified dataframe with the new column "Start_fuel_MMBTU_per_MW".
"""
df["Start_fuel_MMBTU_per_MW"] = 0
for eia_tech, fuel_use in (settings.get("startup_fuel_use") or {}).items():
if not isinstance(settings["eia_atb_tech_map"][eia_tech], list):
settings["eia_atb_tech_map"][eia_tech] = [
settings["eia_atb_tech_map"][eia_tech]
]
atb_tech = settings["eia_atb_tech_map"][eia_tech]
for tech in atb_tech:
df.loc[df["technology"] == tech, "Start_fuel_MMBTU_per_MW"] = fuel_use
df.loc[
df["technology"].str.contains(tech, case=False),
"Start_fuel_MMBTU_per_MW",
] = fuel_use
return df
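# Illustrative settings sketch (placeholder values, not from the original settings
# file): keys of "startup_fuel_use" are EIA technology names and must also appear in
# "eia_atb_tech_map", whose values (single names or lists) are matched against the
# "technology" column of the cluster dataframe.
_EXAMPLE_STARTUP_FUEL_SETTINGS = {
    "startup_fuel_use": {"Natural Gas Fired Combined Cycle": 2.0},  # MMBTU per MW
    "eia_atb_tech_map": {"Natural Gas Fired Combined Cycle": ["NaturalGas_CC"]},
}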
def startup_nonfuel_costs(df, settings):
"""Add inflation adjusted startup nonfuel costs per MW for generators
Parameters
----------
df : DataFrame
Must contain a column "technology" with the names of each technology type.
settings : dict
Dictionary based on YAML settings file. Must contain the keys
"startup_costs_type", "startup_vom_costs_mw", "existing_startup_costs_tech_map",
etc.
Returns
-------
DataFrame
Modified df with new column "Start_cost_per_MW"
"""
logger.info("Adding non-fuel startup costs")
target_usd_year = settings["target_usd_year"]
vom_costs = settings["startup_vom_costs_mw"]
vom_usd_year = settings["startup_vom_costs_usd_year"]
logger.info(
f"Changing non-fuel VOM costs from {vom_usd_year} to " f"{target_usd_year}"
)
for key, cost in vom_costs.items():
vom_costs[key] = inflation_price_adjustment(
price=cost, base_year=vom_usd_year, target_year=target_usd_year
)
startup_type = settings["startup_costs_type"]
startup_costs = settings[startup_type]
startup_costs_usd_year = settings["startup_costs_per_cold_start_usd_year"]
logger.info(
f"Changing non-fuel startup costs from {vom_usd_year} to {target_usd_year}"
)
for key, cost in startup_costs.items():
startup_costs[key] = inflation_price_adjustment(
price=cost, base_year=startup_costs_usd_year, target_year=target_usd_year
)
df["Start_cost_per_MW"] = 0
for existing_tech, cost_tech in settings["existing_startup_costs_tech_map"].items():
total_startup_costs = vom_costs[cost_tech] + startup_costs[cost_tech]
df.loc[
df["technology"].str.contains(existing_tech), "Start_cost_per_MW"
] = total_startup_costs
for new_tech, cost_tech in settings["new_build_startup_costs"].items():
total_startup_costs = vom_costs[cost_tech] + startup_costs[cost_tech]
df.loc[
df["technology"].str.contains(new_tech), "Start_cost_per_MW"
] = total_startup_costs
df.loc[:, "Start_cost_per_MW"] = df.loc[:, "Start_cost_per_MW"]
# df.loc[df["technology"].str.contains("Nuclear"), "Start_cost_per_MW"] = "FILL VALUE"
return df
def group_technologies(df, settings):
"""
Group different technologies together based on parameters in the settings file.
An example would be to put a bunch of different technologies under the umbrella
category of "biomass" or "peaker".
Parameters
----------
df : dataframe
        Pandas dataframe of generator data; must include 'technology_description'
        (and 'model_region' when regional exceptions to grouping are used).
settings : dictionary
User-defined settings loaded from a YAML file. Must have key tech_groups.
Returns
-------
dataframe
Same as incoming dataframe but with grouped technology types
"""
if settings.get("group_technologies"):
df["_technology"] = df["technology_description"]
for tech, group in settings["tech_groups"].items():
df.loc[df["technology_description"].isin(group), "_technology"] = tech
for region, tech_list in (settings.get("regional_no_grouping") or {}).items():
df.loc[
(df["model_region"] == region)
& (df["technology_description"].isin(tech_list)),
"_technology",
] = df.loc[
(df["model_region"] == region)
& (df["technology_description"].isin(tech_list)),
"technology_description",
]
df.loc[:, "technology_description"] = df.loc[:, "_technology"]
df = df.drop(columns=["_technology"])
return df
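# Illustrative settings sketch (placeholder values, not from the original settings
# file): with "group_technologies" enabled, every technology listed under a
# tech_groups key is relabeled to that key, except in regions listed under
# "regional_no_grouping", where the original EIA names are kept.
_EXAMPLE_TECH_GROUP_SETTINGS = {
    "group_technologies": True,
    "tech_groups": {
        "Biomass": ["Wood/Wood Waste Biomass", "Landfill Gas"],
        "Peaker": ["Natural Gas Fired Combustion Turbine", "Petroleum Liquids"],
    },
    "regional_no_grouping": {"CA_N": ["Landfill Gas"]},
}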
def label_hydro_region(gens_860, pudl_engine, model_regions_gdf):
"""
Label hydro facilities that don't have a region by default.
Parameters
----------
gens_860 : dataframe
        Information on all generators from PUDL
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
model_regions_gdf : dataframe
Geodataframe of the model regions
Returns
-------
dataframe
Plant id and region for any hydro that didn't originally have a region label.
"""
plant_entity = pd.read_sql_table("plants_entity_eia", pudl_engine)
model_hydro = gens_860.loc[
gens_860["technology_description"] == "Conventional Hydroelectric"
].merge(plant_entity[["plant_id_eia", "latitude", "longitude"]], on="plant_id_eia")
no_lat_lon = model_hydro.loc[
(model_hydro["latitude"].isnull()) | (model_hydro["longitude"].isnull()), :
]
if not no_lat_lon.empty:
print(no_lat_lon["summer_capacity_mw"].sum(), " MW without lat/lon")
model_hydro = model_hydro.dropna(subset=["latitude", "longitude"])
# Convert the lon/lat values to geo points. Need to add an initial CRS and then
# change it to align with the IPM regions
model_hydro_gdf = gpd.GeoDataFrame(
model_hydro,
geometry=gpd.points_from_xy(model_hydro.longitude, model_hydro.latitude),
crs="EPSG:4326",
)
if model_hydro_gdf.crs != model_regions_gdf.crs:
model_hydro_gdf = model_hydro_gdf.to_crs(model_regions_gdf.crs)
model_hydro_gdf = gpd.sjoin(model_regions_gdf, model_hydro_gdf)
model_hydro_gdf = model_hydro_gdf.rename(columns={"IPM_Region": "region"})
keep_cols = ["plant_id_eia", "region"]
return model_hydro_gdf.loc[:, keep_cols]
def load_plant_region_map(
gens_860, pudl_engine, settings, model_regions_gdf, table="plant_region_map_epaipm"
):
"""
Load the region that each plant is located in.
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
settings : dictionary
The dictionary of settings with a dictionary of region aggregations
table : str, optional
The SQL table to load, by default "plant_region_map_epaipm"
Returns
-------
dataframe
A dataframe where each plant has an associated "model_region" mapped
from the original region labels.
"""
# Load dataframe of region labels for each EIA plant id
region_map_df = pd.read_sql_table(table, con=pudl_engine)
if settings.get("plant_region_map_fn"):
user_region_map_df = pd.read_csv(
Path(settings["input_folder"]) / settings["plant_region_map_fn"]
)
assert (
"region" in user_region_map_df.columns
), f"The column 'region' must appear in {settings['plant_region_map_fn']}"
assert (
"plant_id_eia" in user_region_map_df.columns
), f"The column 'plant_id_eia' must appear in {settings['plant_region_map_fn']}"
user_region_map_df = user_region_map_df.set_index("plant_id_eia")
region_map_df.loc[
region_map_df["plant_id_eia"].isin(user_region_map_df.index), "region"
] = region_map_df["plant_id_eia"].map(user_region_map_df["region"])
# Label hydro using the IPM shapefile because NEEDS seems to drop some hydro
all_hydro_regions = label_hydro_region(gens_860, pudl_engine, model_regions_gdf)
region_map_df = pd.concat(
[region_map_df, all_hydro_regions], ignore_index=True, sort=False
).drop_duplicates(subset=["plant_id_eia"], keep="first")
# Settings has a dictionary of lists for regional aggregations. Need
# to reverse this to use in a map method.
keep_regions, region_agg_map = regions_to_keep(settings)
# Create a new column "model_region" with labels that we're using for aggregated
# regions
model_region_map_df = region_map_df.loc[
region_map_df.region.isin(keep_regions), :
].drop(columns="id")
model_region_map_df = map_agg_region_names(
df=model_region_map_df,
region_agg_map=region_agg_map,
original_col_name="region",
new_col_name="model_region",
)
# There are some cases of plants with generators assigned to different IPM regions.
# If regions are aggregated there may be some duplicates in the results.
model_region_map_df = model_region_map_df.drop_duplicates(
subset=["plant_id_eia", "model_region"]
)
return model_region_map_df
def label_retirement_year(
df,
settings,
age_col="operating_date",
settings_retirement_table="retirement_ages",
add_additional_retirements=True,
):
"""
Add a retirement year column to the dataframe based on the year each generator
started operating.
Parameters
----------
df : dataframe
Dataframe of generators
settings : dictionary
The dictionary of settings with a dictionary of generator lifetimes
age_col : str, optional
The dataframe column to use when calculating the retirement year, by default
"operating_date"
settings_retirement_table : str, optional
The settings dictionary key for another dictionary of generator retirement
lifetimes, by default "retirement_ages"
add_additional_retirements : bool, optional
Logic to determine if additional retirements from the settings file should
be checked. For example, this isn't necessary when adding proposed generators
        because we probably won't be setting an artificially early retirement year.
"""
start_len = len(df)
retirement_ages = settings[settings_retirement_table]
if df.loc[df["technology_description"].isnull(), :].empty is False:
df = fill_missing_tech_descriptions(df)
for tech, life in retirement_ages.items():
try:
df.loc[df.technology_description == tech, "retirement_year"] = (
df.loc[df.technology_description == tech, age_col].dt.year + life
)
except AttributeError:
# This is a bit hacky but for the proposed plants I have an int column
df.loc[df.technology_description == tech, "retirement_year"] = (
df.loc[df.technology_description == tech, age_col] + life
)
try:
df.loc[~df["planned_retirement_date"].isnull(), "retirement_year"] = df.loc[
~df["planned_retirement_date"].isnull(), "planned_retirement_date"
].dt.year
except KeyError:
pass
# Add additonal retirements from settings file
if settings.get("additional_retirements") and add_additional_retirements:
logger.info("Changing retirement dates based on settings file")
model_year = settings["model_year"]
start_ret_cap = df.loc[
df["retirement_year"] <= model_year, settings["capacity_col"]
].sum()
logger.info(f"Starting retirement capacity is {start_ret_cap} MW")
i = 0
ret_cap = 0
for record in settings["additional_retirements"]:
plant_id, gen_id, ret_year = record
# gen ids are strings, not integers
gen_id = str(gen_id)
df.loc[
(df["plant_id_eia"] == plant_id) & (df["generator_id"] == gen_id),
"retirement_year",
] = ret_year
i += 1
ret_cap += df.loc[
(df["plant_id_eia"] == plant_id) & (df["generator_id"] == gen_id),
settings["capacity_col"],
].sum()
end_ret_cap = df.loc[
df["retirement_year"] <= model_year, settings["capacity_col"]
].sum()
logger.info(f"Ending retirement capacity is {end_ret_cap} MW")
if not end_ret_cap > start_ret_cap:
logger.debug(
"Adding retirements from settings didn't change the retiring capacity."
)
if end_ret_cap - start_ret_cap != ret_cap:
logger.debug(
f"Retirement diff is {end_ret_cap - start_ret_cap}, adding retirements "
f"yields {ret_cap} MW"
)
logger.info(
f"The retirement year for {i} plants, totaling {ret_cap} MW, was changed "
"based on settings file parameters"
)
else:
logger.info("No retirement dates changed based on the settings file")
end_len = len(df)
assert start_len == end_len
return df
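# Worked example (illustrative, not part of the original module): a coal unit that
# came online in 1975 with an assumed 60-year life and no planned_retirement_date is
# labeled retirement_year = 1975 + 60 = 2035. Units with a planned_retirement_date
# keep the year of that date, and settings["additional_retirements"] entries of the
# form [plant_id, generator_id, year] override individual units on top of that.
def _example_label_retirement_year():
    """Sketch: operating_date plus an assumed lifetime becomes retirement_year."""
    df = pd.DataFrame(
        {
            "plant_id_eia": [1],
            "generator_id": ["G1"],
            "technology_description": ["Conventional Steam Coal"],
            "operating_date": pd.to_datetime(["1975-06-01"]),
        }
    )
    settings = {"retirement_ages": {"Conventional Steam Coal": 60}}
    return label_retirement_year(df, settings)  # retirement_year == 2035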
def label_small_hydro(df, settings, by=["plant_id_eia"]):
"""
Use rules from the settings file to label plants below a certain size as small
hydroelectric rather than conventional hydroelectric.
Parameters
----------
df : dataframe
EIA 860 data on generators
settings : dict
User-defined parameters from a settings file
by : list, optional
What columns to use in the groupby function when summing capacity, by default
["plant_id_eia"]
Returns
-------
dataframe
If the user wants to label small hydro plants, some of the conventional
hydro facilities will have their technology type changed to small hydro.
"""
if not settings.get("small_hydro"):
return df
if "report_date" not in by and "report_date" in df.columns:
# by.append("report_date")
logger.warning("'report_date' is in the df but not used in the groupby")
region_agg_map = reverse_dict_of_lists(settings.get("region_aggregations", {}))
keep_regions = [
x
for x in settings["model_regions"] + list(region_agg_map)
if x in settings["small_hydro_regions"]
]
start_len = len(df)
size_cap = settings["small_hydro_mw"]
cap_col = settings.get("capacity_col")
    if cap_col not in df:
cap_col = "capacity_mw"
start_hydro_capacity = df.query(
"technology_description=='Conventional Hydroelectric'"
)[cap_col].sum()
plant_capacity = (
df.loc[
(df["technology_description"] == "Conventional Hydroelectric")
& (df["model_region"].isin(keep_regions))
]
.groupby(by, as_index=False)[cap_col]
.sum()
)
small_hydro_plants = plant_capacity.loc[
plant_capacity[cap_col] <= size_cap, "plant_id_eia"
]
df.loc[
(df["technology_description"] == "Conventional Hydroelectric")
& (df["plant_id_eia"].isin(small_hydro_plants)),
"technology_description",
] = "Small Hydroelectric"
end_len = len(df)
small_hydro_capacity = df.query("technology_description=='Small Hydroelectric'")[
cap_col
].sum()
end_conv_hydro_capacity = df.query(
"technology_description=='Conventional Hydroelectric'"
)[cap_col].sum()
assert start_len == end_len
assert np.allclose(
start_hydro_capacity, small_hydro_capacity + end_conv_hydro_capacity
)
return df
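# Illustrative settings sketch (placeholder values, not from the original settings
# file): small-hydro relabeling only runs when "small_hydro" is truthy, and it only
# touches Conventional Hydroelectric plants located in "small_hydro_regions" whose
# total plant capacity is at or below "small_hydro_mw".
_EXAMPLE_SMALL_HYDRO_SETTINGS = {
    "small_hydro": True,
    "small_hydro_mw": 30,
    "small_hydro_regions": ["CA_N"],
    "model_regions": ["CA_N"],
    "capacity_col": "capacity_mw",
}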
def load_generator_860_data(pudl_engine, data_years=[2017]):
"""
Load EIA 860 generator data from the PUDL database
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
data_years : list, optional
Years of data to load, by default [2017]
Returns
-------
dataframe
All of the generating units from PUDL
"""
sql = """
SELECT * FROM generators_eia860
WHERE operational_status_code NOT IN ('RE', 'OS', 'IP', 'CN')
"""
gens_860 = pd.read_sql_query(
sql=sql, con=pudl_engine, parse_dates=["planned_retirement_date", "report_date"]
)
gens_860 = gens_860.loc[gens_860["report_date"].dt.year.isin(data_years), :]
return gens_860
def supplement_generator_860_data(
gens_860, gens_entity, bga, model_region_map, settings
):
"""
Load data about each generating unit in the model area.
Parameters
----------
gens_860 : dataframe
Information on all generating units for the given data years.
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
settings : dictionary
The dictionary of settings with a dictionary of region aggregations
pudl_out : pudl.PudlTabl
A PudlTabl object for loading pre-calculated PUDL analysis data
model_region_map : dataframe
A dataframe with columns 'plant_id_eia' and 'model_region' (aggregated regions)
data_years : list, optional
Years of data to include, by default [2017]
Returns
-------
dataframe
Data about each generator and generation unit that will be included in the
model. Columns include:
['plant_id_eia', 'generator_id',
'capacity_mw', 'energy_source_code_1',
'energy_source_code_2', 'minimum_load_mw', 'operational_status_code',
'planned_new_capacity_mw', 'switch_oil_gas', 'technology_description',
'time_cold_shutdown_full_load_code', 'model_region', 'prime_mover_code',
'operating_date', 'boiler_id', 'unit_id_eia', 'unit_id_pudl',
'retirement_year']
"""
initial_capacity = (
gens_860.loc[gens_860["plant_id_eia"].isin(model_region_map["plant_id_eia"])]
.groupby("technology_description")[settings["capacity_col"]]
.sum()
)
# Add pudl unit ids, only include specified data years
# Combine generator data that can change over time with static entity data
# and only keep generators that are in a region of interest
gen_cols = [
# "report_date",
"plant_id_eia",
# "plant_name",
"generator_id",
# "balancing_authority_code",
settings["capacity_col"],
"energy_source_code_1",
"energy_source_code_2",
"minimum_load_mw",
"operational_status_code",
"planned_new_capacity_mw",
"switch_oil_gas",
"technology_description",
"time_cold_shutdown_full_load_code",
"planned_retirement_date",
]
entity_cols = ["plant_id_eia", "generator_id", "prime_mover_code", "operating_date"]
bga_cols = [
"plant_id_eia",
"generator_id",
"boiler_id",
"unit_id_eia",
"unit_id_pudl",
]
# In this merge of the three dataframes we're trying to label each generator with
# the model region it is part of, the prime mover and operating date, and the
# PUDL unit codes (where they exist).
gens_860_model = (
pd.merge(
gens_860[gen_cols],
model_region_map.drop(columns="region"),
on="plant_id_eia",
how="inner",
)
.merge(
gens_entity[entity_cols], on=["plant_id_eia", "generator_id"], how="inner"
)
.merge(bga[bga_cols], on=["plant_id_eia", "generator_id"], how="left")
)
merged_capacity = gens_860_model.groupby("technology_description")[
settings["capacity_col"]
].sum()
if not np.allclose(initial_capacity.sum(), merged_capacity.sum()):
logger.warning(
f"Capacity changed from {initial_capacity} \nto \n{merged_capacity}"
)
return gens_860_model
def create_plant_gen_id(df):
"""Combine the plant id and generator id to form a unique combination
Parameters
----------
df : dataframe
Must contain columns plant_id_eia and generator_id
Returns
-------
dataframe
Same as input but with the additional column plant_gen_id
"""
df["plant_gen_id"] = (
df["plant_id_eia"].astype(str) + "_" + df["generator_id"].astype(str)
)
return df
def remove_canceled_860m(df, canceled_860m):
"""Remove generators that 860m shows as having been canceled
Parameters
----------
df : dataframe
All of the EIA 860 generators
canceled_860m : dataframe
From the 860m Canceled or Postponed sheet
Returns
-------
dataframe
Same as input, but possibly without generators that were proposed
"""
df = create_plant_gen_id(df)
canceled_860m = create_plant_gen_id(canceled_860m)
canceled = df.loc[df["plant_gen_id"].isin(canceled_860m["plant_gen_id"]), :]
not_canceled_df = df.loc[~df["plant_gen_id"].isin(canceled_860m["plant_gen_id"]), :]
not_canceled_df = not_canceled_df.drop(columns="plant_gen_id")
if not canceled.empty:
assert len(df) == len(canceled) + len(not_canceled_df)
return not_canceled_df
def remove_retired_860m(df, retired_860m):
"""Remove generators that 860m shows as having been retired
Parameters
----------
df : dataframe
All of the EIA 860 generators
retired_860m : dataframe
From the 860m Retired sheet
Returns
-------
dataframe
Same as input, but possibly without generators that have retired
"""
df = create_plant_gen_id(df)
retired_860m = create_plant_gen_id(retired_860m)
retired = df.loc[df["plant_gen_id"].isin(retired_860m["plant_gen_id"]), :]
not_retired_df = df.loc[~df["plant_gen_id"].isin(retired_860m["plant_gen_id"]), :]
not_retired_df = not_retired_df.drop(columns="plant_gen_id")
if not retired.empty:
assert len(df) == len(retired) + len(not_retired_df)
return not_retired_df
def remove_future_retirements_860m(df, retired_860m):
"""Remove generators that 860m shows as having been retired
Parameters
----------
df : dataframe
All of the EIA 860 generators
retired_860m : dataframe
From the 860m Retired sheet
Returns
-------
dataframe
Same as input, but possibly without generators that have retired
"""
df = create_plant_gen_id(df)
retired_860m = create_plant_gen_id(retired_860m)
retired = df.loc[df["plant_gen_id"].isin(retired_860m["plant_gen_id"]), :]
not_retired_df = df.loc[~df["plant_gen_id"].isin(retired_860m["plant_gen_id"]), :]
not_retired_df = not_retired_df.drop(columns="plant_gen_id")
if not retired.empty:
assert len(df) == len(retired) + len(not_retired_df)
return not_retired_df
def load_923_gen_fuel_data(pudl_engine, pudl_out, model_region_map, data_years=[2017]):
"""
Load generation and fuel data for each plant. EIA-923 provides these values for
each prime mover/fuel combination at every generator. This data can be used to
calculate the heat rate of generators at a single plant. Generators sharing a prime
mover (e.g. multiple combustion turbines) will end up sharing the same heat rate.
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
pudl_out : pudl.PudlTabl
A PudlTabl object for loading pre-calculated PUDL analysis data
model_region_map : dataframe
A dataframe with columns 'plant_id_eia' and 'model_region' (aggregated regions)
data_years : list, optional
Years of data to include, by default [2017]
Returns
-------
dataframe
Generation, fuel use, and heat rates of prime mover/fuel combos over all data
years. Columns are:
['plant_id_eia', 'fuel_type', 'fuel_type_code_pudl',
'fuel_type_code_aer', 'prime_mover_code', 'fuel_consumed_units',
'fuel_consumed_for_electricity_units', 'fuel_consumed_mmbtu',
'fuel_consumed_for_electricity_mmbtu', 'net_generation_mwh',
'heat_rate_mmbtu_mwh']
"""
# Load 923 generation and fuel data for one or more years.
# Only load plants in the model regions.
sql = """
SELECT * FROM generation_fuel_eia923
"""
gen_fuel_923 = pd.read_sql_query(sql, pudl_engine, parse_dates=["report_date"])
gen_fuel_923 = gen_fuel_923.loc[
(gen_fuel_923["report_date"].dt.year.isin(data_years))
& (gen_fuel_923["plant_id_eia"].isin(model_region_map.plant_id_eia)),
:,
]
return gen_fuel_923
def modify_cc_prime_mover_code(df, gens_860):
"""Change combined cycle prime movers from CA and CT to CC.
The heat rate of combined cycle plants that aren't included in PUDL heat rate by
unit should probably be done with the combustion and steam turbines combined. This
modifies the prime mover code of those two generator types so that they match. It
doesn't touch the CS code, which is for single shaft combined units.
Parameters
----------
df : dataframe
A dataframe with columns prime_mover_code, and plant_id_eia.
gens_860 : dataframe
EIA860 dataframe with technology_description, unit_id_pudl, plant_id_eia
columns.
Returns
-------
dataframe
Modified 923 dataframe where prime mover codes at CC generators that don't have
a PUDL unit id are modified from CA and CT to CC.
"""
cc_without_pudl_id = gens_860.loc[
(gens_860["unit_id_pudl"].isnull())
& (gens_860["technology_description"] == "Natural Gas Fired Combined Cycle"),
"plant_id_eia",
]
df.loc[
(df["plant_id_eia"].isin(cc_without_pudl_id))
& (df["prime_mover_code"].isin(["CA", "CT"])),
"prime_mover_code",
] = "CC"
return df
def group_gen_by_year_fuel_primemover(df):
"""
Group generation and fuel consumption by plant, prime mover, and fuel type. Only
matters where multiple years of data are used, otherwise output should be the same
as input.
Parameters
----------
df : dataframe
Generation and fuel consumption data from EIA 923 for each plant, prime mover,
and fuel type
Returns
-------
dataframe
Sum of generation and fuel consumption data (if multiple years).
"""
# Group the data by plant, fuel type, and prime mover
by = [
"plant_id_eia",
"fuel_type",
"fuel_type_code_pudl",
"fuel_type_code_aer",
"prime_mover_code",
]
annual_gen_fuel_923 = (
(
df.drop(columns=["id", "nuclear_unit_id"])
            .groupby(by=by, as_index=False)[
                [
                    "fuel_consumed_units",
                    "fuel_consumed_for_electricity_units",
                    "fuel_consumed_mmbtu",
                    "fuel_consumed_for_electricity_mmbtu",
                    "net_generation_mwh",
                ]
            ]
.sum()
)
.reset_index()
.drop(columns="index")
.sort_values(["plant_id_eia", "fuel_type", "prime_mover_code"])
)
return annual_gen_fuel_923
def add_923_heat_rate(df):
"""
Small function to calculate the heat rate of records with fuel consumption and net
generation.
Parameters
----------
df : dataframe
Must contain the columns net_generation_mwh and
fuel_consumed_for_electricity_mmbtu
Returns
-------
dataframe
Same dataframe with new column of heat_rate_mmbtu_mwh
"""
# Calculate the heat rate for each prime mover/fuel combination
df["heat_rate_mmbtu_mwh"] = (
df["fuel_consumed_for_electricity_mmbtu"] / df["net_generation_mwh"]
)
return df
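# Worked example (illustrative, not part of the original module): a record reporting
# 75,000 MMBtu of fuel consumed for electricity and 10,000 MWh of net generation gets
# heat_rate_mmbtu_mwh = 75,000 / 10,000 = 7.5.
def _example_add_923_heat_rate():
    """Sketch: heat rate = fuel consumed for electricity / net generation."""
    df = pd.DataFrame(
        {
            "fuel_consumed_for_electricity_mmbtu": [75000.0],
            "net_generation_mwh": [10000.0],
        }
    )
    return add_923_heat_rate(df)  # heat_rate_mmbtu_mwh == 7.5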
def calculate_weighted_heat_rate(heat_rate_df):
"""
    Calculate the weighted heat rate when multiple years of data are used. Net generation
in each year is used as the weights.
Parameters
----------
heat_rate_df : dataframe
Currently the PudlTabl unit_hr method.
Returns
-------
dataframe
Heat rate weighted by annual generation for each plant and PUDL unit
"""
def w_hr(df):
weighted_hr = np.average(
df["heat_rate_mmbtu_mwh"], weights=df["net_generation_mwh"]
)
return weighted_hr
weighted_unit_hr = (
heat_rate_df.groupby(["plant_id_eia", "unit_id_pudl"], as_index=False)
.apply(w_hr)
.reset_index()
)
weighted_unit_hr = weighted_unit_hr.rename(columns={0: "heat_rate_mmbtu_mwh"})
return weighted_unit_hr
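# Worked example (illustrative, not part of the original module): a unit reporting
# heat rates of 8.0 and 10.0 MMBtu/MWh in two years, with 3,000 MWh and 1,000 MWh of
# generation respectively, gets (8.0 * 3000 + 10.0 * 1000) / 4000 = 8.5 MMBtu/MWh.
def _example_calculate_weighted_heat_rate():
    """Sketch: generation-weighted heat rate across data years for one PUDL unit."""
    heat_rate_df = pd.DataFrame(
        {
            "plant_id_eia": [1, 1],
            "unit_id_pudl": [1, 1],
            "heat_rate_mmbtu_mwh": [8.0, 10.0],
            "net_generation_mwh": [3000.0, 1000.0],
        }
    )
    return calculate_weighted_heat_rate(heat_rate_df)  # one row, 8.5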
def plant_pm_heat_rates(annual_gen_fuel_923):
"""
Calculate the heat rate by plant, prime mover, and fuel type. Values are saved
as a dictionary.
Parameters
----------
annual_gen_fuel_923 : dataframe
Data from the 923 generation and fuel use table. Heat rate for each row should
already be calculated.
Returns
-------
dict
Keys are a tuple of plant id, prime mover, and fuel type. Values are the heat
rate.
"""
by = ["plant_id_eia", "prime_mover_code", "fuel_type"]
annual_gen_fuel_923_groups = annual_gen_fuel_923.groupby(by)
prime_mover_hr_map = {
_: df["heat_rate_mmbtu_mwh"].values[0] for _, df in annual_gen_fuel_923_groups
}
return prime_mover_hr_map
def unit_generator_heat_rates(pudl_out, data_years):
"""
Calculate the heat rate for each PUDL unit and generators that don't have a PUDL
unit id.
Parameters
----------
pudl_out : pudl.PudlTabl
A PudlTabl object for loading pre-calculated PUDL analysis data
data_years : list
Years of data to use
Returns
-------
    dataframe
        A dataframe of heat rates for each PUDL unit (columns are ['plant_id_eia',
        'unit_id_pudl', 'heat_rate_mmbtu_mwh']).
"""
# Load the pre-calculated PUDL unit heat rates for selected years.
# Remove rows without generation or with null values.
unit_hr = pudl_out.hr_by_unit()
unit_hr = unit_hr.loc[
(unit_hr.report_date.dt.year.isin(data_years))
& (unit_hr.net_generation_mwh > 0),
:,
].dropna()
weighted_unit_hr = calculate_weighted_heat_rate(unit_hr)
return weighted_unit_hr
def group_units(df, settings):
"""
Group by units within a region/technology/cluster. Add a unique unit code
(plant plus generator) for any generators that aren't part of a unit.
Returns
-------
dataframe
Grouped generators with the total capacity, minimum load, and average heat
rate for each.
"""
by = ["plant_id_eia", "unit_id_pudl"]
# add a unit code (plant plus generator code) in cases where one doesn't exist
df_copy = df.reset_index()
# All units should have the same heat rate so taking the mean will just keep the
# same value.
grouped_units = df_copy.groupby(by).agg(
{
settings["capacity_col"]: "sum",
"minimum_load_mw": "sum",
"heat_rate_mmbtu_mwh": "mean",
"Fixed_OM_cost_per_MWyr": "mean",
"Var_OM_cost_per_MWh": "mean",
}
)
grouped_units = grouped_units.replace([np.inf, -np.inf], np.nan)
grouped_units = grouped_units.fillna(grouped_units.mean())
return grouped_units
def calc_unit_cluster_values(df, settings, technology=None):
"""
Calculate the total capacity, minimum load, weighted heat rate, and number of
units/generators in a technology cluster.
Parameters
----------
df : dataframe
A dataframe with units/generators of a single technology. One column should be
'cluster', to label units as belonging to a specific cluster grouping.
    settings : dict
        User-defined settings; must include the 'capacity_col' key.
    technology : str, optional
        Name of the generating technology, by default None
Returns
-------
dataframe
        Aggregate values for generators in a technology cluster
"""
# Define a function to compute the weighted mean.
# The issue here is that the df name needs to be used in the function.
# So this will need to be within a function that takes df as an input
def wm(x):
return np.average(x, weights=df.loc[x.index, settings["capacity_col"]])
if df["heat_rate_mmbtu_mwh"].isnull().values.any():
# mean =
# df["heat_rate_mmbtu_mwh"] = df["heat_rate_mmbtu_mwh"].fillna(
# df["heat_rate_mmbtu_mwh"].median()
# )
start_cap = df[settings["capacity_col"]].sum()
df = df.loc[~df["heat_rate_mmbtu_mwh"].isnull(), :]
end_cap = df[settings["capacity_col"]].sum()
cap_diff = start_cap - end_cap
logger.warning(f"dropped {cap_diff}MW because of null heat rate values")
df_values = df.groupby("cluster").agg(
{
settings["capacity_col"]: "mean",
"minimum_load_mw": "mean",
"heat_rate_mmbtu_mwh": wm,
"Fixed_OM_cost_per_MWyr": wm,
"Var_OM_cost_per_MWh": wm,
}
)
if df_values["heat_rate_mmbtu_mwh"].isnull().values.any():
print(df)
print(df_values)
df_values["heat_rate_mmbtu_mwh_iqr"] = df.groupby("cluster").agg(
{"heat_rate_mmbtu_mwh": iqr}
)
df_values["heat_rate_mmbtu_mwh_std"] = df.groupby("cluster").agg(
{"heat_rate_mmbtu_mwh": "std"}
)
df_values["fixed_o_m_mw_std"] = df.groupby("cluster").agg(
{"Fixed_OM_cost_per_MWyr": "std"}
)
df_values["Min_power"] = (
df_values["minimum_load_mw"] / df_values[settings["capacity_col"]]
)
df_values["num_units"] = df.groupby("cluster")["cluster"].count()
if technology:
df_values["technology"] = technology
return df_values
def add_genx_model_tags(df, settings):
"""
Each generator type needs to have certain tags for use by the GenX model. Each tag
is a column, e.g. THERM for thermal generators. These columns and tag values are
defined in the settings file and applied here. Tags are (usually?) boolean 0/1
values.
Parameters
----------
df : dataframe
Clusters of generators. The index should have a column 'technology', which
is used to map tag values.
settings : dict
User-defined settings loaded from a YAML file.
Returns
-------
dataframe
The original generator cluster results with new columns for each model tag.
"""
ignored = r"_"
technology = df["technology"].str.replace(ignored, "")
# Create a new dataframe with the same index
default = settings.get("default_model_tag", 0)
for tag_col in settings.get("model_tag_names", []):
df[tag_col] = default
try:
for tech, tag_value in settings["model_tag_values"][tag_col].items():
tech = re.sub(ignored, "", tech)
mask = technology.str.contains(fr"^{tech}", case=False)
df.loc[mask, tag_col] = tag_value
except (KeyError, AttributeError) as e:
logger.warning(f"No model tag values found for {tag_col} ({e})")
# Change tags with specific regional values for a technology
flat_regional_tags = flatten(settings.get("regional_tag_values", {}) or {})
for tag_tuple, tag_value in flat_regional_tags.items():
region, tag_col, tech = tag_tuple
tech = re.sub(ignored, "", tech)
mask = technology.str.contains(fr"^{tech}", case=False)
df.loc[(df["region"] == region) & mask, tag_col] = tag_value
return df
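# A sketch of the settings keys add_genx_model_tags reads (the tag names and
# values below are hypothetical, not recommendations):
#
#   >>> settings = {
#   ...     "model_tag_names": ["THERM", "DISP"],
#   ...     "default_model_tag": 0,
#   ...     "model_tag_values": {
#   ...         "THERM": {"Natural Gas Fired Combined Cycle": 1},
#   ...         "DISP": {"Solar Photovoltaic": 1},
#   ...     },
#   ...     "regional_tag_values": {
#   ...         "CA_N": {"THERM": {"Natural Gas Fired Combined Cycle": 2}}
#   ...     },
#   ... }
#
# Matching is a case-insensitive prefix match after underscores are stripped,
# so a technology named "Natural Gas Fired Combined Cycle_1" still receives
# the THERM tag.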
def load_ipm_shapefile(settings, path=IPM_GEOJSON_PATH):
"""
Load the shapefile of IPM regions
Parameters
----------
settings : dict
User-defined parameters from a settings YAML file. This is where any region
        aggregations would be defined.
    path : str or pathlib.Path, optional
        Path to the IPM regions GeoJSON file, by default IPM_GEOJSON_PATH.
Returns
-------
geodataframe
Regions to use in the study with the matching geometry for each.
"""
keep_regions, region_agg_map = regions_to_keep(settings)
    ipm_regions = gpd.read_file(path)
if settings.get("user_region_geodata_fn"):
logger.info("Appending user regions to IPM Regions")
user_regions = gpd.read_file(
Path(settings["input_folder"]) / settings["user_region_geodata_fn"]
)
user_regions = user_regions.to_crs(ipm_regions.crs)
ipm_regions = ipm_regions.append(user_regions)
# ipm_regions = gpd.read_file(IPM_SHAPEFILE_PATH)
model_regions_gdf = ipm_regions.loc[ipm_regions["IPM_Region"].isin(keep_regions)]
model_regions_gdf = map_agg_region_names(
model_regions_gdf, region_agg_map, "IPM_Region", "model_region"
).reset_index(drop=True)
return model_regions_gdf
def download_860m(settings: dict) -> pd.ExcelFile:
"""Load the entire 860m file into memory as an ExcelFile object.
Parameters
----------
settings : dict
User-defined settings loaded from a YAML file. This is where the EIA860m
filename is defined.
Returns
-------
pd.ExcelFile
The ExcelFile object with all sheets from 860m.
"""
try:
fn = settings["eia_860m_fn"]
except KeyError:
# No key in the settings file
logger.info("Trying to determine the most recent EIA860m file...")
fn = find_newest_860m()
# Only the most recent file will not have archive in the url
url = f"https://www.eia.gov/electricity/data/eia860m/xls/{fn}"
archive_url = f"https://www.eia.gov/electricity/data/eia860m/archive/xls/{fn}"
local_file = DATA_PATHS["eia_860m"] / fn
if local_file.exists():
logger.info(f"Reading a local copy of the EIA860m file {fn}")
eia_860m = pd.ExcelFile(local_file)
else:
logger.info(f"Downloading the EIA860m file {fn}")
try:
download_save(url, local_file)
eia_860m = pd.ExcelFile(local_file)
except XLRDError:
logger.warning("A more recent version of EIA-860m is available")
download_save(archive_url, local_file)
eia_860m = pd.ExcelFile(local_file)
# write the file to disk
return eia_860m
def find_newest_860m() -> str:
"""Scrape the EIA 860m page to find the most recently posted file.
Returns
-------
str
Name of most recently posted file
"""
site_url = "https://www.eia.gov/electricity/data/eia860m/"
r = requests.get(site_url)
soup = BeautifulSoup(r.content, "lxml")
table = soup.find("table", attrs={"class": "simpletable"})
href = table.find("a")["href"]
fn = href.split("/")[-1]
return fn
def clean_860m_sheet(
eia_860m: pd.ExcelFile, sheet_name: str, settings: dict
) -> pd.DataFrame:
"""Load a sheet from the 860m ExcelFile object and clean it.
Parameters
----------
eia_860m : ExcelFile
Entire 860m file loaded into memory
sheet_name : str
Name of the sheet to load as a dataframe
settings : dict
User-defined settings loaded from a YAML file.
Returns
-------
pd.DataFrame
One of the sheets from 860m
"""
df = eia_860m.parse(sheet_name=sheet_name, na_values=[" "])
for idx, row in df.iterrows():
if row.iloc[0] == "Entity ID":
sr = idx + 1
break
for idx in list(range(-10, 0)):
if isinstance(df.iloc[idx, 0], str):
sf = -idx
break
df = eia_860m.parse(
sheet_name=sheet_name, skiprows=sr, skipfooter=sf, na_values=[" "]
)
df = df.rename(columns=planned_col_map)
if sheet_name in ["Operating", "Planned"]:
df.loc[:, "operational_status_code"] = df.loc[:, "operational_status"].map(
op_status_map
)
if sheet_name == "Planned":
df = df.loc[
df["operational_status_code"].isin(settings["proposed_status_included"]), :
]
return df
def load_860m(settings: dict) -> Dict[str, pd.DataFrame]:
"""Load the planned, canceled, and retired sheets from an EIA 860m file.
Parameters
----------
settings : dict
User-defined settings loaded from a YAML file. This is where the EIA860m
filename is defined.
Returns
-------
Dict[str, pd.DataFrame]
The 860m dataframes, with the keys 'planned', 'canceled', and 'retired'.
"""
sheet_map = {
"planned": "Planned",
"canceled": "Canceled or Postponed",
"retired": "Retired",
}
fn = settings.get("eia_860m_fn")
if not fn:
fn = find_newest_860m()
fn_name = Path(fn).stem
data_dict = {}
eia_860m_excelfile = None
for name, sheet in sheet_map.items():
pkl_path = DATA_PATHS["eia_860m"] / f"{fn_name}_{name}.pkl"
if pkl_path.exists():
data_dict[name] = pd.read_pickle(pkl_path)
else:
if eia_860m_excelfile is None:
eia_860m_excelfile = download_860m(settings)
data_dict[name] = clean_860m_sheet(eia_860m_excelfile, sheet, settings)
data_dict[name].to_pickle(pkl_path)
return data_dict
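# Illustrative use (a sketch; the filename and status codes are examples, and
# the settings dict is reduced to the keys load_860m actually reads). The
# first call downloads the EIA file, later calls reuse the pickled sheets.
#
#   >>> settings = {
#   ...     "eia_860m_fn": "july_generator2021.xlsx",
#   ...     "proposed_status_included": ["V", "TS", "U", "T"],
#   ... }
#   >>> sheets = load_860m(settings)
#   >>> sorted(sheets)
#   ['canceled', 'planned', 'retired']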
def import_proposed_generators(planned, settings, model_regions_gdf):
"""
Load the most recent proposed generating units from EIA860m. Will also add
any planned generators that are included in the settings file.
Parameters
----------
settings : dict
User defined parameters from a settings YAML file
model_regions_gdf : geodataframe
Contains the name and geometry of each region being used in the study
Returns
-------
dataframe
All proposed generators.
"""
# Some plants don't have lat/lon data. Log this now to determine if any action is
# needed, then drop them from the dataframe.
no_lat_lon = planned.loc[
(planned["latitude"].isnull()) | (planned["longitude"].isnull()), :
].copy()
if not no_lat_lon.empty:
no_lat_lon_cap = no_lat_lon[settings["capacity_col"]].sum()
logger.warning(
"Some generators do not have lon/lat data. Check the source "
"file to determine if they should be included in results. "
f"\nThe affected generators account for {no_lat_lon_cap} in balancing "
"authorities: "
f"\n{no_lat_lon['balancing_authority_code'].tolist()}"
)
planned = planned.dropna(subset=["latitude", "longitude"])
# Convert the lon/lat values to geo points. Need to add an initial CRS and then
# change it to align with the IPM regions
print("Creating gdf")
planned_gdf = gpd.GeoDataFrame(
planned.copy(),
geometry=gpd.points_from_xy(planned.longitude.copy(), planned.latitude.copy()),
crs="EPSG:4326",
)
if planned_gdf.crs != model_regions_gdf.crs:
planned_gdf = planned_gdf.to_crs(model_regions_gdf.crs)
planned_gdf = gpd.sjoin(model_regions_gdf.drop(columns="IPM_Region"), planned_gdf)
# Add planned additions from the settings file
additional_planned = settings.get("additional_planned") or []
for record in additional_planned:
plant_id, gen_id, model_region = record
plant_record = planned.loc[
(planned["plant_id_eia"] == plant_id) & (planned["generator_id"] == gen_id),
:,
]
plant_record["model_region"] = model_region
planned_gdf = planned_gdf.append(plant_record, sort=False)
logger.info(
f"{len(additional_planned)} generators were added to the planned list based on settings"
)
planned_gdf.loc[:, "heat_rate_mmbtu_mwh"] = planned_gdf.loc[
:, "technology_description"
].map(settings["proposed_gen_heat_rates"])
# The default EIA heat rate for non-thermal technologies is 9.21
planned_gdf.loc[
planned_gdf["heat_rate_mmbtu_mwh"].isnull(), "heat_rate_mmbtu_mwh"
] = 9.21
planned_gdf.loc[:, "minimum_load_mw"] = (
planned_gdf["technology_description"].map(settings["proposed_min_load"])
* planned_gdf[settings["capacity_col"]]
)
# Assume anything else being built at scale is wind/solar and will have a Min_power
# of 0
planned_gdf.loc[planned_gdf["minimum_load_mw"].isnull(), "minimum_load_mw"] = 0
planned_gdf = planned_gdf.set_index(
["plant_id_eia", "prime_mover_code", "energy_source_code_1"]
)
# Add a retirement year based on the planned start year
label_retirement_year(
df=planned_gdf,
settings=settings,
age_col="planned_operating_year",
add_additional_retirements=False,
)
if settings.get("group_technologies"):
planned_gdf = group_technologies(planned_gdf, settings)
print(planned_gdf["technology_description"].unique().tolist())
keep_cols = [
"model_region",
"technology_description",
"generator_id",
settings["capacity_col"],
"minimum_load_mw",
"operational_status_code",
"heat_rate_mmbtu_mwh",
"retirement_year",
]
return planned_gdf.loc[:, keep_cols]
def gentype_region_capacity_factor(
pudl_engine, plant_region_map, settings, years_filter=None
):
"""
Calculate the average capacity factor for all generators of a type/region. This
uses all years of available data unless otherwise specified. The potential
generation is calculated for every year a plant is in operation using the capacity
type specified in settings (nameplate, summer, or winter) and the number of hours
in each year.
As of this time PUDL only has generation data back to 2011.
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
plant_region_map : dataframe
A dataframe with the region for every plant
settings : dictionary
The dictionary of settings with a dictionary of region aggregations
Returns
-------
DataFrame
A dataframe with the capacity factor of every selected technology
"""
cap_col = settings["capacity_col"]
# Include standby (SB) generators since they are in our capacity totals
sql = """
SELECT
G.report_date,
G.plant_id_eia,
G.generator_id,
SUM(G.capacity_mw) AS capacity_mw,
SUM(G.summer_capacity_mw) as summer_capacity_mw,
SUM(G.winter_capacity_mw) as winter_capacity_mw,
G.technology_description,
G.fuel_type_code_pudl
FROM
generators_eia860 G
WHERE operational_status_code NOT IN ('RE', 'OS', 'IP', 'CN')
GROUP BY
G.report_date,
G.plant_id_eia,
G.technology_description,
G.fuel_type_code_pudl,
G.generator_id
ORDER by G.plant_id_eia, G.report_date
"""
plant_gen_tech_cap = pd.read_sql_query(
sql, pudl_engine, parse_dates=["report_date"]
)
plant_gen_tech_cap = plant_gen_tech_cap.loc[
plant_gen_tech_cap["plant_id_eia"].isin(plant_region_map["plant_id_eia"]), :
]
plant_gen_tech_cap = fill_missing_tech_descriptions(plant_gen_tech_cap)
plant_tech_cap = group_generators_at_plant(
df=plant_gen_tech_cap,
by=["plant_id_eia", "report_date", "technology_description"],
agg_fn={cap_col: "sum"},
)
plant_tech_cap = plant_tech_cap.merge(
plant_region_map, on="plant_id_eia", how="left"
)
label_small_hydro(plant_tech_cap, settings, by=["plant_id_eia", "report_date"])
sql = """
SELECT
strftime('%Y', GF.report_date) AS report_date,
GF.plant_id_eia,
SUM(GF.net_generation_mwh) AS net_generation_mwh,
GF.fuel_type_code_pudl
FROM
generation_fuel_eia923 GF
GROUP BY
strftime('%Y', GF.report_date),
GF.plant_id_eia,
GF.fuel_type_code_pudl
ORDER by GF.plant_id_eia, strftime('%Y', GF.report_date)
"""
generation = pd.read_sql_query(sql, pudl_engine, parse_dates={"report_date": "%Y"})
capacity_factor = pudl.helpers.merge_on_date_year(
plant_tech_cap, generation, on=["plant_id_eia"], how="left"
)
if settings.get("group_technologies"):
capacity_factor = group_technologies(capacity_factor, settings)
if years_filter is None:
years_filter = {
tech: settings["capacity_factor_default_year_filter"]
for tech in plant_gen_tech_cap["technology_description"].unique()
}
if type(settings["alt_year_filters"]) is dict:
for tech, value in settings["alt_year_filters"].items():
years_filter[tech] = value
data_years = plant_gen_tech_cap["report_date"].dt.year.unique()
# Use all years where the value is None
for tech, value in years_filter.items():
if value is None:
years_filter[tech] = data_years
df_list = []
for tech, years in years_filter.items():
_df = capacity_factor.loc[
(capacity_factor["technology_description"] == tech)
& (capacity_factor["report_date"].dt.year.isin(years)),
:,
]
df_list.append(_df)
capacity_factor = pd.concat(df_list, sort=False)
# get a unique set of dates to generate the number of hours
dates = capacity_factor["report_date"].drop_duplicates()
dates_to_hours = pd.DataFrame(
data={
"report_date": dates,
"hours": dates.apply(
lambda d: (
pd.date_range(d, periods=2, freq="YS")[1]
- pd.date_range(d, periods=2, freq="YS")[0]
)
/ pd.Timedelta(hours=1)
),
}
)
# merge in the hours for the calculation
capacity_factor = capacity_factor.merge(dates_to_hours, on=["report_date"])
capacity_factor["potential_generation_mwh"] = (
capacity_factor[cap_col] * capacity_factor["hours"]
)
capacity_factor_tech_region = capacity_factor.groupby(
["model_region", "technology_description"], as_index=False
)[["potential_generation_mwh", "net_generation_mwh"]].sum()
# actually calculate capacity factor wooo!
capacity_factor_tech_region["capacity_factor"] = (
capacity_factor_tech_region["net_generation_mwh"]
/ capacity_factor_tech_region["potential_generation_mwh"]
)
capacity_factor_tech_region.rename(
columns={"model_region": "region", "technology_description": "technology"},
inplace=True,
)
logger.debug(capacity_factor_tech_region)
return capacity_factor_tech_region
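# Capacity factor here is simply energy delivered over energy possible: a
# 100 MW plant that generated 350,400 MWh across 8,760 hours ran at a 0.4
# capacity factor (a sketch with made-up numbers).
#
#   >>> net_generation_mwh = 350_400
#   >>> potential_generation_mwh = 100 * 8760
#   >>> net_generation_mwh / potential_generation_mwh
#   0.4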
def add_fuel_labels(df, fuel_prices, settings):
"""Add a Fuel column with the approproriate regional fuel for each generator type
Parameters
----------
df : DataFrame
Generator clusters dataframe with all existing and proposed technologies
fuel_prices : DataFrame
Prices of fuels from EIA AEO scenarios in each census region. Columns include
['year', 'price', 'fuel', 'region', 'scenario', 'full_fuel_name']
settings : dictionary
The dictionary of settings with fuel price variables
Returns
-------
DataFrame
Same as input, but with a new column "Fuel" that is either the name of the
corresponding fuel (coal, natural_gas, uranium, or distillate) or "None".
"""
df["Fuel"] = "None"
for eia_tech, fuel in (settings.get("tech_fuel_map") or {}).items():
try:
if eia_tech == "Natural Gas Steam Turbine":
# No ATB natural gas steam turbine and I match it with coal for O&M
# which would screw this up and list natural gas as a fuel for ATB
# coal plants
atb_tech = None
else:
if not isinstance(settings["eia_atb_tech_map"][eia_tech], list):
settings["eia_atb_tech_map"][eia_tech] = [
settings["eia_atb_tech_map"][eia_tech]
]
atb_tech = [
tech.split("_")[0]
for tech in settings["eia_atb_tech_map"][eia_tech]
]
except KeyError:
# No corresponding ATB technology
atb_tech = None
scenario = settings["aeo_fuel_scenarios"][fuel]
model_year = settings["model_year"]
for aeo_region, model_regions in settings["aeo_fuel_region_map"].items():
fuel_name = ("_").join([aeo_region, scenario, fuel])
assert (
fuel_prices.query(
"year==@model_year & full_fuel_name==@fuel_name"
).empty
is False
), f"{fuel_name} doesn't show up in {model_year}"
df.loc[
(df["technology"] == eia_tech) & df["region"].isin(model_regions),
"Fuel",
] = fuel_name
if atb_tech is not None:
for tech in atb_tech:
df.loc[
(df["technology"].str.contains(tech, case=False))
& df["region"].isin(model_regions),
"Fuel",
] = fuel_name
for ccs_tech, ccs_fuel in (settings.get("ccs_fuel_map") or {}).items():
scenario = settings["aeo_fuel_scenarios"][ccs_fuel.split("_")[0]]
for aeo_region, model_regions in settings["aeo_fuel_region_map"].items():
ccs_fuel_name = ("_").join([aeo_region, scenario, ccs_fuel])
df.loc[
(df["technology"].str.contains(ccs_tech))
& df["region"].isin(model_regions),
"Fuel",
] = ccs_fuel_name
return df
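# Fuel labels are built as "<aeo_region>_<scenario>_<fuel>". With the
# hypothetical settings fragment below, a coal generator in a model region
# mapped to the east_north_central AEO region would get the Fuel value
# "east_north_central_reference_coal".
#
#   >>> settings_fragment = {
#   ...     "aeo_fuel_scenarios": {"coal": "reference"},
#   ...     "aeo_fuel_region_map": {"east_north_central": ["MIS_WUMS"]},
#   ... }
#   >>> "_".join(["east_north_central", "reference", "coal"])
#   'east_north_central_reference_coal'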
def calculate_transmission_inv_cost(resource_df, settings, offshore_spur_costs=None):
"""Calculate the transmission investment cost for each new resource.
Parameters
----------
resource_df : DataFrame
Each row represents a single resource within a region. Should have columns
`region` and `<type>_miles`, where transmission <type> is one of
        'spur', 'offshore_spur', or 'tx'.
settings : dict
A dictionary of user-supplied settings. Must have key
`transmission_investment_cost` with the format:
- <type>
- `capex_mw_mile` (float)
- `wacc` (float)
- `investment_years` (int)
- ...
offshore_spur_costs : DataFrame
Offshore spur costs per mile in the format
`technology` ('OffShoreWind'), `tech_detail`, `cost_case`, and `capex_mw_mile`.
Only used if `settings.transmission_investment_cost.capex_mw_mile` is missing.
Returns
-------
DataFrame
Modified copy of the input dataframe with new columns '<type>_capex' and
'<type>_inv_mwyr' for each column `<type>_miles`.
Raises
------
KeyError
Settings missing transmission types present in resources.
KeyError
Settings missing required keys.
KeyError
Setting capex_mw_mile missing regions present in resources.
TypeError
Setting capex_mw_mile is neither a dictionary nor a numeric value.
"""
SETTING = "transmission_investment_cost"
KEYS = ["wacc", "investment_years", "capex_mw_mile"]
ttypes = settings.get(SETTING, {})
# Check coverage of transmission types in resources
resource_ttypes = [x for x in TRANSMISSION_TYPES if f"{x}_miles" in resource_df]
missing_ttypes = list(set(resource_ttypes) - set(ttypes))
if missing_ttypes:
raise KeyError(f"{SETTING} missing transmission line types {missing_ttypes}")
# Apply calculation for each transmission type
regions = resource_df["region"].unique()
use_offshore_spur_costs = False
for ttype, params in ttypes.items():
if ttype not in resource_ttypes:
continue
if (
ttype == "offshore_spur"
and offshore_spur_costs is not None
and not params.get("capex_mw_mile")
):
use_offshore_spur_costs = True
# Build technology: capex_mw_mile map
params = params.copy()
params["capex_mw_mile"] = (
offshore_spur_costs.assign(
technology=offshore_spur_costs[
["technology", "tech_detail", "cost_case"]
]
.astype(str)
.agg("_".join, axis=1)
)
.set_index("technology")["capex_mw_mile"]
.to_dict()
)
# Check presence of required keys
missing_keys = list(set(KEYS) - set(params))
if missing_keys:
raise KeyError(f"{SETTING}.{ttype} missing required keys {missing_keys}")
if isinstance(params["capex_mw_mile"], dict):
if use_offshore_spur_costs:
capex_mw_mile = resource_df["technology"].map(params["capex_mw_mile"])
else:
# Check coverage of regions in resources
missing_regions = list(set(regions) - set(params["capex_mw_mile"]))
if missing_regions:
raise KeyError(
f"{SETTING}.{ttype}.capex_mw_mile missing regions {missing_regions}"
)
capex_mw_mile = resource_df["region"].map(params["capex_mw_mile"])
elif isinstance(params["capex_mw_mile"], Number):
capex_mw_mile = params["capex_mw_mile"]
else:
raise TypeError(
f"{SETTING}.{ttype}.capex_mw_mile should be numeric or a dictionary"
f" of <region>: <capex>, not {params['capex_mw_mile']}"
)
resource_df[f"{ttype}_capex"] = (
capex_mw_mile.fillna(0) * resource_df[f"{ttype}_miles"]
)
resource_df[f"{ttype}_inv_mwyr"] = investment_cost_calculator(
resource_df[f"{ttype}_capex"], params["wacc"], params["investment_years"]
)
return resource_df
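# Rough numbers for a single resource (a sketch, and only if
# investment_cost_calculator applies a standard capital recovery factor --
# an assumption here, since that helper is defined elsewhere): a capex of
# $3,900/MW-mile over 10 spur miles gives spur_capex of $39,000/MW, and at a
# 5.4% WACC over 60 years the annualized cost is roughly $2,200/MW-yr.
#
#   >>> capex = 3900 * 10
#   >>> wacc, years = 0.054, 60
#   >>> capex * wacc / (1 - (1 + wacc) ** -years)  # ~2200 $/MW-yr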
def add_transmission_inv_cost(
resource_df: pd.DataFrame, settings: dict
) -> pd.DataFrame:
"""Add tranmission investment costs to plant investment costs
Parameters
----------
resource_df
Each row represents a single resource within a region. Should have columns
`Inv_cost_per_MWyr` and transmission costs.
- one or more `<type>_inv_mwyr`,
where <type> is 'spur', 'offshore_spur', or 'tx'.
- `interconnect_annuity`
settings
User settings. If `transmission_investment_cost.use_total` is present and true,
        `interconnect_annuity` is used over `<type>_inv_mwyr` if present, not null,
and not zero.
Returns
-------
DataFrame
A modified copy of the input dataframe where 'Inv_cost_per_MWyr' represents the
combined plant and transmission investment costs. The new column
`plant_inv_cost_mwyr` represents just the plant investment costs.
"""
use_total = (
settings.get("transmission_investment_cost", {}).get("use_total", False)
and "interconnect_annuity" in resource_df
)
resource_df["plant_inv_cost_mwyr"] = resource_df["Inv_cost_per_MWyr"]
columns = [
c for c in [f"{t}_inv_mwyr" for t in TRANSMISSION_TYPES] if c in resource_df
]
cost = resource_df[columns].sum(axis=1)
if use_total:
total = resource_df["interconnect_annuity"]
        has_total = ~total.isna() & (total != 0)
cost[has_total] = total[has_total]
if cost.isna().any() or (cost == 0).any():
logger.warning(
"Transmission investment costs are missing or zero for some resources"
" and will not be included in the total investment costs."
)
resource_df["Inv_cost_per_MWyr"] += cost
return resource_df
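# A minimal sketch of what add_transmission_inv_cost does to one resource row:
# transmission annuities are added to the plant annuity, and the plant-only
# value is preserved in a new column (column values are made up).
#
#   >>> row = pd.DataFrame(
#   ...     {"Inv_cost_per_MWyr": [50000.0], "spur_inv_mwyr": [2200.0]}
#   ... )
#   >>> out = add_transmission_inv_cost(row, settings={})
#   >>> out[["Inv_cost_per_MWyr", "plant_inv_cost_mwyr"]].iloc[0].tolist()
#   [52200.0, 50000.0]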
def save_weighted_hr(weighted_unit_hr, pudl_engine):
    """Placeholder for persisting weighted unit heat rates; not implemented yet."""
    pass
class GeneratorClusters:
"""
    This class is used to determine generating units that will likely be operating
    in a given year, clusters them according to parameters from the settings file,
and determines the average operating characteristics of each cluster. Structuring
this as a class isn't strictly necessary but makes it easier to access generator
data part-way through the process.
"""
def __init__(
self,
pudl_engine,
pudl_out,
settings,
current_gens=True,
sort_gens=False,
plant_region_map_table="plant_region_map_epaipm",
settings_agg_key="region_aggregations",
):
"""
Parameters
----------
pudl_engine : sqlalchemy.Engine
A sqlalchemy connection for use by pandas
pudl_out : pudl.PudlTabl
A PudlTabl object for loading pre-calculated PUDL analysis data
settings : dictionary
The dictionary of settings with a dictionary of region aggregations
"""
self.pudl_engine = pudl_engine
self.pudl_out = pudl_out
self.settings = settings
self.current_gens = current_gens
self.sort_gens = sort_gens
self.model_regions_gdf = load_ipm_shapefile(self.settings)
self.weighted_unit_hr = None
if self.current_gens:
self.data_years = self.settings["data_years"]
self.gens_860 = load_generator_860_data(self.pudl_engine, self.data_years)
self.gens_entity = pd.read_sql_table(
"generators_entity_eia", self.pudl_engine
)
bga = self.pudl_out.bga()
self.bga = bga.loc[
bga.report_date.dt.year.isin(self.data_years), :
].drop_duplicates(["plant_id_eia", "generator_id"])
logger.info("Loading map of plants to IPM regions")
self.plant_region_map = load_plant_region_map(
self.gens_860,
self.pudl_engine,
self.settings,
self.model_regions_gdf,
table=plant_region_map_table,
)
self.gen_923 = load_923_gen_fuel_data(
self.pudl_engine,
self.pudl_out,
model_region_map=self.plant_region_map,
data_years=self.data_years,
)
self.eia_860m = load_860m(self.settings)
self.planned_860m = self.eia_860m["planned"]
self.canceled_860m = self.eia_860m["canceled"]
self.retired_860m = self.eia_860m["retired"]
# self.ownership = load_ownership_eia860(self.pudl_engine, self.data_years)
self.plants_860 = load_plants_860(self.pudl_engine, self.data_years)
# self.utilities_eia = load_utilities_eia(self.pudl_engine)
else:
self.existing_resources = pd.DataFrame()
self.fuel_prices = fetch_fuel_prices(self.settings)
self.atb_hr = fetch_atb_heat_rates(self.pudl_engine, self.settings)
self.coal_fgd = pd.read_csv(DATA_PATHS["coal_fgd"])
def fill_na_heat_rates(self, s):
"""Fill null heat rate values with the median of the series. Not many null
values are expected.
Parameters
----------
        s : Series
            A series of heat rate values (heat_rate_mmbtu_mwh).
        Returns
        -------
        Series
            Same as input but with any null values replaced by the median.
"""
if s.isnull().any():
median_hr = s.median()
return s.fillna(median_hr)
else:
return s
# median_hr = df["heat_rate_mmbtu_mwh"].median()
# df["heat_rate_mmbtu_mwh"].fillna(median_hr, inplace=True)
# return df
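    # A sketch of what fill_na_heat_rates does to a series of unit heat rates:
    # the single missing value below is replaced by the median of the others.
    #
    #   >>> s = pd.Series([9.0, 11.0, np.nan])
    #   >>> s.fillna(s.median()).tolist()
    #   [9.0, 11.0, 10.0]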
def create_demand_response_gen_rows(self):
"""Create rows for demand response/management resources to include in the
generators file.
Returns
-------
DataFrame
One row for each region/DSM resource with values in all columns filled.
"""
year = self.settings["model_year"]
df_list = []
self.demand_response_profiles = {}
if not self.settings.get("demand_response_resources"):
logger.warning(
"A demand response file is included in extra inputs but the parameter "
"`demand_response_resources` is not in the settings file. No demand "
"response resources will be included with the generators."
)
return pd.DataFrame()
for resource, parameters in self.settings["demand_response_resources"][
year
].items():
_df = pd.DataFrame(
index=self.settings["model_regions"],
columns=list(self.settings["generator_columns"]) + ["profile"],
)
_df = _df.drop(columns="Resource")
_df["technology"] = resource
_df["region"] = self.settings["model_regions"]
dr_path = (
Path.cwd()
/ self.settings["input_folder"]
/ self.settings["demand_response_fn"]
)
dr_profile = make_demand_response_profiles(dr_path, resource, self.settings)
self.demand_response_profiles[resource] = dr_profile
# Add hourly profile to demand response rows
dr_cf = dr_profile / dr_profile.max()
dr_regions = dr_cf.columns
_df = _df.loc[dr_regions, :]
_df["profile"] = list(dr_cf.values.T)
dr_capacity = demand_response_resource_capacity(
dr_profile, resource, self.settings
)
# This is to solve a bug with only one region. Need to come back and solve
# in a better fashion.
if len(dr_capacity) > 1:
dr_capacity_scenario = dr_capacity.squeeze()
else:
dr_capacity_scenario = dr_capacity
_df["Existing_Cap_MW"] = _df["region"].map(dr_capacity_scenario)
if not parameters.get("parameter_values"):
logger.warning(
"No model parameter values are provided in the settings file for "
f"the demand response resource '{resource}'. If another DR resource"
" has values under "
"`demand_response_resource.<year>.<DR_type>.parameter_values`, "
f"those columns will have a value of 0 for '{resource}'."
)
for col, value in parameters.get("parameter_values", {}).items():
_df[col] = value
df_list.append(_df)
dr_rows = pd.concat(df_list)
dr_rows["New_Build"] = -1
dr_rows["Fuel"] = "None"
dr_rows["cluster"] = 1
dr_rows = dr_rows.fillna(0)
return dr_rows
def create_region_technology_clusters(self, return_retirement_capacity=False):
"""
Calculation of average unit characteristics within a technology cluster
(capacity, minimum load, heat rate) and the number of units in the cluster.
Parameters
----------
plant_region_map_table : str, optional
Name of the table with region names for each plant, by default
"plant_region_map_epaipm"
settings_agg_key : str, optional
Name of the settings dictionary key with regional aggregations, by default
"region_aggregations"
return_retirement_capacity : bool, optional
If retired generators should be retured as a second dataframe, by default
False
Returns
-------
dataframe
"""
self.gens_860_model = (
self.gens_860.pipe(
supplement_generator_860_data,
self.gens_entity,
self.bga,
self.plant_region_map,
self.settings,
)
.pipe(remove_canceled_860m, self.canceled_860m)
.pipe(remove_retired_860m, self.retired_860m)
.pipe(label_retirement_year, self.settings, add_additional_retirements=True)
.pipe(label_small_hydro, self.settings, by=["plant_id_eia"])
.pipe(group_technologies, self.settings)
)
self.gens_860_model = self.gens_860_model.pipe(
modify_cc_prime_mover_code, self.gens_860_model
)
self.gens_860_model.drop_duplicates(inplace=True)
self.annual_gen_hr_923 = (
self.gen_923.pipe(modify_cc_prime_mover_code, self.gens_860_model)
.pipe(group_gen_by_year_fuel_primemover)
.pipe(add_923_heat_rate)
)
# Add heat rates to the data we already have from 860
logger.info("Loading heat rate data for units and generator/fuel combinations")
self.prime_mover_hr_map = plant_pm_heat_rates(self.annual_gen_hr_923)
if self.weighted_unit_hr is None:
self.weighted_unit_hr = unit_generator_heat_rates(
self.pudl_out, self.data_years
)
else:
logger.info("Using unit heat rates from previous round.")
# Merge the PUDL calculated heat rate data and set the index for easy
# mapping using plant/prime mover heat rates from 923
hr_cols = ["plant_id_eia", "unit_id_pudl", "heat_rate_mmbtu_mwh"]
idx = ["plant_id_eia", "prime_mover_code", "energy_source_code_1"]
self.units_model = self.gens_860_model.merge(
self.weighted_unit_hr[hr_cols],
on=["plant_id_eia", "unit_id_pudl"],
how="left",
).set_index(idx)
logger.info(
f"Units model technologies are "
f"{self.units_model.technology_description.unique().tolist()}"
)
# print(units_model.head())
logger.info(
"Assigning technology/fuel heat rates where unit heat rates are not "
"available"
)
self.units_model.loc[
self.units_model.heat_rate_mmbtu_mwh.isnull(), "heat_rate_mmbtu_mwh"
] = self.units_model.loc[
self.units_model.heat_rate_mmbtu_mwh.isnull()
].index.map(
self.prime_mover_hr_map
)
self.units_model.loc[
self.units_model.heat_rate_mmbtu_mwh > 35, "heat_rate_mmbtu_mwh"
] = self.units_model.loc[self.units_model.heat_rate_mmbtu_mwh > 35].index.map(
self.prime_mover_hr_map
)
# Set negative heat rates to nan
self.units_model.loc[
(self.units_model.heat_rate_mmbtu_mwh < 0)
| (self.units_model.heat_rate_mmbtu_mwh > 35),
"heat_rate_mmbtu_mwh",
] = np.nan
# Fill any null heat rate values for each tech
        for tech in self.units_model["technology_description"].unique():
self.units_model.loc[
self.units_model.technology_description == tech, "heat_rate_mmbtu_mwh"
] = self.fill_na_heat_rates(
self.units_model.loc[
self.units_model.technology_description == tech,
"heat_rate_mmbtu_mwh",
]
)
# assert (
# self.units_model["heat_rate_mmbtu_mwh"].isnull().any() is False
# ), "There are still some null heat rate values"
logger.info(
f"Units model technologies are "
f"{self.units_model.technology_description.unique().tolist()}"
)
logger.info(
f"Before adding proposed generators, {len(self.units_model)} units with "
f"{self.units_model[self.settings['capacity_col']].sum()} MW capacity"
)
proposed_gens = import_proposed_generators(
planned=self.planned_860m,
settings=self.settings,
model_regions_gdf=self.model_regions_gdf,
)
logger.info(
f"Proposed gen technologies are "
f"{proposed_gens.technology_description.unique().tolist()}"
)
logger.info(f"{proposed_gens[self.settings['capacity_col']].sum()} MW proposed")
self.units_model = pd.concat([proposed_gens, self.units_model], sort=False)
# Create a pudl unit id based on plant and generator id where one doesn't exist.
# This is used later to match the cluster numbers to plants
self.units_model.reset_index(inplace=True)
self.units_model.loc[self.units_model.unit_id_pudl.isnull(), "unit_id_pudl"] = (
self.units_model.loc[
self.units_model.unit_id_pudl.isnull(), "plant_id_eia"
].astype(str)
+ "_"
+ self.units_model.loc[
self.units_model.unit_id_pudl.isnull(), "generator_id"
].astype(str)
).values
self.units_model.set_index(idx, inplace=True)
logger.info("Calculating plant O&M costs")
techs = self.settings["num_clusters"].keys()
self.units_model = (
self.units_model.rename(columns={"technology_description": "technology"})
.query("technology.isin(@techs).values")
.pipe(
atb_fixed_var_om_existing,
self.atb_hr,
self.settings,
self.pudl_engine,
self.coal_fgd,
)
)
# logger.info(
# f"After adding proposed, units model technologies are "
# f"{self.units_model.technology_description.unique().tolist()}"
# )
logger.info(
f"After adding proposed generators, {len(self.units_model)} units with "
f"{self.units_model[self.settings['capacity_col']].sum()} MW capacity"
)
techs = list(self.settings["num_clusters"])
num_clusters = {}
for region in self.settings["model_regions"]:
num_clusters[region] = self.settings["num_clusters"].copy()
if self.settings.get("alt_num_clusters"):
for region in self.settings["alt_num_clusters"]:
for tech, cluster_size in self.settings["alt_num_clusters"][
region
].items():
num_clusters[region][tech] = cluster_size
region_tech_grouped = self.units_model.loc[
(self.units_model.technology.isin(techs))
& (self.units_model.retirement_year > self.settings["model_year"]),
:,
].groupby(["model_region", "technology"])
self.retired = self.units_model.loc[
self.units_model.retirement_year <= self.settings["model_year"], :
]
# gens_860 lost the ownership code... refactor this!
# self.all_gens_860 = load_generator_860_data(self.pudl_engine, self.data_years)
# Getting weighted ownership for each unit, which will be used below.
# self.weighted_ownership = weighted_ownership_by_unit(
# self.units_model, self.all_gens_860, self.ownership, self.settings
# )
# For each group, cluster and calculate the average size/min load/heat rate
# logger.info("Creating technology clusters by region")
logger.info("Creating technology clusters by region")
unit_list = []
self.cluster_list = []
alt_cluster_method = self.settings.get("alt_cluster_method") or {}
for _, df in region_tech_grouped:
region, tech = _
grouped = group_units(df, self.settings)
# This is bad. Should be setting up a dictionary of objects that picks the
# correct clustering method. Can't keep doing if statements as the number of
# methods grows. CHANGE LATER.
if not alt_cluster_method:
if num_clusters[region][tech] > 0:
cluster_cols = [
"Fixed_OM_cost_per_MWyr",
# "Var_OM_cost_per_MWh",
# "minimum_load_mw",
"heat_rate_mmbtu_mwh",
]
clusters = cluster.KMeans(
n_clusters=num_clusters[region][tech], random_state=6
).fit(
preprocessing.StandardScaler().fit_transform(
grouped[cluster_cols]
)
)
grouped["cluster"] = (
clusters.labels_ + 1
) # Change to 1-index for julia
else:
if (
region in alt_cluster_method
and tech in alt_cluster_method[region]["technology_description"]
):
grouped = cluster_by_owner(
df,
self.weighted_ownership,
# self.ownership,
self.plants_860,
region,
tech,
self.settings,
)
elif num_clusters[region][tech] > 0:
clusters = cluster.KMeans(
n_clusters=num_clusters[region][tech], random_state=6
).fit(preprocessing.StandardScaler().fit_transform(grouped))
grouped["cluster"] = (
clusters.labels_ + 1
) # Change to 1-index for julia
# Saving individual unit data for later analysis (if needed)
unit_list.append(grouped)
# Don't add technologies with specified 0 clusters
if num_clusters[region][tech] != 0:
_df = calc_unit_cluster_values(grouped, self.settings, tech)
_df["region"] = region
_df["plant_id_eia"] = (
grouped.reset_index().groupby("cluster")["plant_id_eia"].apply(list)
)
_df["unit_id_pudl"] = (
grouped.reset_index().groupby("cluster")["unit_id_pudl"].apply(list)
)
self.cluster_list.append(_df)
# Save some data about individual units for easy access
self.all_units = pd.concat(unit_list, sort=False)
self.all_units = pd.merge(
self.units_model.reset_index(),
self.all_units,
on=["plant_id_eia", "unit_id_pudl"],
how="left",
).merge(
self.plants_860[["plant_id_eia", "utility_id_eia"]],
on=["plant_id_eia"],
how="left",
)
logger.info("Finalizing generation clusters")
self.results = pd.concat(self.cluster_list)
logger.info(
f"Results technologies are {self.results.technology.unique().tolist()}"
)
# if self.settings.get("region_wind_pv_cap_fn"):
# from powergenome.external_data import overwrite_wind_pv_capacity
# logger.info("Setting existing wind/pv using external file")
# self.results = overwrite_wind_pv_capacity(self.results, self.settings)
self.results = self.results.reset_index().set_index(
["region", "technology", "cluster"]
)
self.results.rename(
columns={
self.settings["capacity_col"]: "Cap_size",
"heat_rate_mmbtu_mwh": "Heat_rate_MMBTU_per_MWh",
},
inplace=True,
)
# Calculate average capacity factors
if type(self.settings["capacity_factor_techs"]) is list:
self.capacity_factors = gentype_region_capacity_factor(
self.pudl_engine, self.plant_region_map, self.settings
)
self.results = pd.merge(
self.results.reset_index(),
self.capacity_factors[["region", "technology", "capacity_factor"]],
on=["region", "technology"],
how="left",
)
if self.settings.get("derate_capacity"):
derate_techs = self.settings["derate_techs"]
self.results.loc[:, "unmodified_cap_size"] = self.results.loc[
:, "Cap_size"
].copy()
self.results.loc[
self.results["technology"].isin(derate_techs), "Cap_size"
] = (
self.results.loc[
self.results["technology"].isin(derate_techs),
"unmodified_cap_size",
]
* self.results.loc[
self.results["technology"].isin(derate_techs), "capacity_factor"
]
)
# Round Cap_size to prevent GenX error.
self.results = self.results.round(3)
self.results["Cap_size"] = self.results["Cap_size"]
self.results["Existing_Cap_MW"] = self.results.Cap_size * self.results.num_units
self.results["unmodified_existing_cap_mw"] = (
self.results["unmodified_cap_size"] * self.results["num_units"]
)
if self.settings.get("region_wind_pv_cap_fn"):
from powergenome.external_data import overwrite_wind_pv_capacity
logger.info("Setting existing wind/pv using external file")
self.results = overwrite_wind_pv_capacity(self.results, self.settings)
# Add fixed/variable O&M based on NREL atb
self.results = (
self.results.reset_index()
# .pipe(
# atb_fixed_var_om_existing, self.atb_costs, self.atb_hr, self.settings
# )
# .pipe(atb_new_generators, self.atb_costs, self.atb_hr, self.settings)
.pipe(startup_fuel, self.settings)
.pipe(add_fuel_labels, self.fuel_prices, self.settings)
.pipe(startup_nonfuel_costs, self.settings)
.pipe(add_genx_model_tags, self.settings)
)
if self.sort_gens:
logger.info("Sorting new resources alphabetically.")
self.results = self.results.sort_values(["region", "technology"])
# self.results = self.results.rename(columns={"technology": "Resource"})
self.results["Resource"] = snake_case_col(self.results["technology"])
# Add variable resource profiles
self.results["profile"] = None
self.results = self.results.reset_index(drop=True)
for i, row in enumerate(self.results.itertuples()):
params = map_eia_technology(row.technology)
if not params:
# EIA technology not supported
continue
params.update({"existing": True})
groups = CLUSTER_BUILDER.find_groups(**params)
if not groups:
# No matching resource groups
continue
if len(groups) > 1:
# Multiple matching resource groups
raise ValueError(
f"Multiple existing resource groups match EIA technology"
+ row.technology
)
group = groups[0]
if group.profiles is None:
# Resource group has no profiles
continue
if row.region in self.settings.get("region_aggregations", {}):
ipm_regions = self.settings.get("region_aggregations", {})[row.region]
else:
ipm_regions = [row.region]
metadata = group.metadata.read()
if not metadata["ipm_region"].isin(ipm_regions).any():
# Resource group has no resources in selected IPM regions
continue
clusters = group.get_clusters(
ipm_regions=ipm_regions,
max_clusters=1,
utc_offset=self.settings.get("utc_offset", 0),
)
self.results["profile"][i] = clusters["profile"][0]
        if return_retirement_capacity:
            return self.results, self.retired
        return self.results
def create_new_generators(self):
self.offshore_spur_costs = fetch_atb_offshore_spur_costs(
self.pudl_engine, self.settings
)
self.atb_costs = fetch_atb_costs(
self.pudl_engine, self.settings, self.offshore_spur_costs
)
self.new_generators = atb_new_generators(
self.atb_costs, self.atb_hr, self.settings
)
self.new_generators = (
self.new_generators.pipe(startup_fuel, self.settings)
.pipe(add_fuel_labels, self.fuel_prices, self.settings)
.pipe(startup_nonfuel_costs, self.settings)
.pipe(add_genx_model_tags, self.settings)
)
if self.sort_gens:
logger.info("Sorting new resources alphabetically.")
self.new_generators = self.new_generators.sort_values(
["region", "technology"]
)
if self.settings.get("capacity_limit_spur_fn"):
self.new_generators = self.new_generators.pipe(
add_resource_max_cap_spur, self.settings
)
else:
logger.warning("No settings parameter for max capacity/spur file")
self.new_generators = self.new_generators.pipe(
calculate_transmission_inv_cost, self.settings, self.offshore_spur_costs
).pipe(add_transmission_inv_cost, self.settings)
if self.settings.get("demand_response_fn"):
dr_rows = self.create_demand_response_gen_rows()
self.new_generators = pd.concat([self.new_generators, dr_rows], sort=False)
self.new_generators["Resource"] = snake_case_col(
self.new_generators["technology"]
)
return self.new_generators
def create_all_generators(self):
if self.current_gens:
self.existing_resources = self.create_region_technology_clusters()
self.new_resources = self.create_new_generators()
self.all_resources = pd.concat(
[self.existing_resources, self.new_resources], ignore_index=True, sort=False
)
self.all_resources = self.all_resources.round(3)
self.all_resources["Cap_size"] = self.all_resources["Cap_size"]
self.all_resources["Heat_rate_MMBTU_per_MWh"] = self.all_resources[
"Heat_rate_MMBTU_per_MWh"
]
self.all_resources = self.all_resources.reset_index(drop=True)
self.all_resources["variable_CF"] = 0.0
for i, p in enumerate(self.all_resources["profile"]):
            if isinstance(p, (collections.abc.Sequence, np.ndarray)):
self.all_resources.loc[i, "variable_CF"] = np.mean(p)
# Set Min_power of wind/solar to 0
self.all_resources.loc[self.all_resources["DISP"] == 1, "Min_power"] = 0
self.all_resources["R_ID"] = np.arange(len(self.all_resources)) + 1
if self.current_gens:
logger.info(
f"Capacity of {self.all_resources['Existing_Cap_MW'].sum()} MW in final clusters"
)
return self.all_resources
| 35.146183
| 97
| 0.627684
|
69bf4f90c112cfdcb70a009f749c3a71f9f17f0e
| 15,853
|
py
|
Python
|
src/etos_lib/lib/events.py
|
fredjn/etos-library
|
f0fe414abae178f36314dfc3ea25353b7d8780bb
|
[
"Apache-2.0"
] | null | null | null |
src/etos_lib/lib/events.py
|
fredjn/etos-library
|
f0fe414abae178f36314dfc3ea25353b7d8780bb
|
[
"Apache-2.0"
] | 3
|
2020-09-28T12:02:39.000Z
|
2022-01-20T08:39:52.000Z
|
src/etos_lib/lib/events.py
|
fredjn/etos-library
|
f0fe414abae178f36314dfc3ea25353b7d8780bb
|
[
"Apache-2.0"
] | 3
|
2020-09-25T11:16:28.000Z
|
2020-12-02T10:16:07.000Z
|
# Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETOS Library event helper module."""
from eiffellib.events import EiffelActivityTriggeredEvent
from eiffellib.events import EiffelActivityStartedEvent
from eiffellib.events import EiffelActivityFinishedEvent
from eiffellib.events import EiffelActivityCanceledEvent
from eiffellib.events import EiffelAnnouncementPublishedEvent
from eiffellib.events import EiffelConfidenceLevelModifiedEvent
from eiffellib.events import EiffelEnvironmentDefinedEvent
from eiffellib.events import EiffelTestSuiteStartedEvent
from eiffellib.events import EiffelTestSuiteFinishedEvent
from eiffellib.events import EiffelTestExecutionRecipeCollectionCreatedEvent
from eiffellib.events import EiffelTestCaseTriggeredEvent
from eiffellib.events import EiffelTestCaseStartedEvent
from eiffellib.events import EiffelTestCaseFinishedEvent
from eiffellib.events import EiffelArtifactCreatedEvent
from eiffellib.events import EiffelArtifactPublishedEvent
from eiffellib.events import EiffelCompositionDefinedEvent
from .debug import Debug
class Events:
"""Helper class for sending eiffel events."""
def __init__(self, publisher):
"""Initialize event helper."""
self.publisher = publisher
self.debug = Debug()
def __del__(self):
"""Delete reference to eiffel publisher."""
self.publisher = None
def send(self, event, links, data):
"""Build an event and send it with an eiffel publisher.
:param event: Initialized event to send.
:type event: :obj:`eiffel.events.base_event.BaseEvent`
:param links: Dictionary of links to add to event.
:type links: dict
:param data: Dictionary of data to add to event.
:type data: dict
:return: The event that was created with data and links added.
:rtype: :obj:`eiffel.events.base_event.BaseEvent`
"""
for key, value in links.items():
if isinstance(value, list):
for link in value:
event.links.add(key.upper(), link)
else:
event.links.add(key.upper(), value)
for key, value in data.items():
event.data.add(key, value)
event.validate()
self.debug.events_published.append(event)
event.tag = self.debug.routing_key_tag
if not self.debug.disable_sending_events:
self.publisher.send_event(event)
return event
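    # Illustrative activity flow (a sketch; ``publisher`` is whatever Eiffel
    # publisher the caller already has, and ``parent_event`` is assumed to be
    # an earlier event to link as CAUSE):
    #
    #   >>> events = Events(publisher)
    #   >>> triggered = events.send_activity_triggered(
    #   ...     "Example activity", links={"CAUSE": parent_event}
    #   ... )
    #   >>> started = events.send_activity_started(triggered)
    #   >>> events.send_activity_finished(triggered, {"conclusion": "SUCCESSFUL"})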
def send_activity_triggered(self, name, links=None, **optional):
"""Send activity triggered event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelActivityTriggeredEvent.md
:param name: Name of the activity
:type name: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
:return: The event that was created with data and links added.
:rtype: :obj:`eiffel.events.EiffelActivityTriggeredEvent`
"""
links = links if links is not None else {}
data = {"name": name}
data.update(**optional)
return self.send(EiffelActivityTriggeredEvent(), links, data)
def send_activity_canceled(self, triggered, links=None, **optional):
"""Send activity canceled event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelActivityCanceledEvent.md
:param triggered: Event ID of activity triggered event which is canceled.
:type triggered: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"ACTIVITY_EXECUTION": triggered})
data = optional
return self.send(EiffelActivityCanceledEvent(), links, data)
def send_activity_started(self, triggered, links=None, **optional):
"""Send activity started event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelActivityStartedEvent.md
:param triggered: Event ID of activity triggered event which is started.
:type triggered: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"ACTIVITY_EXECUTION": triggered})
data = optional
return self.send(EiffelActivityStartedEvent(), links, data)
def send_activity_finished(self, triggered, outcome, links=None, **optional):
"""Send activity finished event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelActivityFinishedEvent.md
:param triggered: Event ID of activity triggered event which is finished.
:type triggered: str
:param outcome: Outcome of the activity.
:type outcome: dict
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"ACTIVITY_EXECUTION": triggered})
data = {"outcome": outcome}
data.update(**optional)
return self.send(EiffelActivityFinishedEvent(), links, data)
def send_environment_defined(self, name, links=None, **optional):
"""Send environment defined event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelEnvironmentDefinedEvent.md
:param name: Name of environment.
:type name: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
if (
optional.get("image") is None
and optional.get("host") is None
and optional.get("uri") is None
):
raise Exception("At least one of 'host', 'image' or 'uri' must be provided")
links = links if links is not None else {}
data = {"name": name}
data.update(**optional)
return self.send(EiffelEnvironmentDefinedEvent(), links, data)
def send_test_suite_started(self, name, links=None, **optional):
"""Publish a test suite started event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestSuiteStartedEvent.md
:param name: Name of testsuite.
:type name: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
data = {"name": name}
data.update(**optional)
return self.send(EiffelTestSuiteStartedEvent(), links, data)
def send_test_suite_finished(self, test_suite, links=None, **optional):
"""Publish a test suite finished event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestSuiteFinishedEvent.md
:param test_suite: A reference to the started test suite.
:type test_suite: :obj:`eiffel.events.base_event.BaseEvent`
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"TEST_SUITE_EXECUTION": test_suite})
data = optional
return self.send(EiffelTestSuiteFinishedEvent(), links, data)
def send_announcement_published(
self, heading, body, severity, links=None, **optional
):
"""Publish an announcement event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelAnnouncementPublishedEvent.md
:param heading: Heading for the announcement.
:type heading: str
:param body: Body for the announcement.
:type body: str
:param severity: Severity of the incident.
:type severity: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
data = {"heading": heading, "body": body, "severity": severity}
data.update(**optional)
return self.send(EiffelAnnouncementPublishedEvent(), links, data)
def send_test_execution_recipe_collection_created(
self, selection_strategy, links=None, **optional
):
"""Publish a TERCC event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestExecutionRecipeCollectionCreatedEvent.md
:param selection_strategy: Selection strategy used by tercc
:type selection_strategy: dict
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
if optional.get("batches") is None and optional.get("batchesUri") is None:
raise Exception(
"At least one of 'batches' or 'batchesUri' must be provided"
)
links = links if links is not None else {}
data = {"selectionStrategy": selection_strategy}
data.update(**optional)
return self.send(EiffelTestExecutionRecipeCollectionCreatedEvent(), links, data)
def send_confidence_level_modified(self, name, value, links=None, **optional):
"""Publish a confidence level event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelConfidenceLevelModifiedEvent.md
:param name: Name of confidence level.
:type name: str
:param value: Value of confidence.
:type value: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
data = {"name": name, "value": value}
data.update(**optional)
return self.send(EiffelConfidenceLevelModifiedEvent(), links, data)
def send_test_case_triggered(self, test_case, iut, links=None, **optional):
"""Publish a confidence level event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestCaseTriggeredEvent.md
:param test_case: TestCase that has been triggered.
:type test_case: dict
:param iut: Item under test.
:type iut: :obj:`eiffel.events.base_event.BaseEvent`
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"IUT": iut})
data = {"testCase": test_case}
data.update(**optional)
return self.send(EiffelTestCaseTriggeredEvent(), links, data)
def send_test_case_started(self, test_case, links=None, **optional):
"""Publish a confidence level event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestCaseStartedEvent.md
        :param test_case: Test case triggered event which has started.
:type test_case: :obj:`eiffel.events.base_event.BaseEvent`
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"TEST_CASE_EXECUTION": test_case})
data = optional
return self.send(EiffelTestCaseStartedEvent(), links, data)
def send_test_case_finished(self, test_case, outcome, links=None, **optional):
"""Publish a confidence level event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelTestCaseFinishedEvent.md
        :param test_case: Test case triggered event which has finished.
:type test_case: :obj:`eiffel.events.base_event.BaseEvent`
:param outcome: Outcome of the test case.
:type outcome: dict
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"TEST_CASE_EXECUTION": test_case})
data = {"outcome": outcome}
data.update(**optional)
return self.send(EiffelTestCaseFinishedEvent(), links, data)
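    # Sketch of a test case lifecycle using the three helpers above (the
    # ``events`` and ``iut_event`` objects and the payload dicts are assumed,
    # illustrative values, not the full Eiffel schema):
    #
    #   >>> tc = {"id": "example-test", "version": "1.0"}
    #   >>> triggered = events.send_test_case_triggered(tc, iut_event)
    #   >>> events.send_test_case_started(triggered)
    #   >>> events.send_test_case_finished(
    #   ...     triggered, {"verdict": "PASSED", "conclusion": "SUCCESSFUL"}
    #   ... )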
def send_artifact_created_event(self, identity, links=None, **optional):
"""Publish an artifact created event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelArtifactCreatedEvent.md
:param identity: PURL identity specification
:type identity: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
data = {"identity": identity}
data.update(**optional)
return self.send(EiffelArtifactCreatedEvent(), links, data)
def send_artifact_published_event(
self, locations, artifact, links=None, **optional
):
"""Publish an artifact created event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelArtifactPublishedEvent.md
:param locations: Locations for this artifact.
:type locations: list
:param artifact: Artifact created link.
:type artifact: :obj:`eiffel.events.base_event.BaseEvent`
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
links.update({"ARTIFACT": artifact})
data = {"locations": locations}
data.update(**optional)
return self.send(EiffelArtifactPublishedEvent(), links, data)
def send_composition_defined_event(self, name, links=None, **optional):
"""Publish a composition defined event.
https://github.com/eiffel-community/eiffel/blob/master/eiffel-vocabulary/EiffelCompositionDefinedEvent.md
        :param name: Name of the composition.
:type name: str
:param links: Optional links to add to event.
:type links: dict
:param optional: Dictionary of optional data to add.
:type optional: dict
"""
links = links if links is not None else {}
data = {"name": name}
data.update(**optional)
return self.send(EiffelCompositionDefinedEvent(), links, data)
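    # Illustrative helper, not part of the original API: it only chains two of
    # the publish methods above. The "version" keyword and the shape of the
    # "locations" items are assumptions based on the linked Eiffel vocabulary,
    # not on code shown in this module.
    def example_publish_composition_and_artifact(self, artifact_created_event):
        """Hedged usage sketch: define a composition, then publish an artifact.
        :param artifact_created_event: Previously sent artifact created event.
        :type artifact_created_event: :obj:`eiffel.events.base_event.BaseEvent`
        """
        self.send_composition_defined_event("example-composition", version="1.0.0")
        return self.send_artifact_published_event(
            locations=[{"type": "OTHER", "uri": "https://example.com/artifact"}],
            artifact=artifact_created_event,
        )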
| 41.5
| 131
| 0.668202
|
ed475f8222ae28b790324c16f0c9863194da2b5f
| 1,690
|
py
|
Python
|
salt/grains/extra.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | null | null | null |
salt/grains/extra.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 1
|
2017-07-10T21:44:39.000Z
|
2017-07-10T21:44:39.000Z
|
salt/grains/extra.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 1
|
2021-08-19T13:30:13.000Z
|
2021-08-19T13:30:13.000Z
|
import logging
import os
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.yaml
__proxyenabled__ = ["*"]
log = logging.getLogger(__name__)
def shell():
"""
Return the default shell to use on this system
"""
# Provides:
# shell
if salt.utils.platform.is_windows():
env_var = "COMSPEC"
default = r"C:\Windows\system32\cmd.exe"
else:
env_var = "SHELL"
default = "/bin/sh"
return {"shell": os.environ.get(env_var, default)}
def config():
"""
Return the grains set in the grains file
"""
if "conf_file" not in __opts__:
return {}
if os.path.isdir(__opts__["conf_file"]):
if salt.utils.platform.is_proxy():
gfn = os.path.join(
__opts__["conf_file"], "proxy.d", __opts__["id"], "grains"
)
else:
gfn = os.path.join(__opts__["conf_file"], "grains")
else:
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__["conf_file"]),
"proxy.d",
__opts__["id"],
"grains",
)
else:
gfn = os.path.join(os.path.dirname(__opts__["conf_file"]), "grains")
if os.path.isfile(gfn):
log.debug("Loading static grains from %s", gfn)
with salt.utils.files.fopen(gfn, "rb") as fp_:
try:
return salt.utils.data.decode(salt.utils.yaml.safe_load(fp_))
except Exception: # pylint: disable=broad-except
log.warning("Bad syntax in grains file! Skipping.")
return {}
return {}
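# Minimal sketch of how these grain functions behave outside of Salt's loader:
# shell() only inspects the process environment, so it can be called directly;
# config() depends on the __opts__ dunder injected by the Salt loader, so it
# only returns useful data when this file is loaded as a grains module.
if __name__ == "__main__":
    # Expected output is something like {'shell': '/bin/bash'} on POSIX systems
    # or {'shell': 'C:\\Windows\\system32\\cmd.exe'} on Windows.
    print(shell())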
| 27.704918
| 80
| 0.556213
|
7756cd889b741435a27128beb664f52b38def04c
| 13,463
|
py
|
Python
|
RI/TP2/indexer.py
|
lengors/ua-repository
|
4a2ff60af8b190783e1992fe8edb40fc1147224a
|
[
"MIT"
] | null | null | null |
RI/TP2/indexer.py
|
lengors/ua-repository
|
4a2ff60af8b190783e1992fe8edb40fc1147224a
|
[
"MIT"
] | null | null | null |
RI/TP2/indexer.py
|
lengors/ua-repository
|
4a2ff60af8b190783e1992fe8edb40fc1147224a
|
[
"MIT"
] | null | null | null |
from corpus_reader import CorpusReader
from tokenization import Tokenizer
import collections, math, psutil
import os, shutil, gc
class Indexer:
class __InnerIndexer:
def __init__(self, tokenizer : Tokenizer, index_folder : str, max_memory_usage : float = 20):
self.terms = {}
self.segments = []
self.documents = set()
self.__disposed = False
self.tokenizer = tokenizer
self.index_folder = index_folder
self.max_memory_usage = max_memory_usage
self.process = psutil.Process(os.getpid())
self._write_function = self._write_unranked
self._parse_function = self._parse_unranked
self._merge_function = self._merge_unranked
self.temp_index_folder = os.path.join(index_folder, 'blocks')
self.segm_index_folder = os.path.join(index_folder, 'segments')
if not os.path.isdir(index_folder):
os.mkdir(index_folder)
if not os.path.isdir(self.temp_index_folder):
os.mkdir(self.temp_index_folder)
if not os.path.isdir(self.segm_index_folder):
os.mkdir(self.segm_index_folder)
def __del__(self):
self.dispose()
def dispatch(self):
if type(self.terms) != collections.OrderedDict:
self.sort()
filename = os.path.join(self.temp_index_folder, 'block-{}'.format(len(os.listdir(self.temp_index_folder))))
with open(filename, 'w') as fout:
fout.write(self.__str__())
self.terms = {}
gc.collect()
def dispose(self):
if not self.__disposed:
shutil.rmtree(self.index_folder)
self.__disposed = True
def index(self, corpus_reader : CorpusReader):
for pmid, document in corpus_reader.items():
self.update(pmid, document)
self.documents.add(pmid)
if self.process.memory_percent() >= self.max_memory_usage:
self.dispatch()
def merge(self, calculate_tfidf : bool = False):
self.segments.clear()
# select which function to use
write_func = self.__write_forced_ranked if calculate_tfidf else self._write_function
# sort in-memory terms if necessary
if type(self.terms) != collections.OrderedDict:
self.sort()
# get in disk blocks
filenames = [ filename for filename in [ os.path.join(self.temp_index_folder, filename) for filename in os.listdir(self.temp_index_folder) ] if os.path.isfile(filename) ]
files = [ open(filename, 'r') for filename in filenames ]
# output file
output_filename = os.path.join(self.segm_index_folder, 'segment-{}'.format(len(os.listdir(self.segm_index_folder))))
output_file = open(output_filename, 'w')
# current term for each sorted block
lines = [ self._parse_function(line) for line in [ file.readline() for file in files ] if line and len(line.strip()) > 0 ]
if len(self.terms) > 0:
lines.append(self.terms.popitem(0))
# temporary list to store terms before writing them to disk
output = list()
# gets first term (in order)
cline = self.__get_line(lines, files, self._parse_function)
# while terms to process are available
while len(lines) > 0:
# gets next term (in order)
line = self.__get_line(lines, files, self._parse_function)
# checks if current term and next term are mergable
if line[0] == cline[0]:
# merges them
cline = (cline[0], self._merge_function(cline[1], line[1]))
# else
else:
# stores stringified version of term (and associated data)
output.append(write_func(cline))
# if too much memory in use then write to file stored terms
if self.process.memory_percent() >= self.max_memory_usage:
self.__flush(output, output_file, output_filename)
output_filename = os.path.join(self.segm_index_folder, 'segment-{}'.format(len(os.listdir(self.segm_index_folder))))
output_file = open(output_filename, 'w')
gc.collect()
# update current term
cline = line
# stores stringified version of last term
output.append(write_func(cline))
self.__flush(output, output_file, output_filename)
# deletes temporary blocks in disk
shutil.rmtree(self.temp_index_folder)
# sets if the data is ranked or not
if calculate_tfidf:
self._write_function = self._write_ranked
self._parse_function = self._parse_ranked
# self._merge_function = self._merge_ranked
def sort(self):
self.terms = collections.OrderedDict(sorted(self.terms.items()))
def update(self, pmid, document):
terms = self.terms
for i, term in enumerate(self.tokenizer.tokenize(document)):
documents = terms.setdefault(term, {})
documents[pmid] = documents.get(pmid, 0) + 1
def _merge_unranked(self, docs0 : dict, docs1 : dict):
return { key : docs0.get(key, 0) + docs1.get(key, 0) for key in docs0.keys() | docs1.keys() }
def _parse_ranked(self, line : str):
term, value = line.split(':', 1)
idf, *docs = value.split(';')
return (term, (float(idf), { pmid : (float(weight), round(10 ** (float(weight) / float(idf) - 1))) for pmid, weight in [ doc.split(':') for doc in docs ] }))
def _parse_unranked(self, line : str):
term, *docs = line.split(';')
return (term, { key : int(value) for key, value in [ doc.split(':') for doc in docs ] })
def _rank(self, line : tuple):
term, documents = line
idf = math.log10(len(self.documents) / len(documents))
for pmid, count in documents.items():
documents[pmid] = ((1 + math.log10(count)) * idf, count)
return (term, (idf, documents))
def _write_ranked(self, line : tuple):
return '{}:{};{}'.format(line[0], line[1][0], ';'.join([ '{}:{}'.format(pmid, weight) for pmid, (weight, count) in line[1][1].items() ]))
def _write_unranked(self, line : tuple):
return '{};{}'.format(line[0], ';'.join([ '{}:{}'.format(pmid, count) for pmid, count in line[1].items() ]))
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n'.join([ self._write_function(item) for item in self.terms.items() ])
def __flush(self, output : list, output_file, output_filename : str):
self.segments.append((output[0][0], output[-1][0], output_filename))
output_file.write('\n'.join(output))
output_file.close()
output.clear()
def __get_line(self, lines, files, parse_func):
i, line = min(enumerate(lines), key = lambda line: line[1][0])
if i >= len(files):
if len(self.terms) > 0:
lines[i] = self.terms.popitem(0)
else:
lines.pop(i)
else:
new_line = files[i].readline()
if not new_line or len(new_line.strip()) == 0:
lines.pop(i)
files[i].close()
files.pop(i)
else:
lines[i] = parse_func(new_line)
return line
def __get_segment(self, key):
segment = [ filename for start, end, filename in self.segments if key >= start and key <= end ]
return segment[0] if len(segment) > 0 else None
def __load_segment(self, segment):
with open(segment, 'r') as fin:
self.terms = collections.OrderedDict([ self._parse_function(line) for line in fin ])
return self.terms
def __write_forced_ranked(self, line : tuple):
return self._write_ranked(self._rank(line))
# useful functions to interact with the indexer
def __contains__(self, key):
if key in self.terms:
return True
segment = self.__get_segment(key)
if segment is None:
return False
return key in self.__load_segment(segment)
def __getitem__(self, key):
value = self.terms.get(key, None)
if value is not None:
return value
segment = self.__get_segment(key)
if segment is None:
raise KeyError(key)
return self.__load_segment(segment)[key]
def __iter__(self):
for start, end, filename in self.segments:
for value in iter(self.__load_segment(filename)):
yield value
def __len__(self):
length = 0
for start, end, filename in self.segments:
length += len(self.__load_segment(filename))
return length
def get(self, key, default = None):
value = self.terms.get(key, None)
if value is not None:
return value
segment = self.__get_segment(key)
if segment is None:
return default
return self.__load_segment(segment).get(key, default)
def items(self):
for start, end, filename in self.segments:
for item in self.__load_segment(filename).items():
yield item
def keys(self):
for start, end, filename in self.segments:
for key in self.__load_segment(filename).keys():
yield key
def save(self, output):
if type(output) == str:
with open(output, 'w') as fin:
self.save(fin)
else:
for start, end, filename in self.segments:
with open(filename, 'r') as fin:
output.write(fin.read())
def values(self):
for start, end, filename in self.segments:
for value in self.__load_segment(filename).values():
yield value
class __InnerPositionableIndexer(__InnerIndexer):
def update(self, pmid, document):
terms = self.terms
for i, term in enumerate(self.tokenizer.tokenize(document)):
documents = terms.setdefault(term, {})
positions = documents.setdefault(pmid, [])
positions.append(i)
def _merge_unranked(self, docs0 : dict, docs1 : dict):
return { key : sorted(docs0.get(key, []) + docs1.get(key, [])) for key in docs0.keys() | docs1.keys() }
def _parse_ranked(self, line : str):
term, value = line.split(':', 1)
idf, *docs = value.split(';')
return (term, (float(idf), { pmid : (float(weight), [ int(position) for position in positions.split(',') ]) for pmid, weight, positions in [ doc.split(':') for doc in docs ] }))
def _parse_unranked(self, line : str):
term, *docs = line.split(';')
return (term, { pmid : [ int(position) for position in positions.split(',') ] for pmid, positions in [ doc.split(':') for doc in docs ] })
def _rank(self, line : tuple):
term, documents = line
idf = math.log10(len(self.documents) / len(documents))
for pmid, positions in documents.items():
documents[pmid] = ((1 + math.log10(len(positions))) * idf, positions)
return (term, (idf, documents))
def _write_ranked(self, line : tuple):
return '{}:{};{}'.format(line[0], line[1][0], ';'.join([ '{}:{}:{}'.format(pmid, weight, ','.join([ str(position) for position in positions ])) for pmid, (weight, positions) in line[1][1].items() ]))
def _write_unranked(self, line : tuple): # line = (termo, {doc_id : [index1, index2]})
return '{};{}'.format(line[0], ';'.join([ '{}:{}'.format(pmid, ','.join([ str(position) for position in positions ])) for pmid, positions in line[1].items() ]))
def __init__(self, tokenizer : Tokenizer, index_folder : str, store_positions : bool = False, max_memory_usage = 20):
self.__indexer = self.__InnerPositionableIndexer(tokenizer, index_folder, max_memory_usage = max_memory_usage) if store_positions else self.__InnerIndexer(tokenizer, index_folder, max_memory_usage = max_memory_usage)
def __getattr__(self, key):
return getattr(self.__indexer, key)
# explicit declaration of special methods required (not "intercepted" by __getattr__)
def __contains__(self, key):
return self.__indexer.__contains__(key)
def __getitem__(self, key):
return self.__indexer.__getitem__(key)
def __iter__(self):
return self.__indexer.__iter__()
def __len__(self):
return self.__indexer.__len__()
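# Illustrative usage sketch. The Tokenizer and CorpusReader constructors live in
# the modules imported at the top of this file; the arguments passed to them here
# (a no-argument tokenizer, a corpus path) are assumptions for the example only.
if __name__ == '__main__':
    tokenizer = Tokenizer()                       # assumed default constructor
    reader = CorpusReader('corpus.tsv')           # assumed corpus path argument
    indexer = Indexer(tokenizer, 'index', store_positions=False)
    indexer.index(reader)                         # build and spill in-memory blocks
    indexer.merge(calculate_tfidf=True)           # merge blocks into ranked segments
    indexer.save('final_index.txt')               # concatenate segments into one file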
| 43.289389
| 224
| 0.564287
|
eb531edb8adf408d15b621b9b85e3cc969089b47
| 8,794
|
py
|
Python
|
simscale_sdk/models/darcy_forchheimer_medium.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | 8
|
2021-01-22T13:41:03.000Z
|
2022-01-03T09:00:10.000Z
|
simscale_sdk/models/darcy_forchheimer_medium.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | null | null | null |
simscale_sdk/models/darcy_forchheimer_medium.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | 3
|
2021-03-18T15:52:52.000Z
|
2022-01-03T08:59:30.000Z
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class DarcyForchheimerMedium(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'name': 'str',
'coefficient_d': 'DimensionalVectorReciprocalPermeability',
'coefficient_f': 'DimensionalVectorAbsorptivity',
'orientation': 'OneOfDarcyForchheimerMediumOrientation',
'topological_reference': 'TopologicalReference',
'geometry_primitive_uuids': 'list[str]'
}
attribute_map = {
'type': 'type',
'name': 'name',
'coefficient_d': 'coefficientD',
'coefficient_f': 'coefficientF',
'orientation': 'orientation',
'topological_reference': 'topologicalReference',
'geometry_primitive_uuids': 'geometryPrimitiveUuids'
}
def __init__(self, type='DARCY_FORCHHEIMER', name=None, coefficient_d=None, coefficient_f=None, orientation=None, topological_reference=None, geometry_primitive_uuids=None, local_vars_configuration=None): # noqa: E501
"""DarcyForchheimerMedium - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._name = None
self._coefficient_d = None
self._coefficient_f = None
self._orientation = None
self._topological_reference = None
self._geometry_primitive_uuids = None
self.discriminator = None
self.type = type
if name is not None:
self.name = name
if coefficient_d is not None:
self.coefficient_d = coefficient_d
if coefficient_f is not None:
self.coefficient_f = coefficient_f
if orientation is not None:
self.orientation = orientation
if topological_reference is not None:
self.topological_reference = topological_reference
if geometry_primitive_uuids is not None:
self.geometry_primitive_uuids = geometry_primitive_uuids
@property
def type(self):
"""Gets the type of this DarcyForchheimerMedium. # noqa: E501
Schema name: DarcyForchheimerMedium # noqa: E501
:return: The type of this DarcyForchheimerMedium. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this DarcyForchheimerMedium.
Schema name: DarcyForchheimerMedium # noqa: E501
:param type: The type of this DarcyForchheimerMedium. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def name(self):
"""Gets the name of this DarcyForchheimerMedium. # noqa: E501
:return: The name of this DarcyForchheimerMedium. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this DarcyForchheimerMedium.
:param name: The name of this DarcyForchheimerMedium. # noqa: E501
:type: str
"""
self._name = name
@property
def coefficient_d(self):
"""Gets the coefficient_d of this DarcyForchheimerMedium. # noqa: E501
:return: The coefficient_d of this DarcyForchheimerMedium. # noqa: E501
:rtype: DimensionalVectorReciprocalPermeability
"""
return self._coefficient_d
@coefficient_d.setter
def coefficient_d(self, coefficient_d):
"""Sets the coefficient_d of this DarcyForchheimerMedium.
:param coefficient_d: The coefficient_d of this DarcyForchheimerMedium. # noqa: E501
:type: DimensionalVectorReciprocalPermeability
"""
self._coefficient_d = coefficient_d
@property
def coefficient_f(self):
"""Gets the coefficient_f of this DarcyForchheimerMedium. # noqa: E501
:return: The coefficient_f of this DarcyForchheimerMedium. # noqa: E501
:rtype: DimensionalVectorAbsorptivity
"""
return self._coefficient_f
@coefficient_f.setter
def coefficient_f(self, coefficient_f):
"""Sets the coefficient_f of this DarcyForchheimerMedium.
:param coefficient_f: The coefficient_f of this DarcyForchheimerMedium. # noqa: E501
:type: DimensionalVectorAbsorptivity
"""
self._coefficient_f = coefficient_f
@property
def orientation(self):
"""Gets the orientation of this DarcyForchheimerMedium. # noqa: E501
:return: The orientation of this DarcyForchheimerMedium. # noqa: E501
:rtype: OneOfDarcyForchheimerMediumOrientation
"""
return self._orientation
@orientation.setter
def orientation(self, orientation):
"""Sets the orientation of this DarcyForchheimerMedium.
:param orientation: The orientation of this DarcyForchheimerMedium. # noqa: E501
:type: OneOfDarcyForchheimerMediumOrientation
"""
self._orientation = orientation
@property
def topological_reference(self):
"""Gets the topological_reference of this DarcyForchheimerMedium. # noqa: E501
:return: The topological_reference of this DarcyForchheimerMedium. # noqa: E501
:rtype: TopologicalReference
"""
return self._topological_reference
@topological_reference.setter
def topological_reference(self, topological_reference):
"""Sets the topological_reference of this DarcyForchheimerMedium.
:param topological_reference: The topological_reference of this DarcyForchheimerMedium. # noqa: E501
:type: TopologicalReference
"""
self._topological_reference = topological_reference
@property
def geometry_primitive_uuids(self):
"""Gets the geometry_primitive_uuids of this DarcyForchheimerMedium. # noqa: E501
:return: The geometry_primitive_uuids of this DarcyForchheimerMedium. # noqa: E501
:rtype: list[str]
"""
return self._geometry_primitive_uuids
@geometry_primitive_uuids.setter
def geometry_primitive_uuids(self, geometry_primitive_uuids):
"""Sets the geometry_primitive_uuids of this DarcyForchheimerMedium.
:param geometry_primitive_uuids: The geometry_primitive_uuids of this DarcyForchheimerMedium. # noqa: E501
:type: list[str]
"""
self._geometry_primitive_uuids = geometry_primitive_uuids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DarcyForchheimerMedium):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DarcyForchheimerMedium):
return True
return self.to_dict() != other.to_dict()
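# Hedged round-trip sketch for this generated model: only the plain string
# fields are set, because coefficient_d/coefficient_f/orientation would require
# other generated SimScale models that are outside this file.
if __name__ == "__main__":
    medium = DarcyForchheimerMedium(name="porous-zone")  # example name only
    assert medium.type == "DARCY_FORCHHEIMER"            # default discriminator value
    print(medium.to_dict())                              # unset fields serialize as None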
| 31.633094
| 222
| 0.642029
|
40fd1066f5d91209e56e234327a83593a61ea0c6
| 4,683
|
py
|
Python
|
airquality/hyperband/defs_regression/keras_mlp.py
|
javiermas/BCNAirQualityDatathon
|
88e0d487046a3d4b76f7757c7def2350d86766ab
|
[
"MIT"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
airquality/hyperband/defs_regression/keras_mlp.py
|
javiermas/BCNAirQualityDatathon
|
88e0d487046a3d4b76f7757c7def2350d86766ab
|
[
"MIT"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
airquality/hyperband/defs_regression/keras_mlp.py
|
javiermas/BCNAirQualityDatathon
|
88e0d487046a3d4b76f7757c7def2350d86766ab
|
[
"MIT"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
"function (and parameter space) definitions for hyperband"
"regression with Keras (multilayer perceptron)"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization as BatchNorm
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import *
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, MaxAbsScaler
#
# TODO: advanced activations - 'leakyrelu', 'prelu', 'elu', 'thresholdedrelu', 'srelu'
max_layers = 5
max_layer_size = 100
space = {
'scaler': hp.choice( 's',
( None, 'StandardScaler', 'RobustScaler', 'MinMaxScaler', 'MaxAbsScaler' )),
'n_layers': hp.quniform( 'ls', 1, max_layers, 1 ),
#'layer_size': hp.quniform( 'ls', 5, 100, 1 ),
#'activation': hp.choice( 'a', ( 'relu', 'sigmoid', 'tanh' )),
'init': hp.choice( 'i', ( 'uniform', 'normal', 'glorot_uniform',
'glorot_normal', 'he_uniform', 'he_normal' )),
'batch_size': hp.choice( 'bs', ( 16, 32, 64, 128, 256 )),
'shuffle': hp.choice( 'sh', ( False, True )),
'loss': hp.choice( 'l', ( 'mean_absolute_error', 'mean_squared_error' )),
'optimizer': hp.choice( 'o', ( 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax' ))
}
# for each hidden layer, we choose size, activation and extras individually
for i in range( 1, max_layers + 1 ):
space[ 'layer_{}_size'.format( i )] = hp.quniform( 'ls{}'.format( i ),
2, max_layer_size, 1 )
space[ 'layer_{}_activation'.format( i )] = hp.choice( 'a{}'.format( i ),
( 'relu', 'sigmoid', 'tanh' ))
space[ 'layer_{}_extras'.format( i )] = hp.choice( 'e{}'.format( i ), (
{ 'name': 'dropout', 'rate': hp.uniform( 'd{}'.format( i ), 0.1, 0.5 )},
{ 'name': 'batchnorm' },
{ 'name': None } ))
def get_params():
params = sample( space )
return handle_integers( params )
#
# print hidden layers config in readable way
def print_layers( params ):
for i in range( 1, params['n_layers'] + 1 ):
print "layer {} | size: {:>3} | activation: {:<7} | extras: {}".format( i,
params['layer_{}_size'.format( i )],
params['layer_{}_activation'.format( i )],
params['layer_{}_extras'.format( i )]['name'] ),
if params['layer_{}_extras'.format( i )]['name'] == 'dropout':
print "- rate: {:.1%}".format( params['layer_{}_extras'.format( i )]['rate'] ),
print
def print_params( params ):
pprint({ k: v for k, v in params.items() if not k.startswith( 'layer_' )})
print_layers( params )
print
def try_params( n_iterations, params ):
print "iterations:", n_iterations
print_params( params )
y_train = data['y_train']
y_test = data['y_test']
if params['scaler']:
scaler = eval( "{}()".format( params['scaler'] ))
x_train_ = scaler.fit_transform( data['x_train'].astype( float ))
x_test_ = scaler.transform( data['x_test'].astype( float ))
else:
x_train_ = data['x_train']
x_test_ = data['x_test']
input_dim = x_train_.shape[1]
model = Sequential()
model.add( Dense( params['layer_1_size'], init = params['init'],
activation = params['layer_1_activation'], input_dim = input_dim ))
for i in range( int( params['n_layers'] ) - 1 ):
extras = 'layer_{}_extras'.format( i + 1 )
if params[extras]['name'] == 'dropout':
model.add( Dropout( params[extras]['rate'] ))
elif params[extras]['name'] == 'batchnorm':
model.add( BatchNorm())
model.add( Dense( params['layer_{}_size'.format( i + 2 )], init = params['init'],
activation = params['layer_{}_activation'.format( i + 2 )]))
model.add( Dense( 1, init = params['init'], activation = 'linear' ))
model.compile( optimizer = params['optimizer'], loss = params['loss'] )
#print model.summary()
#
validation_data = ( x_test_, y_test )
early_stopping = EarlyStopping( monitor = 'val_loss', patience = 5, verbose = 0 )
history = model.fit( x_train_, y_train,
nb_epoch = int( round( n_iterations )),
batch_size = params['batch_size'],
shuffle = params['shuffle'],
validation_data = validation_data,
callbacks = [ early_stopping ])
#
p = model.predict( x_train_, batch_size = params['batch_size'] )
mse = MSE( y_train, p )
rmse = sqrt( mse )
mae = MAE( y_train, p )
print "\n# training | RMSE: {:.4f}, MAE: {:.4f}".format( rmse, mae )
#
p = model.predict( x_test_, batch_size = params['batch_size'] )
mse = MSE( y_test, p )
rmse = sqrt( mse )
mae = MAE( y_test, p )
print "# testing | RMSE: {:.4f}, MAE: {:.4f}".format( rmse, mae )
return { 'loss': rmse, 'rmse': rmse, 'mae': mae, 'early_stop': model.stop_training }
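# Illustrative driver sketch: this module is normally driven by the hyperband
# runner, which calls get_params()/try_params() repeatedly with an increasing
# iteration budget. A single configuration can be evaluated directly like this
# (it still needs the data provided by load_data_for_regression to be present).
if __name__ == '__main__':
    params = get_params()           # sample one point from `space`
    result = try_params(1, params)  # roughly one epoch of training
    print(result)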
| 30.809211
| 90
| 0.645953
|
6d052aadd71d8dede700a9e0b91b25988b34f579
| 3,251
|
py
|
Python
|
scripts/old_gui.py
|
cognigraphtravis/cognigraph
|
cfed2a32a4b22b15687b13b40a52e54fdbed703a
|
[
"MIT"
] | null | null | null |
scripts/old_gui.py
|
cognigraphtravis/cognigraph
|
cfed2a32a4b22b15687b13b40a52e54fdbed703a
|
[
"MIT"
] | null | null | null |
scripts/old_gui.py
|
cognigraphtravis/cognigraph
|
cfed2a32a4b22b15687b13b40a52e54fdbed703a
|
[
"MIT"
] | null | null | null |
import sys
from pyqtgraph import QtCore, QtGui
from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph import TIME_AXIS
from cognigraph.gui.window import GUIWindow
app = QtGui.QApplication(sys.argv)
pipeline = Pipeline()
# file_path = r"/home/dmalt/Code/python/real_eyes/Koleno.eeg"
file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.eeg"
# vhdr_file_path = r"/home/dmalt/Code/python/real_eyes/Koleno.vhdr"
# vhdr_file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.vhdr"
# vhdr_file_path = r"/home/dmalt/Data/cognigraph/data/Koleno.fif"
fwd_path = r'/home/dmalt/mne_data/MNE-sample-data/MEG/sample/dmalt_custom-fwd.fif'
source = sources.FileSource(file_path=file_path)
pipeline.source = source
# pipeline.source = sources.LSLStreamSource(stream_name='cognigraph-mock-stream')
# Processors
preprocessing = processors.Preprocessing(collect_for_x_seconds=120)
pipeline.add_processor(preprocessing)
linear_filter = processors.LinearFilter(lower_cutoff=8.0, upper_cutoff=12.0)
pipeline.add_processor(linear_filter)
inverse_model = processors.InverseModel(method='MNE', forward_model_path=fwd_path, snr=1.0)
pipeline.add_processor(inverse_model)
# beamformer = processors.Beamformer(forward_model_path=fwd_path, output_type='activation')
# pipeline.add_processor(beamformer)
envelope_extractor = processors.EnvelopeExtractor()
pipeline.add_processor(envelope_extractor)
# Outputs
global_mode = outputs.ThreeDeeBrain.LIMITS_MODES.GLOBAL
three_dee_brain = outputs.ThreeDeeBrain(limits_mode=global_mode, buffer_length=6)
pipeline.add_output(three_dee_brain)
pipeline.add_output(outputs.LSLStreamOutput())
# pipeline.initialize_all_nodes()
signal_viewer = outputs.SignalViewer()
pipeline.add_output(signal_viewer, input_node=linear_filter)
window = GUIWindow(pipeline=pipeline)
window.init_ui()
window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
window.show()
base_controls = window._controls._base_controls
source_controls = base_controls.source_controls
processors_controls = base_controls.processors_controls
outputs_controls = base_controls.outputs_controls
source_controls.source_type_combo.setValue(source_controls.SOURCE_TYPE_PLACEHOLDER)
linear_filter_controls = processors_controls.children()[0]
envelope_controls = processors_controls.children()[2]
# envelope_controls.disabled.setValue(True)
three_dee_brain_controls = outputs_controls.children()[0]
three_dee_brain_controls.limits_mode_combo.setValue('Global')
three_dee_brain_controls.limits_mode_combo.setValue('Local')
window.initialize()
def run():
pipeline.update_all_nodes()
# print(pipeline.source._samples_already_read / 500)
timer = QtCore.QTimer()
timer.timeout.connect(run)
frequency = pipeline.frequency
timer.setInterval(1000. / frequency * 10)
source.loop_the_file = False
source.MAX_SAMPLES_IN_CHUNK = 5
# envelope.disabled = True
if __name__ == '__main__':
timer.start()
timer.stop()
# TODO: this runs when in iPython. It should not.
# Start Qt event loop unless running in interactive mode or using pyside.
# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
# sys.exit(QtGui.QApplication.instance().exec_())
| 31.563107
| 91
| 0.809289
|
16226e18c3da377f4a682ac331457acddf34bd6a
| 3,156
|
py
|
Python
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/isi_sdk_8_2_1/models/copy_errors.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_1.models.copy_errors_copy_errors import CopyErrorsCopyErrors # noqa: F401,E501
class CopyErrors(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'copy_errors': 'list[CopyErrorsCopyErrors]'
}
attribute_map = {
'copy_errors': 'copy_errors'
}
def __init__(self, copy_errors=None): # noqa: E501
"""CopyErrors - a model defined in Swagger""" # noqa: E501
self._copy_errors = None
self.discriminator = None
if copy_errors is not None:
self.copy_errors = copy_errors
@property
def copy_errors(self):
"""Gets the copy_errors of this CopyErrors. # noqa: E501
:return: The copy_errors of this CopyErrors. # noqa: E501
:rtype: list[CopyErrorsCopyErrors]
"""
return self._copy_errors
@copy_errors.setter
def copy_errors(self, copy_errors):
"""Sets the copy_errors of this CopyErrors.
:param copy_errors: The copy_errors of this CopyErrors. # noqa: E501
:type: list[CopyErrorsCopyErrors]
"""
self._copy_errors = copy_errors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CopyErrors):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
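# Hedged sketch: the generated model compares instances through __dict__, so two
# objects built from the same payload are equal. An empty error list keeps the
# example independent of the CopyErrorsCopyErrors item model.
if __name__ == "__main__":
    first = CopyErrors(copy_errors=[])
    second = CopyErrors(copy_errors=[])
    assert first == second          # __eq__ compares the underlying __dict__
    print(first.to_str())           # pretty-printed {'copy_errors': []}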
| 27.443478
| 96
| 0.57858
|
bc549e81f966823a47b077a9c3318a0992119fea
| 196
|
py
|
Python
|
runnertracker/application/root/views.py
|
victormartinez/runnertracker
|
2a390eff7866d5b96ad2a3f34391dd855cf003a6
|
[
"MIT"
] | null | null | null |
runnertracker/application/root/views.py
|
victormartinez/runnertracker
|
2a390eff7866d5b96ad2a3f34391dd855cf003a6
|
[
"MIT"
] | null | null | null |
runnertracker/application/root/views.py
|
victormartinez/runnertracker
|
2a390eff7866d5b96ad2a3f34391dd855cf003a6
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from aiohttp import web
class RootView(web.View):
async def get(self) -> web.Response:
return web.json_response({"healthy": True}, status=HTTPStatus.OK)
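# Hedged wiring sketch: how this class-based view would typically be mounted on
# an aiohttp application. The "/" route path chosen here is an assumption.
if __name__ == "__main__":
    app = web.Application()
    app.router.add_view("/", RootView)
    web.run_app(app)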
| 21.777778
| 73
| 0.719388
|
43ff06639f6a5d4d503984c0f67f51065ba45f54
| 1,437
|
py
|
Python
|
cookielaw/test_project/wsgi.py
|
selectnull/django-cookie-law
|
5da5c54415e99d0d489eea2aa7e471c5e6bd08e6
|
[
"BSD-2-Clause"
] | 182
|
2015-04-21T13:13:12.000Z
|
2022-03-12T14:20:54.000Z
|
cookielaw/test_project/wsgi.py
|
selectnull/django-cookie-law
|
5da5c54415e99d0d489eea2aa7e471c5e6bd08e6
|
[
"BSD-2-Clause"
] | 74
|
2015-01-26T11:15:08.000Z
|
2021-12-17T05:29:07.000Z
|
cookielaw/test_project/wsgi.py
|
selectnull/django-cookie-law
|
5da5c54415e99d0d489eea2aa7e471c5e6bd08e6
|
[
"BSD-2-Clause"
] | 81
|
2015-01-22T09:55:59.000Z
|
2022-03-23T04:53:05.000Z
|
"""
WSGI config for test_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "test_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.545455
| 79
| 0.80515
|
c519986c5e35f71d300155869de556b3fcf70a6d
| 60,062
|
py
|
Python
|
src/sentry/south_migrations/0212_auto__add_fileblob__add_field_file_blob.py
|
apragacz/sf-sentry
|
2fdd6c1195c29a1d401d1cd538c22ea68556699a
|
[
"BSD-3-Clause"
] | 1
|
2018-03-05T15:40:12.000Z
|
2018-03-05T15:40:12.000Z
|
src/sentry/south_migrations/0212_auto__add_fileblob__add_field_file_blob.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 5
|
2020-07-17T11:20:41.000Z
|
2021-05-09T12:16:53.000Z
|
src/sentry/south_migrations/0212_auto__add_fileblob__add_field_file_blob.py
|
zaasmi/codeerrorhelp
|
1ab8d3e314386b9b2d58dad9df45355bf6014ac9
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T09:53:39.000Z
|
2022-03-22T09:01:47.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FileBlob'
db.create_table(
'sentry_fileblob', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), ('storage', self.gf('django.db.models.fields.CharField')(max_length=128)),
('storage_options', self.gf('jsonfield.fields.JSONField')(default={})),
('path', self.gf('django.db.models.fields.TextField')(null=True)), (
'size', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
null=True
)
), (
'checksum',
self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)
), (
'timestamp', self.gf('django.db.models.fields.DateTimeField')(
default=datetime.datetime.now, db_index=True
)
),
)
)
db.send_create_signal('sentry', ['FileBlob'])
# Adding field 'File.blob'
db.add_column(
'sentry_file',
'blob',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.FileBlob'], null=True
),
keep_default=False
)
def backwards(self, orm):
# Deleting model 'FileBlob'
db.delete_table('sentry_fileblob')
# Deleting field 'File.blob'
db.delete_column('sentry_file', 'blob_id')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
})
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']",
'null': 'True'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'storage_options': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'),)"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
| 36.269324
| 99
| 0.395375
|
d8e923eae84d83960ad58d4f24c21437d5ac7460
| 6,901
|
py
|
Python
|
index.py
|
Senarc-Studios/Semethon-API
|
e1c12deb1251012409d789a987d4c8df12e1b374
|
[
"BSD-3-Clause"
] | null | null | null |
index.py
|
Senarc-Studios/Semethon-API
|
e1c12deb1251012409d789a987d4c8df12e1b374
|
[
"BSD-3-Clause"
] | null | null | null |
index.py
|
Senarc-Studios/Semethon-API
|
e1c12deb1251012409d789a987d4c8df12e1b374
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import json
import string
import random
import asyncio
from typing import Optional
from cool_utils import get_data
from pydantic import BaseModel
from fastapi import FastAPI, WebSocket
from fastapi.responses import JSONResponse
from local_cubacrypt import decypher, cypher
from dotenv import find_dotenv, load_dotenv
from pymongo import MongoClient
load_dotenv(find_dotenv())
mongoclient = MongoClient(get_data("config", "MONGO"))
mongoclient.drop_database('database')
mongodb = mongoclient['database']
users = mongodb['users']
session = mongodb['sessions']
temp = mongodb['temp']
web = FastAPI()
def validate_user(
username,
password,
token = None
):
query = {
"username": username,
"password": password
}
if token != None:
query['token'] = token
if users.count_documents(query) == 1:
return True
return False
async def auto_purge_message(message_id):
await asyncio.sleep(5)
temp.delete_one({ "_id": message_id })
def generate_token(size=8, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def generate_message_id(size=8, chars=string.digits):
return ''.join(random.choice(chars) for _ in range(size))
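# Illustrative note (not part of the original source): with the defaults above,
# generate_token() returns an 8-character mix of letters and digits (a value
# shaped like 'Ab3xYz9Q'), while generate_message_id() returns 8 digits
# (e.g. '30417258'); the example values here are made up.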
def connected_users(token):
for sessions in session.find({ "token": token }):
return sessions["connected_users"]
def add_user_to_session(token, username):
query = {
"token": token
}
payload = {
"$addToSet": {
"connected_users": username
}
}
session.update_one(query, payload)
def remove_user_from_session(token, username):
query = {
"token": token
}
payload = {
"$pull": {
"connected_users": username
}
}
session.update_one(query, payload)
def validate_username(username):
if len(username) >= 20 or len(username) <= 3:
return False
else:
return True
def _create_session(username):
if not validate_username(username):
return json.dumps({ "complete": False, "reason": "Bad name", "code": "B01" }), 400
token = generate_token()
token = f"{token}"
payload = {
"token": token,
"username": username,
"connected_users": [username]
}
session.insert_one(payload)
return json.dumps({ "token": token }), 200, {'content-type': 'application/json'}
def _join_session(username, token):
if not validate_username(username):
return json.dumps({ "complete": False, "reason": "Bad name", "code": "B01" }), 400
add_user_to_session(token, username)
return json.dumps({ "complete": True }), 200, {'content-type': 'application/json'}
def _validate_session(token):
if session.count_documents({ "token": token }) == 0:
return json.dumps({ "found": False }), 400, {'content-type': 'application/json'}
else:
return json.dumps({ "found": True }), 200, {'content-type': 'application/json'}
def is_sent_before(username, password, token, message_id):
if not validate_user(username, password, token):
return json.dumps({ "complete": False, "reason": "Invalid Account Details." }), 401, {'content-type': 'application/json'}
for data in temp.find({ "_id": message_id, "session": token }):
if data["users"][username] == False:
return False
else:
return True
def process_message(data: dict):
token = data['token']
username = data['username']
esm = data['esm']
query = {
"token": token
}
if session.count_documents(query) == 0:
return json.dumps({ "complete": False, "reason": "Invalid session token.", "code": "I01" }), 404, {'content-type': 'application/json'}
if username in connected_users(token):
message_id = generate_message_id()
message_id = f"{message_id}"
payload = {
"_id": message_id,
"session": token,
"author": username,
"esm": esm,
"users": {}
}
for user in connected_users(token):
    # Record a per-user delivery flag; updating the key directly avoids
    # overwriting the whole "users" mapping on every iteration.
    payload["users"][user] = False
temp.insert_one(payload)
return json.dumps({ "complete": True }), 200, {'content-type': 'application/json'}
else:
return json.dumps({ "complete": False, "reason": "User not in session.", "code": "I02" }), 400, {'content-type': 'application/json'}
def send_new_messages(username, password, token):
if not validate_user(username, password, token):
return json.dumps({'complete': False, 'reason': "Incorrect Password"}), 401, {'content-type': 'application/json'}
if username in connected_users(token):
for message in temp.find({}):
if temp.count_documents({ "session": token }) == 0:
return "No new messages", 404
if message["session"] == token and is_sent_before(username, password, token, message["_id"]):
query = {
"_id": message["_id"],
"session": token,
"esm": message["esm"]
}
update_payload = {
"$set": {
"_id": message["_id"],
"users": {
f"{username}": True
}
}
}
temp.update_one(query, update_payload)
payload = {
"author": message["author"],
"esm": message["esm"]
}
else:
return "No new messages", 404
return json.dumps(payload), 200, {'content-type': 'application/json'}
def _delete_session(token, username, password):
if not validate_user(username, password, token):
return json.dumps({'complete': False}), 401, {'content-type': 'application/json'}
else:
session_payload = {
"token": token,
"username": username
}
user_payload = {
"username": username,
"password": password
}
session.delete_one(session_payload)
users.delete_one(user_payload)
return json.dumps({'complete': True}), 200, {'content-type': 'application/json'}
class CreateSession(BaseModel):
username: str
password: str
class Session(BaseModel):
username: str
password: str
token: Optional[str]
class Message(BaseModel):
token: str
password: str
esm: str
username: str
class User(BaseModel):
token: str
password: str
username: str
class EncryptedMessage(BaseModel):
esm: str
@web.create("/create-session")
async def create_session(data: CreateSession):
return _create_session(username=data["username"], password=data['password'])
@web.post("/join-session")
async def join_session(data: Session):
return _join_session(data["username"], data['password'], data["token"])
@web.websocket("/message-sync")
async def message_sync(websocket: WebSocket):
await websocket.accept()
while True:
data = await websocket.receive_json()
process_message(data)
# send the result back over the socket and keep the connection open
await websocket.send_json(send_new_messages(data['username'], data['password'], data['token']))
@web.post("/decrypt")
async def decypher_esm(data: EncryptedMessage):
return json.dumps({
"message": decypher(data["esm"])
}), 200, {'content-type': 'application/json'}
@web.post("/validate-session")
async def validate_session(data: Session):
return _validate_session(data["token"])
@web.delete("/delete-session")
async def delete_session(data: Session):
return _delete_session(data.token, data.username, data.password)
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(web, host="127.0.0.1", port=8080)
| 25.654275
| 136
| 0.691494
|
a94189534ce080a8cf3ffff26370b7f4f53abe8a
| 6,101
|
py
|
Python
|
code/examples/Q_Learning_examples/matchers.py
|
ekarais/RLFM
|
479679e39b4fdd230b5a67c2005dd2fb001e7169
|
[
"MIT"
] | 7
|
2020-06-17T14:09:57.000Z
|
2022-03-15T05:20:42.000Z
|
rl_markets_code/matchers.py
|
asikist-ethz/market_rl
|
8a8f45d30a9cb2400107d92f4a13091addc3d6f8
|
[
"MIT"
] | null | null | null |
rl_markets_code/matchers.py
|
asikist-ethz/market_rl
|
8a8f45d30a9cb2400107d92f4a13091addc3d6f8
|
[
"MIT"
] | 2
|
2019-11-12T20:51:24.000Z
|
2020-06-17T14:28:58.000Z
|
__author__ = "Thomas Asikis"
__credits__ = ["Copyright (c) 2019 Thomas Asikis"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Thomas Asikis"
import random
import pandas as pd
from abc import abstractmethod
class Matcher:
def __init__(self):
"""
Abstract matcher object. This object is used by the Market environment to match agent offers
and also decide the deal price.
"""
pass
@abstractmethod
def match(self,
current_actions: dict,
offers: pd.DataFrame,
env_time: int,
agents: pd.DataFrame,
matched: set,
done: dict,
deal_history: pd.DataFrame):
"""
The matching method, which relies on several data structures passed from the market object.
:param current_actions: A dictionary of agent id and offer value
:param offers: The dataframe containing the past offers from agents
:param env_time: the current time step in the market
:param agents: the dataframe containing the agent information
:param matched: the set containing all the ids of matched agents in this round
:param done: the dictionary with agent id as key and a boolean value to determine if an
agent has terminated the episode
:param deal_history: the dictionary containing all the successful deals till now
:return: the dictionary containing the agent id as keys and the rewards as values
"""
rewards: dict = None
return rewards
class RandomMatcher(Matcher):
def __init__(self, reward_on_reference=False):
"""
A random matcher, which decides the deal price of a matched pair by sampling a uniform
distribution bounded in [seller_ask, buyer_bid] range.
The reward is calculated as the difference from cost or the difference to budget for
sellers and buyers.
:param reward_on_reference: The parameter to use a different reward calculation.
If set to true, the reward becomes: offer - reservation price, for sellers,
and: reservation price - offer, for buyers.
You may choose to use this reward scheme, but you have to justify why it is better than
the old!
"""
super().__init__()
self.reward_on_reference = reward_on_reference
def match(self,
current_actions: dict,
offers: pd.DataFrame,
env_time: int,
agents: pd.DataFrame,
matched: set,
done: dict,
deal_history: pd.DataFrame):
"""
The matching method, which relies on several data structures passed from the market object.
:param current_actions: A dictionary of agent id and offer value
:param offers: The dataframe containing the past offers from agents
:param env_time: the current time step in the market
:param agents: the dataframe containing the agent information
:param matched: the set containing all the ids of matched agents in this round
:param done: the dictionary with agent id as key and a boolean value to determine if an
agent has terminated the episode
:param deal_history: the dictionary containing all the successful deals till now
:return: the dictionary containing the agent id as keys and the rewards as values
"""
# update offers
for agent_id, offer in current_actions.items():
if agent_id not in matched:
offers.loc[offers['id'] == agent_id, ['offer', 'time']] = (offer, env_time)
# keep buyer and seller offers with non-matched ids sorted:
# descending by offer value for buyers
# ascending by offer value for sellers
# and do a second sorting on ascending time to break ties for both
buyer_offers = offers[(offers['role'] == 'Buyer') &
(~offers['id'].isin(matched))] \
.sort_values(['offer', 'time'], ascending=[False, True])
seller_offers = offers[(offers['role'] == 'Seller') &
(~offers['id'].isin(matched))] \
.sort_values(['offer', 'time'], ascending=[True, True])
min_len = min(seller_offers.shape[0], buyer_offers.shape[0])
rewards = dict((aid, 0) for aid in agents['id'].tolist())
for i in range(min_len):
considered_seller = seller_offers.iloc[i, :]
considered_buyer = buyer_offers.iloc[i, :]
if considered_buyer['offer'] >= considered_seller['offer']:
# if seller price is lower or equal to buyer price
# matching is performed
matched.add(considered_buyer['id'])
matched.add(considered_seller['id'])
# keeping both done and matched is redundant
done[considered_buyer['id']] = True
done[considered_seller['id']] = True
deal_price = random.uniform(considered_seller['offer'], considered_buyer[
'offer'])
if self.reward_on_reference:
rewards[considered_buyer['id']] = considered_buyer['res_price'] -\
considered_buyer['offer']
rewards[considered_seller['id']] = considered_seller['offer'] - \
considered_seller['res_price']
else:
rewards[considered_buyer['id']] = considered_buyer['offer'] - deal_price
rewards[considered_seller['id']] = deal_price - considered_seller['offer']
matching = dict(Seller=considered_seller['id'], Buyer=considered_buyer['id'],
time=env_time, deal_price=deal_price)
deal_history.append(matching)
else:
# not possible that new matches can occur after this failure due to sorting.
break
return rewards
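# Illustrative usage sketch (not part of the original module; the surrounding
# market environment normally builds and owns these data structures):
#
#   matcher = RandomMatcher(reward_on_reference=True)
#   rewards = matcher.match(current_actions, offers, env_time,
#                           agents, matched, done, deal_history)
#
# `rewards` then maps each agent id to the reward earned in this matching round.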
| 46.930769
| 100
| 0.609572
|
0359f9e0b4b36bcd839f7c8628cd7c5186e904e9
| 346
|
py
|
Python
|
bravepatcher/utils/firewall/AbstractFirewallHelper.py
|
maxisoft/BravePatcher
|
b745567297460eb6a7d8567eb3707cc14cc9d66f
|
[
"MIT"
] | 3
|
2021-11-11T23:10:59.000Z
|
2022-02-05T09:14:15.000Z
|
bravepatcher/utils/firewall/AbstractFirewallHelper.py
|
maxisoft/BravePatcher
|
b745567297460eb6a7d8567eb3707cc14cc9d66f
|
[
"MIT"
] | 3
|
2021-01-12T21:31:18.000Z
|
2021-11-11T23:12:02.000Z
|
bravepatcher/utils/firewall/AbstractFirewallHelper.py
|
maxisoft/BravePatcher
|
b745567297460eb6a7d8567eb3707cc14cc9d66f
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from pathlib import Path
class AbstractFirewallHelper(ABC):
    """Abstract interface for allowing or blocking a program in the host firewall
    and for checking whether a named rule already exists."""
@abstractmethod
def allow_program(self, path: Path, name: str):
pass
@abstractmethod
def block_program(self, path: Path, name: str):
pass
@abstractmethod
def has_rule(self, name: str) -> bool:
pass
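# Illustrative sketch (assumption, not part of the original source): a concrete
# platform-specific helper would subclass AbstractFirewallHelper, e.g. a Windows
# implementation whose allow_program()/block_program() shell out to
# `netsh advfirewall firewall add rule ...`; the class and command here are
# examples only, not the project's actual implementation.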
| 20.352941
| 51
| 0.66763
|
0430a44ae46f08bfab2f0b45c05f08eb1f7cdbd7
| 21,466
|
py
|
Python
|
natsort/natsort.py
|
thethiny/natsort
|
36f0d6f553c02c5937af94541463d30656537615
|
[
"MIT"
] | null | null | null |
natsort/natsort.py
|
thethiny/natsort
|
36f0d6f553c02c5937af94541463d30656537615
|
[
"MIT"
] | null | null | null |
natsort/natsort.py
|
thethiny/natsort
|
36f0d6f553c02c5937af94541463d30656537615
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Along with ns_enum.py, this module contains all of the
natsort public API.
The majority of the "work" is defined in utils.py.
"""
import platform
from functools import partial
from operator import itemgetter
from typing import TYPE_CHECKING, Callable, Iterable, TypeVar

if TYPE_CHECKING:
    # _typeshed is only available to static type checkers, not at runtime.
    from _typeshed import SupportsLessThan
import natsort.compat.locale
from natsort import utils
from natsort.ns_enum import NS_DUMB, ns
_T = TypeVar("_T")
def decoder(encoding):
"""
Return a function that can be used to decode bytes to unicode.
Parameters
----------
encoding : str
The codec to use for decoding. This must be a valid unicode codec.
Returns
-------
decode_function
A function that takes a single argument and attempts to decode
it using the supplied codec. Any `UnicodeErrors` are raised.
If the argument was not of `bytes` type, it is simply returned
as-is.
See Also
--------
as_ascii
as_utf8
Examples
--------
>>> f = decoder('utf8')
>>> f(b'bytes') == 'bytes'
True
>>> f(12345) == 12345
True
>>> # On Python 3, without decoder this would return [b'a10', b'a2']
>>> natsorted([b'a10', b'a2'], key=decoder('utf8')) == [b'a2', b'a10']
True
>>> # On Python 3, without decoder this would raise a TypeError.
>>> natsorted([b'a10', 'a2'], key=decoder('utf8')) == ['a2', b'a10']
True
"""
return partial(utils.do_decoding, encoding=encoding)
def as_ascii(s):
"""
Function to decode an input with the ASCII codec, or return as-is.
Parameters
----------
s : object
Returns
-------
output
If the input was of type `bytes`, the return value is a `str` decoded
with the ASCII codec. Otherwise, the return value is identically the
input.
See Also
--------
decoder
"""
return utils.do_decoding(s, "ascii")
def as_utf8(s):
"""
Function to decode an input with the UTF-8 codec, or return as-is.
Parameters
----------
s : object
Returns
-------
output
If the input was of type `bytes`, the return value is a `str` decoded
with the UTF-8 codec. Otherwise, the return value is identically the
input.
See Also
--------
decoder
"""
return utils.do_decoding(s, "utf-8")
def natsort_keygen(key=None, alg=ns.DEFAULT):
"""
Generate a key to sort strings and numbers naturally.
This key is designed for use as the `key` argument to
functions such as the `sorted` builtin.
The user may customize the generated function with the
arguments to `natsort_keygen`, including an optional
`key` function.
Parameters
----------
key : callable, optional
A key used to manipulate the input value before parsing for
numbers. It is **not** applied recursively.
It should accept a single argument and return a single value.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : function
A function that parses input for natural sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
natsorted
natsort_key
Examples
--------
`natsort_keygen` is a convenient way to create a custom key
to sort lists in-place (for example).::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> a.sort(key=natsort_keygen(alg=ns.REAL))
>>> a
['num-3', 'num2', 'num5.10', 'num5.3']
"""
try:
ns.DEFAULT | alg
except TypeError:
msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'"
raise ValueError(msg + ", got {}".format(str(alg)))
# Add the NS_DUMB option if the locale library is broken.
if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort():
alg |= NS_DUMB
# Set some variables that will be passed to the factory functions
if alg & ns.NUMAFTER:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale_max
else:
sep = natsort.compat.locale.null_string_max
pre_sep = natsort.compat.locale.null_string_max
else:
if alg & ns.LOCALEALPHA:
sep = natsort.compat.locale.null_string_locale
else:
sep = natsort.compat.locale.null_string
pre_sep = natsort.compat.locale.null_string
regex = utils.regex_chooser(alg)
# Create the functions that will be used to split strings.
input_transform = utils.input_string_transform_factory(alg)
component_transform = utils.string_component_transform_factory(alg)
final_transform = utils.final_data_transform_factory(alg, sep, pre_sep)
# Create the high-level parsing functions for strings, bytes, and numbers.
string_func = utils.parse_string_factory(
alg, sep, regex.split, input_transform, component_transform, final_transform
)
if alg & ns.PATH:
string_func = utils.parse_path_factory(string_func)
bytes_func = utils.parse_bytes_factory(alg)
num_func = utils.parse_number_or_none_factory(alg, sep, pre_sep)
# Return the natsort key with the parsing path pre-chosen.
return partial(
utils.natsort_key,
key=key,
string_func=string_func,
bytes_func=bytes_func,
num_func=num_func,
)
# Exposed for simplicity if one needs the default natsort key.
natsort_key = natsort_keygen()
natsort_key.__doc__ = """\
natsort_key(val)
The default natural sorting key.
This is the output of :func:`natsort_keygen` with default values.
See Also
--------
natsort_keygen
"""
def natsorted(seq: Iterable[_T], key: "Callable[[_T], SupportsLessThan]" = None, reverse=False, alg=ns.DEFAULT):
"""
Sorts an iterable naturally.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the iterable.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out: list
The sorted input.
See Also
--------
natsort_keygen : Generates the key that makes natural sorting possible.
realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``.
humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``.
index_natsorted : Returns the sorted indexes from `natsorted`.
os_sorted : Sort according to your operating system's rules.
Examples
--------
Use `natsorted` just like the builtin `sorted`::
>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
['num2', 'num3', 'num5']
"""
key = natsort_keygen(key, alg)
return sorted(seq, reverse=reverse, key=key)
def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort non-numeric characters.
This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_humansorted : Returns the sorted indexes from `humansorted`.
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> natsorted(a)
['Apple', 'Banana', 'apple', 'banana']
>>> humansorted(a)
['apple', 'Apple', 'banana', 'Banana']
"""
return natsorted(seq, key, reverse, alg | ns.LOCALE)
def realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort signed floats.
A signed float in a string could be "a-5.7". This is a wrapper around
``natsorted(seq, alg=ns.REAL)``.
The behavior of :func:`realsorted` for `natsort` version >= 4.0.0
was the default behavior of :func:`natsorted` for `natsort`
version < 4.0.0.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.REAL`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_realsorted : Returns the sorted indexes from `realsorted`.
Examples
--------
Use `realsorted` just like the builtin `sorted`::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> natsorted(a)
['num2', 'num5.3', 'num5.10', 'num-3']
>>> realsorted(a)
['num-3', 'num2', 'num5.10', 'num5.3']
"""
return natsorted(seq, key, reverse, alg | ns.REAL)
def index_natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Determine the list of the indexes used to sort the input sequence.
Sorts a sequence naturally, but returns a list of sorted the
indexes and not the sorted list itself. This list of indexes
can be used to sort multiple lists by the sorted order of the
given sequence.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.INT`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
natsorted
order_by_index
Examples
--------
Use index_natsorted if you want to sort multiple lists by the
sorted order of one list::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
['num2', 'num3', 'num5']
>>> order_by_index(b, index)
['baz', 'foo', 'bar']
"""
if key is None:
newkey = itemgetter(1)
else:
def newkey(x):
return key(itemgetter(1)(x))
# Pair the index and sequence together, then sort by element
index_seq_pair = [(x, y) for x, y in enumerate(seq)]
index_seq_pair.sort(reverse=reverse, key=natsort_keygen(newkey, alg))
return [x for x, _ in index_seq_pair]
def index_humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
This is a wrapper around ``index_natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq: iterable
The input to sort.
key: callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
humansorted
order_by_index
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `index_humansorted` just like the builtin `sorted`::
>>> a = ['Apple', 'Banana', 'apple', 'banana']
>>> index_humansorted(a)
[2, 0, 3, 1]
"""
return index_natsorted(seq, key, reverse, alg | ns.LOCALE)
def index_realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
This is a wrapper around ``index_natsorted(seq, alg=ns.REAL)``.
Parameters
----------
seq: iterable
The input to sort.
key: callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.REAL`.
Returns
-------
out : tuple
The ordered indexes of the input.
See Also
--------
realsorted
order_by_index
Examples
--------
Use `index_realsorted` just like the builtin `sorted`::
>>> a = ['num5.10', 'num-3', 'num5.3', 'num2']
>>> index_realsorted(a)
[1, 3, 0, 2]
"""
return index_natsorted(seq, key, reverse, alg | ns.REAL)
# noinspection PyShadowingBuiltins,PyUnresolvedReferences
def order_by_index(seq, index, iter=False):
"""
Order a given sequence by an index sequence.
The output of `index_natsorted` is a
sequence of integers (index) that correspond to how its input
sequence **would** be sorted. The idea is that this index can
be used to reorder multiple sequences by the sorted order of the
first sequence. This function is a convenient wrapper to
apply this ordering to a sequence.
Parameters
----------
seq : sequence
The sequence to order.
index : iterable
The iterable that indicates how to order `seq`.
It should be the same length as `seq` and consist
of integers only.
iter : {{True, False}}, optional
If `True`, the ordered sequence is returned as a
iterator; otherwise it is returned as a
list. The default is `False`.
Returns
-------
out : {{list, iterator}}
The sequence ordered by `index`, as a `list` or as an
iterator (depending on the value of `iter`).
See Also
--------
index_natsorted
index_humansorted
index_realsorted
Examples
--------
`order_by_index` is a convenience function that helps you apply
the result of `index_natsorted`::
>>> a = ['num3', 'num5', 'num2']
>>> b = ['foo', 'bar', 'baz']
>>> index = index_natsorted(a)
>>> index
[2, 0, 1]
>>> # Sort both lists by the sort order of a
>>> order_by_index(a, index)
['num2', 'num3', 'num5']
>>> order_by_index(b, index)
['baz', 'foo', 'bar']
"""
return (seq[i] for i in index) if iter else [seq[i] for i in index]
def numeric_regex_chooser(alg):
"""
Select an appropriate regex for the type of number of interest.
Parameters
----------
alg : ns enum
Used to indicate the regular expression to select.
Returns
-------
regex : str
Regular expression string that matches the desired number type.
"""
# Remove the leading and trailing parens
return utils.regex_chooser(alg).pattern[1:-1]
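# Illustrative note (assumption, not from the original source): because the outer
# grouping parentheses are stripped, the returned pattern can be embedded inside a
# larger expression, e.g. re.compile(r"version_(" + numeric_regex_chooser(ns.INT) + r")").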
def _split_apply(v, key=None):
if key is not None:
v = key(v)
return utils.path_splitter(str(v))
# Choose the implementation based on the host OS
if platform.system() == "Windows":
from ctypes import wintypes, windll
from functools import cmp_to_key
_windows_sort_cmp = windll.Shlwapi.StrCmpLogicalW
_windows_sort_cmp.argtypes = [wintypes.LPWSTR, wintypes.LPWSTR]
_windows_sort_cmp.restype = wintypes.INT
_winsort_key = cmp_to_key(_windows_sort_cmp)
def os_sort_keygen(key=None):
return lambda x: tuple(map(_winsort_key, _split_apply(x, key)))
else:
# For UNIX-based platforms, ICU performs MUCH better than locale
# at replicating the file explorer's sort order. We will use
# ICU's ability to do basic natural sorting as it also better
# replicates than what natsort does by default.
#
# However, if the user does not have ICU installed then fall back
# on natsort's default handling for paths with locale turned on
# which will give good results in most cases (e.g. when there aren't
# a bunch of special characters).
try:
import icu
except ImportError:
# No ICU installed
def os_sort_keygen(key=None):
return natsort_keygen(key=key, alg=ns.LOCALE | ns.PATH | ns.IGNORECASE)
else:
# ICU installed
def os_sort_keygen(key=None):
loc = natsort.compat.locale.get_icu_locale()
collator = icu.Collator.createInstance(loc)
collator.setAttribute(
icu.UCollAttribute.NUMERIC_COLLATION, icu.UCollAttributeValue.ON
)
return lambda x: tuple(map(collator.getSortKey, _split_apply(x, key)))
os_sort_keygen.__doc__ = """
Generate a sorting key to replicate your file browser's sort order
See :func:`os_sorted` for description and caveats.
Returns
-------
out : function
A function that parses input for OS path sorting that is
suitable for passing as the `key` argument to functions
such as `sorted`.
See Also
--------
os_sort_key
os_sorted
Notes
-----
On Windows, this will implicitly coerce all inputs to str before
collating.
"""
os_sort_key = os_sort_keygen()
os_sort_key.__doc__ = """
os_sort_key(val)
The default key to replicate your file browser's sort order
This is the output of :func:`os_sort_keygen` with default values.
See Also
--------
os_sort_keygen
"""
def os_sorted(seq, key=None, reverse=False):
"""
Sort elements in the same order as your operating system's file browser
.. warning::
The resulting function will generate results that will be
different depending on your platform. This is intentional.
On Windows, this will sort with the same order as Windows Explorer.
On MacOS/Linux, you will get different results depending on whether
or not you have :mod:`pyicu` installed.
- If you have :mod:`pyicu` installed, you will get results that are
the same as (or very close to) the same order as your operating
system's file browser.
- If you do not have :mod:`pyicu` installed, then this will give
the same results as if you used ``ns.LOCALE``, ``ns.PATH``,
and ``ns.IGNORECASE`` with :func:`natsorted`. If you do not have
special characters this will give correct results, but once
special characters are added you should lower your expectations.
It is *strongly* recommended to have :mod:`pyicu` installed on
MacOS/Linux if you want correct sort results.
It does *not* take into account if a path is a directory or a file
when sorting.
Parameters
----------
seq : iterable
The input to sort. Each element must be of type str.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It should accept a single argument and return a single value.
reverse : {{True, False}}, optional
Return the list in reversed sorted order. The default is
`False`.
Returns
-------
out : list
The sorted input.
See Also
--------
natsorted
os_sort_keygen
Notes
-----
This will implicitly coerce all inputs to str before collating.
"""
return sorted(seq, key=os_sort_keygen(key), reverse=reverse)
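# Illustrative note (not part of the original source): usage mirrors natsorted,
# e.g. os_sorted(['photo10.jpg', 'photo2.jpg']) orders the names the way a file
# browser would, with exact results depending on the platform and on whether
# pyicu is installed (see the docstring above).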
| 28.133683
| 108
| 0.629321
|
b98b48519865e4c9bb60d049b0bed8b5c3a96f97
| 12,327
|
py
|
Python
|
server/views.py
|
aysiu/Crypt-Server
|
90ab865e392ece250d06b0c035961665deaca89a
|
[
"Apache-2.0"
] | null | null | null |
server/views.py
|
aysiu/Crypt-Server
|
90ab865e392ece250d06b0c035961665deaca89a
|
[
"Apache-2.0"
] | null | null | null |
server/views.py
|
aysiu/Crypt-Server
|
90ab865e392ece250d06b0c035961665deaca89a
|
[
"Apache-2.0"
] | null | null | null |
from models import *
from django.contrib.auth.decorators import login_required, permission_required
from django.template import RequestContext, Template, Context
import json
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse, Http404, JsonResponse
from django.contrib.auth.models import Permission, User
from django.conf import settings
from django.template.context_processors import csrf
from django.shortcuts import render, get_object_or_404, redirect
from datetime import datetime, timedelta
from django.db.models import Q
from forms import *
from django.views.defaults import server_error
from django.core.mail import send_mail
from django.conf import settings
from django.core.urlresolvers import reverse
# Create your views here.
##clean up old requests
def cleanup():
how_many_days = 7
the_requests = Request.objects.filter(date_approved__lte=datetime.now()-timedelta(days=how_many_days)).filter(current=True)
for the_req in the_requests:
the_req.current = False
the_req.save()
##index view
@login_required
def index(request):
cleanup()
#show table with all the keys
computers = Computer.objects.all()
if hasattr(settings, 'ALL_APPROVE'):
if settings.ALL_APPROVE == True:
permissions = Permission.objects.all()
permission = Permission.objects.get(codename='can_approve')
if request.user.has_perm('server.can_approve') == False:
request.user.user_permissions.add(permission)
request.user.save()
##get the number of oustanding requests - approved equals null
outstanding = Request.objects.filter(approved__isnull=True)
if hasattr(settings, 'APPROVE_OWN'):
if settings.APPROVE_OWN == False:
outstanding = outstanding.filter(~Q(requesting_user=request.user))
c = {'user': request.user, 'computers':computers, 'outstanding':outstanding, }
return render(request,'server/index.html', c)
##view to see computer info
@login_required
def computer_info(request, computer_id=None, serial=None):
cleanup()
if computer_id:
computer = get_object_or_404(Computer, pk=computer_id)
else:
computer = get_object_or_404(Computer, serial=serial)
can_request = None
approved = None
# Get the secrets, annotated with whether there are approvals for them
secrets = computer.secret_set.all().prefetch_related()
for secret in secrets:
secret.approved = Request.objects.filter(requesting_user=request.user).filter(approved=True).filter(current=True).filter(secret=secret)
secret.pending = Request.objects.filter(requesting_user=request.user).filter(approved__isnull=True).filter(secret=secret)
c = {'user': request.user, 'computer':computer, 'secrets':secrets }
return render(request,'server/computer_info.html', c)
@login_required
def secret_info(request, secret_id):
cleanup()
secret = get_object_or_404(Secret, pk=secret_id)
computer = secret.computer
##check if the user has outstanding request for this
pending = secret.request_set.filter(requesting_user=request.user).filter(approved__isnull=True)
if pending.count() == 0:
can_request = True
else:
can_request = False
##if it's been approved, we'll show a link to retrieve the key
approved = secret.request_set.filter(requesting_user=request.user).filter(approved=True).filter(current=True)
requests = secret.request_set.all()
c = {'user': request.user, 'computer':computer, 'can_request':can_request, 'approved':approved, 'secret':secret, 'requests':requests}
if approved.count() != 0:
return render(request,'server/secret_approved_button.html', c)
else:
return render(request,'server/secret_request_button.html', c)
##request key view
@login_required
def request(request, secret_id):
##we will auto approve this if the user has the right perms
secret = get_object_or_404(Secret, pk=secret_id)
approver = False
if request.user.has_perm('server.can_approve'):
approver = True
if approver == True:
if hasattr(settings, 'APPROVE_OWN'):
if settings.APPROVE_OWN == False:
approver = False
c = {}
c.update(csrf(request))
if request.method == 'POST':
form = RequestForm(request.POST)
if form.is_valid():
new_request = form.save(commit=False)
new_request.requesting_user = request.user
new_request.secret = secret
new_request.save()
if approver:
new_request.auth_user = request.user
new_request.approved = True
new_request.date_approved = datetime.now()
new_request.save()
else:
# User isn't an approver, send an email to all of the approvers
perm = Permission.objects.get(codename='can_approve')
users = User.objects.filter(Q(is_superuser=True) | Q(groups__permissions=perm) | Q(user_permissions=perm) ).distinct()
if hasattr(settings, 'HOST_NAME'):
server_name = settings.HOST_NAME.rstrip('/')
else:
server_name = 'http://crypt'
if hasattr(settings, 'SEND_EMAIL'):
if settings.SEND_EMAIL == True:
for user in users:
if user.email:
email_message = """ There has been a new key request by %s. You can review this request at %s%s
""" % (request.user.username, server_name, reverse('approve', args=[new_request.id]))
email_sender = 'requests@%s' % request.META['SERVER_NAME']
send_mail('Crypt Key Request', email_message, email_sender,
[user.email], fail_silently=True)
##if we're an approver, we'll redirect to the retrieve view
if approver:
return redirect('retrieve', new_request.id)
else:
return redirect('secret_info', secret.id)
else:
form = RequestForm()
c = {'form': form, 'secret':secret, }
return render(request,'server/request.html', c)
##retrieve key view
@login_required
def retrieve(request, request_id):
cleanup()
the_request = get_object_or_404(Request, pk=request_id)
if the_request.approved == True and the_request.current==True:
if hasattr(settings, 'ROTATE_VIEWED_SECRETS'):
if settings.ROTATE_VIEWED_SECRETS:
the_request.secret.rotation_required = True
the_request.secret.save()
c = {'user': request.user, 'the_request':the_request, }
return render(request,'server/retrieve.html', c)
else:
raise Http404
## approve key view
@permission_required('server.can_approve', login_url='/login/')
def approve(request, request_id):
the_request = get_object_or_404(Request, pk=request_id)
c = {}
c.update(csrf(request))
if request.method == 'POST':
form = ApproveForm(request.POST, instance=the_request)
if form.is_valid():
new_request = form.save(commit=False)
new_request.auth_user = request.user
new_request.date_approved = datetime.now()
new_request.save()
# Send an email to the requester with a link to retrieve (or not)
if hasattr(settings, 'HOST_NAME'):
server_name = settings.HOST_NAME.rstrip('/')
else:
server_name = 'http://crypt'
if new_request.approved == True:
request_status = 'approved'
elif new_request.approved == False:
request_status = 'denied'
if hasattr(settings, 'SEND_EMAIL'):
if settings.SEND_EMAIL == True:
if new_request.requesting_user.email:
email_message = """ Your key request has been %s by %s. %s%s
""" % (request_status, request.user.username, server_name, reverse('secret_info', args=[new_request.id]))
email_sender = 'requests@%s' % request.META['SERVER_NAME']
send_mail('Crypt Key Request', email_message, email_sender,
[new_request.requesting_user.email], fail_silently=True)
return redirect('managerequests')
else:
form = ApproveForm(instance=the_request)
c = {'form':form, 'user': request.user, 'the_request':the_request, }
return render(request,'server/approve.html', c)
##manage requests
@permission_required('server.can_approve', login_url='/login/')
def managerequests(request):
requests = Request.objects.filter(approved__isnull=True)
if hasattr(settings, 'APPROVE_OWN'):
if settings.APPROVE_OWN == False:
requests = requests.filter(~Q(requesting_user=request.user))
c = {'user': request.user, 'requests':requests, }
return render(request,'server/manage_requests.html', c)
# Add new manual computer
@login_required
def new_computer(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
form = ComputerForm(request.POST)
if form.is_valid():
new_computer = form.save(commit=False)
new_computer.save()
form.save_m2m()
return redirect('computer_info', new_computer.id)
else:
form = ComputerForm()
c = {'form': form}
return render(request, 'server/new_computer_form.html', c)
@login_required
def new_secret(request, computer_id):
c = {}
c.update(csrf(request))
computer = get_object_or_404(Computer, pk=computer_id)
if request.method == 'POST':
form = SecretForm(request.POST)
if form.is_valid():
new_secret = form.save(commit=False)
new_secret.computer = computer
new_secret.date_escrowed = datetime.now()
new_secret.save()
#form.save_m2m()
return redirect('computer_info', computer.id)
else:
form = SecretForm()
c = {'form': form, 'computer': computer, }
return render(request, 'server/new_secret_form.html', c)
# Verify key escrow
@csrf_exempt
def verify(request, serial, secret_type):
computer = get_object_or_404(Computer, serial=serial)
try:
secret = Secret.objects.filter(computer=computer, secret_type=secret_type).latest('date_escrowed')
output = {'escrowed': True, 'date_escrowed':secret.date_escrowed}
except Secret.DoesNotExist:
output = {'escrowed': False}
return JsonResponse(output)
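# Hedged illustration (added; not part of the original app): a client-side sketch of querying
# the verify view above. The '/verify/<serial>/<secret_type>/' path and the server name are
# assumptions -- the real route depends on this project's urls.py.
def _example_verify_client(server='https://crypt.example.com',
                           serial='C02EXAMPLE123', secret_type='recovery_key'):
    import requests  # local import keeps this illustrative helper self-contained
    resp = requests.get('%s/verify/%s/%s/' % (server, serial, secret_type))
    # Per the view above, the JSON is either {'escrowed': True, 'date_escrowed': ...}
    # or {'escrowed': False} when nothing has been escrowed for that computer yet.
    return resp.json()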
##checkin view
@csrf_exempt
def checkin(request):
try:
serial_num = request.POST['serial']
except:
return HttpResponse(status=500)
try:
recovery_pass = request.POST['recovery_password']
except:
return HttpResponse(status=500)
try:
macname = request.POST['macname']
except:
macname = serial_num
try:
user_name = request.POST['username']
except:
return HttpResponse(status=500)
try:
secret_type = request.POST['secret_type']
except:
secret_type = 'recovery_key'
try:
computer = Computer.objects.get(serial=serial_num)
except Computer.DoesNotExist:
computer = Computer(serial=serial_num)
#computer = Computer(recovery_key=recovery_pass, serial=serial_num, last_checkin = datetime.now(), username=user_name, computername=macname)
computer.last_checkin = datetime.now()
computer.username=user_name
computer.computername = macname
computer.secret_type = secret_type
computer.save()
try:
secret = Secret(computer=computer, secret=recovery_pass, secret_type=secret_type, date_escrowed=datetime.now())
secret.save()
except ValidationError:
pass
latest_secret = Secret.objects.filter(secret_type=secret_type).latest('date_escrowed')
rotation_required = latest_secret.rotation_required
c = {
'serial':computer.serial,
'username':computer.username,
'rotation_required': rotation_required
}
return HttpResponse(json.dumps(c), content_type="application/json")
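# Hedged illustration (added; not part of the original app): a minimal client sketch for the
# checkin view above. The field names mirror the request.POST keys read by the view; the
# '/checkin/' path, server name and example values are assumptions.
def _example_checkin_client(server='https://crypt.example.com'):
    import requests  # local import keeps this illustrative helper self-contained
    payload = {
        'serial': 'C02EXAMPLE123',                              # required
        'recovery_password': 'AAAA-BBBB-CCCC-DDDD-EEEE-FFFF',   # required
        'username': 'jane',                                     # required
        'macname': 'janes-macbook',          # optional, defaults to the serial
        'secret_type': 'recovery_key',       # optional, defaults to 'recovery_key'
    }
    resp = requests.post('%s/checkin/' % server, data=payload)
    # The view responds with JSON containing serial, username and rotation_required.
    return resp.json()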
| 38.642633
| 144
| 0.655228
|
a80686019cfc6bf706615e552dafe3c839c36389
| 2,395
|
py
|
Python
|
ropy/models/mdl_panda.py
|
jhavl/ropy
|
38b12369530253a16c22ef1f5be0bcb75053ffd8
|
[
"MIT"
] | 16
|
2020-02-20T10:28:01.000Z
|
2021-07-12T09:03:25.000Z
|
ropy/models/mdl_panda.py
|
jhavl/ropy
|
38b12369530253a16c22ef1f5be0bcb75053ffd8
|
[
"MIT"
] | 2
|
2020-04-27T01:35:44.000Z
|
2020-04-27T23:19:53.000Z
|
ropy/models/mdl_panda.py
|
jhavl/ropy
|
38b12369530253a16c22ef1f5be0bcb75053ffd8
|
[
"MIT"
] | 3
|
2020-04-07T03:00:02.000Z
|
2020-09-05T23:49:28.000Z
|
# #!/usr/bin/env python
# import numpy as np
# from ropy.robot.Revolute import Revolute
# from ropy.robot.SerialLink import SerialLink
# from ropy.tools.transform import transl, xyzrpy_to_trans
# class Panda(SerialLink):
# """
# A class representing the Franka Emika Panda robot arm.
# DH Parameters taken from https://frankaemika.github.io/docs/control_parameters.html
# Attributes:
# --------
# name : string
# Name of the robot
# manufacturer : string
# Manufacturer of the robot
# links : List[n]
# Series of links which define the robot
# base : float np.ndarray(4,4)
#         Location of the base
# tool : float np.ndarray(4,4)
# Location of the tool
# mdh : int
#         1: Panda uses modified D&H parameters
# n : int
# Number of joints in the robot
# Examples
# --------
# >>> panda = Panda()
# See Also
# --------
# ropy.robot.SerialLink : A superclass for arm type robots
# """
# def __init__(self):
# deg = np.pi/180
# mm = 1e-3
# flange = (107)*mm
# d7 = (103)*mm
# L1 = Revolute(a = 0.0, d = 0.333, alpha = 0.0, qlim = np.array([-2.8973, 2.8973]), mdh = 1)
# L2 = Revolute(a = 0.0, d = 0.0, alpha = -np.pi/2, qlim = np.array([-1.7628, 1.7628]), mdh = 1)
# L3 = Revolute(a = 0.0, d = 0.316, alpha = np.pi/2, qlim = np.array([-2.8973, 2.8973]), mdh = 1)
# L4 = Revolute(a = 0.0825, d = 0.0, alpha = np.pi/2, qlim = np.array([-3.0718, -0.0698]), mdh = 1)
# L5 = Revolute(a =-0.0825, d = 0.384, alpha = -np.pi/2, qlim = np.array([-2.8973, 2.8973]), mdh = 1)
# L6 = Revolute(a = 0.0, d = 0.0, alpha = np.pi/2, qlim = np.array([-0.0175, 3.7525]), mdh = 1)
# L7 = Revolute(a = 0.088, d =flange, alpha = np.pi/2, qlim = np.array([-2.8973, 2.8973]), mdh = 1)
# L = [L1, L2, L3, L4, L5, L6, L7]
# # super(Panda, self).__init__(L, name = 'Panda', manufacturer = 'Franka Emika', tool = transl(0, 0, d7))
# super(Panda, self).__init__(L, name = 'Panda', manufacturer = 'Franka Emika', tool = xyzrpy_to_trans(0, 0, d7, 0, 0, -np.pi/4))
# self.qz = np.array([0, 0, 0, 0, 0, 0, 0])
# self.qr = np.array([0, -90, -90, 90, 0, -90, 90]) * deg
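# Hedged usage sketch (added for illustration; the class above is commented out in this
# file, so this note stays in comments too). If re-enabled it would be used roughly as
# the docstring's example shows:
#     panda = Panda()
#     q_ready = panda.qr   # the 'ready' joint configuration defined in __init__
# Any further kinematics calls would come from the SerialLink superclass and are an
# assumption about its API, not something defined here.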
| 36.846154
| 137
| 0.517745
|
34e2bb33253c0a8595ca30111a3d42ed9c194fe5
| 2,485
|
py
|
Python
|
src/org_setup/resources/accessanalyzer.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
src/org_setup/resources/accessanalyzer.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
src/org_setup/resources/accessanalyzer.py
|
gilyas/aws-control-tower-org-setup-sample
|
65c1a1a0c7b7bb362dff1924f38f63bd8c3a8e41
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: MIT-0
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from aws_lambda_powertools import Logger
import boto3
import botocore
from .sts import STS
from ..constants import ORGANIZATION_ANALYZER_NAME
logger = Logger(child=True)
__all__ = ["AccessAnalyzer"]
class AccessAnalyzer:
def __init__(self, session: boto3.Session) -> None:
self.client = session.client("accessanalyzer")
def create_analyzer(self, analyzer_name: str, analyzer_type: str) -> None:
"""
Create an IAM access analyzer
"""
try:
self.client.create_analyzer(analyzerName=analyzer_name, type=analyzer_type)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] != "ConflictException":
logger.exception("Unable to create IAM access analyzer")
raise error
@classmethod
def create_org_analyzer(cls, session: boto3.Session, account_id: str) -> None:
"""
Create an organization IAM access analyzer in the desired account
"""
assumed_role_session = STS(session).assume_role(account_id, "accessanalyzer")
client = cls(assumed_role_session)
logger.info(
f"Creating organizational IAM access analyzer in account {account_id}"
)
client.create_analyzer(ORGANIZATION_ANALYZER_NAME, "ORGANIZATION")
logger.debug(
f"Created organizational IAM access analyzer in account {account_id}"
)
| 37.089552
| 87
| 0.709054
|
52cb1269eb216e15769828fd613ef1a43b4425aa
| 246
|
py
|
Python
|
example_crm/dev_patrick.py
|
pkimber/crm
|
835e8ff3161404316b7da35cf61e3851763b37b9
|
[
"Apache-2.0"
] | 4
|
2015-02-22T07:22:03.000Z
|
2017-03-13T05:36:23.000Z
|
example_crm/dev_patrick.py
|
pkimber/old-crm-migrated-to-gitlab
|
835e8ff3161404316b7da35cf61e3851763b37b9
|
[
"Apache-2.0"
] | 1
|
2015-07-03T23:52:37.000Z
|
2016-04-10T14:49:36.000Z
|
example_crm/dev_patrick.py
|
pkimber/old-crm-migrated-to-gitlab
|
835e8ff3161404316b7da35cf61e3851763b37b9
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'temp.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
| 16.4
| 47
| 0.410569
|
ef737764319dde6f64dbe5b233ce3173f2916280
| 927
|
py
|
Python
|
tests/turing_machine/test_rule.py
|
kigawas/computation-py
|
4e1dc356c03b845cae1218a244ad2f2ce2b10132
|
[
"MIT"
] | 24
|
2016-04-16T09:10:15.000Z
|
2022-03-14T08:44:48.000Z
|
tests/turing_machine/test_rule.py
|
kigawas/computation-py
|
4e1dc356c03b845cae1218a244ad2f2ce2b10132
|
[
"MIT"
] | 10
|
2020-11-29T13:01:09.000Z
|
2022-01-22T13:14:32.000Z
|
tests/turing_machine/test_rule.py
|
kigawas/computation-py
|
4e1dc356c03b845cae1218a244ad2f2ce2b10132
|
[
"MIT"
] | 2
|
2019-09-21T15:07:41.000Z
|
2021-03-14T13:38:10.000Z
|
from computation.turing_machine.rule import (
DTM,
Direction,
DTMRulebook,
Tape,
TMConfiguration,
TMRule,
)
# increment binary number rulebook
rulebook = DTMRulebook(
[
TMRule(1, "0", 2, "1", Direction.RIGHT),
TMRule(1, "1", 1, "0", Direction.LEFT),
TMRule(1, "_", 2, "1", Direction.RIGHT),
TMRule(2, "0", 2, "0", Direction.RIGHT),
TMRule(2, "1", 2, "1", Direction.RIGHT),
TMRule(2, "_", 3, "_", Direction.LEFT),
]
)
def test_rule():
dtm = DTM(
TMConfiguration(1, Tape(["1", "0", "1"], "1", ["_"])),
[3],
rulebook,
)
dtm.run()
assert dtm.current_configuration.state == 3
assert dtm.current_configuration.tape == Tape(["1", "1", "0"], "0", ["_"])
dtm = DTM(
TMConfiguration(1, Tape(["1", "2", "1"], "1", ["_"])),
[3],
rulebook,
)
dtm.run()
assert dtm.is_stuck
| 23.175
| 78
| 0.514563
|
6896e1102e9a82a0195bb2f7d1401ef7a706ebc7
| 45
|
py
|
Python
|
tests/CompileTests/Python_tests/test2011_013.py
|
maurizioabba/rose
|
7597292cf14da292bdb9a4ef573001b6c5b9b6c0
|
[
"BSD-3-Clause"
] | 488
|
2015-01-09T08:54:48.000Z
|
2022-03-30T07:15:46.000Z
|
tests/CompileTests/Python_tests/test2011_013.py
|
sujankh/rose-matlab
|
7435d4fa1941826c784ba97296c0ec55fa7d7c7e
|
[
"BSD-3-Clause"
] | 174
|
2015-01-28T18:41:32.000Z
|
2022-03-31T16:51:05.000Z
|
tests/CompileTests/Python_tests/test2011_013.py
|
sujankh/rose-matlab
|
7435d4fa1941826c784ba97296c0ec55fa7d7c7e
|
[
"BSD-3-Clause"
] | 146
|
2015-04-27T02:48:34.000Z
|
2022-03-04T07:32:53.000Z
|
def foo():
return 1
def bar():
return 2
| 7.5
| 10
| 0.577778
|
e8c5c3df4df9ae14dce48dd709ec5b0d070cd5bd
| 186
|
py
|
Python
|
biothings/tests/hub/config/conf_deep.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
biothings/tests/hub/config/conf_deep.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
biothings/tests/hub/config/conf_deep.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
from conf_base import *
# redefine some params
D_D = "d"
# redefine description
E = "heu"
# redefine desc of read-only
READ_ONLY = "written in titanium"
#- readonly -#
F = "Forged"
| 12.4
| 33
| 0.682796
|
0f506f3d17d3607fd974357621c1e2d8a3d9789b
| 12,049
|
py
|
Python
|
intersight/model/appliance_device_certificate_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/appliance_device_certificate_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/appliance_device_certificate_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.appliance_cert_renewal_phase import ApplianceCertRenewalPhase
globals()['ApplianceCertRenewalPhase'] = ApplianceCertRenewalPhase
class ApplianceDeviceCertificateAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'APPLIANCE.DEVICECERTIFICATE': "appliance.DeviceCertificate",
},
('object_type',): {
'APPLIANCE.DEVICECERTIFICATE': "appliance.DeviceCertificate",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'ca_certificate': (str,), # noqa: E501
'ca_certificate_expiry_time': (datetime,), # noqa: E501
'certificate_renewal_expiry_time': (datetime,), # noqa: E501
'completed_phases': ([ApplianceCertRenewalPhase], none_type,), # noqa: E501
'configuration_mo_id': (str,), # noqa: E501
'current_phase': (ApplianceCertRenewalPhase,), # noqa: E501
'end_time': (datetime,), # noqa: E501
'last_success_poll_time': (datetime,), # noqa: E501
'messages': ([str], none_type,), # noqa: E501
'start_time': (datetime,), # noqa: E501
'status': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'ca_certificate': 'CaCertificate', # noqa: E501
'ca_certificate_expiry_time': 'CaCertificateExpiryTime', # noqa: E501
'certificate_renewal_expiry_time': 'CertificateRenewalExpiryTime', # noqa: E501
'completed_phases': 'CompletedPhases', # noqa: E501
'configuration_mo_id': 'ConfigurationMoId', # noqa: E501
'current_phase': 'CurrentPhase', # noqa: E501
'end_time': 'EndTime', # noqa: E501
'last_success_poll_time': 'LastSuccessPollTime', # noqa: E501
'messages': 'Messages', # noqa: E501
'start_time': 'StartTime', # noqa: E501
'status': 'Status', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ApplianceDeviceCertificateAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "appliance.DeviceCertificate", must be one of ["appliance.DeviceCertificate", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "appliance.DeviceCertificate", must be one of ["appliance.DeviceCertificate", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
ca_certificate (str): The base64 encoded certificate in PEM format.. [optional] # noqa: E501
ca_certificate_expiry_time (datetime): The expiry datetime of new ca certificate which need to be applied on device connector.. [optional] # noqa: E501
certificate_renewal_expiry_time (datetime): The date time allocated till cert renewal will be executed. This time used here will be based on cert renewal plan.. [optional] # noqa: E501
completed_phases ([ApplianceCertRenewalPhase], none_type): [optional] # noqa: E501
configuration_mo_id (str): The operation configuration MOId.. [optional] # noqa: E501
current_phase (ApplianceCertRenewalPhase): [optional] # noqa: E501
end_time (datetime): End date of the certificate renewal.. [optional] # noqa: E501
            last_success_poll_time (datetime): The last poll time when data collection was successful. This time is used to collect data after this time in next cycle.. [optional] # noqa: E501
messages ([str], none_type): [optional] # noqa: E501
start_time (datetime): Start date of the certificate renewal.. [optional] # noqa: E501
status (str): The status of ca certificate renewal.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "appliance.DeviceCertificate")
object_type = kwargs.get('object_type', "appliance.DeviceCertificate")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
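# Hedged usage sketch (added for illustration; not part of the generated model): building the
# model directly with a couple of the optional keyword arguments documented above. The values
# are placeholders -- in practice instances are normally deserialized from API responses.
def _example_appliance_device_certificate_all_of():
    return ApplianceDeviceCertificateAllOf(
        configuration_mo_id="5f1b2c3d4e5f6a7b8c9d0e1f",  # placeholder MOId
        status="Completed",                              # placeholder status string
    )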
| 54.520362
| 1,678
| 0.642875
|
2ff53c1389dce0053d576fefb065b93ae3831453
| 7,152
|
py
|
Python
|
test/performance/link_prediction/dgl/model.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
test/performance/link_prediction/dgl/model.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
test/performance/link_prediction/dgl/model.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | null | null | null |
import dgl
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
import dgl.function as fn
import random
from dgl.data import CoraGraphDataset, PubmedGraphDataset, CiteseerGraphDataset
from autogl.module.model.dgl import AutoSAGE, AutoGCN, AutoGAT
import dgl.data
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tqdm import tqdm
from helper import get_encoder_decoder_hp
from sklearn.metrics import roc_auc_score
parser = ArgumentParser(
"auto link prediction", formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument("--dataset", default="Cora", type=str, help="dataset to use", choices=["Cora", "CiteSeer", "PubMed"],)
parser.add_argument("--model", default="sage", type=str,help="model to use", choices=["gcn","gat","sage"],)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument('--repeat', type=int, default=10)
parser.add_argument("--device", default=0, type=int, help="GPU device")
args = parser.parse_args()
args.device = torch.device('cuda:0')
device = torch.device('cuda:0')
if args.dataset == 'Cora':
dataset = CoraGraphDataset()
elif args.dataset == 'CiteSeer':
dataset = CiteseerGraphDataset()
elif args.dataset == 'PubMed':
dataset = PubmedGraphDataset()
else:
assert False
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
np.random.seed(seed)
random.seed(seed)
def split_train_valid_test(g):
u, v = g.edges()
eids = np.arange(g.number_of_edges())
eids = np.random.permutation(eids)
valid_size = int(len(eids) * 0.1)
test_size = int(len(eids) * 0.1)
train_size = g.number_of_edges() - test_size - valid_size
test_pos_u, test_pos_v = u[eids[:test_size]], v[eids[:test_size]]
valid_pos_u, valid_pos_v = u[eids[test_size:test_size+valid_size]], v[eids[test_size:test_size+valid_size]]
train_pos_u, train_pos_v = u[eids[test_size+valid_size:]], v[eids[test_size+valid_size:]]
# Find all negative edges and split them for training and testing
adj = sp.coo_matrix((np.ones(len(u)), (u.numpy(), v.numpy())))
adj_neg = 1 - adj.todense() - np.eye(g.number_of_nodes())
neg_u, neg_v = np.where(adj_neg != 0)
neg_eids = np.random.choice(len(neg_u), g.number_of_edges())
test_neg_u, test_neg_v = neg_u[neg_eids[:test_size]], neg_v[neg_eids[:test_size]]
valid_neg_u, valid_neg_v = neg_u[neg_eids[test_size:test_size+valid_size]], neg_v[neg_eids[test_size:test_size+valid_size]]
train_neg_u, train_neg_v = neg_u[neg_eids[test_size+valid_size:]], neg_v[neg_eids[test_size+valid_size:]]
train_g = dgl.remove_edges(g, eids[:test_size+valid_size])
train_pos_g = dgl.graph((train_pos_u, train_pos_v), num_nodes=g.number_of_nodes())
train_neg_g = dgl.graph((train_neg_u, train_neg_v), num_nodes=g.number_of_nodes())
valid_pos_g = dgl.graph((valid_pos_u, valid_pos_v), num_nodes=g.number_of_nodes())
valid_neg_g = dgl.graph((valid_neg_u, valid_neg_v), num_nodes=g.number_of_nodes())
test_pos_g = dgl.graph((test_pos_u, test_pos_v), num_nodes=g.number_of_nodes())
test_neg_g = dgl.graph((test_neg_u, test_neg_v), num_nodes=g.number_of_nodes())
return train_g, train_pos_g, train_neg_g, valid_pos_g, valid_neg_g, test_pos_g, test_neg_g
class DotPredictor(nn.Module):
def forward(self, g, h):
with g.local_scope():
g.ndata['h'] = h
# Compute a new edge feature named 'score' by a dot-product between the
# source node feature 'h' and destination node feature 'h'.
g.apply_edges(fn.u_dot_v('h', 'h', 'score'))
# u_dot_v returns a 1-element vector for each edge so you need to squeeze it.
return g.edata['score'][:, 0]
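# Hedged illustration (added; not part of the original benchmark): DotPredictor scores each
# edge with the dot product of its endpoint embeddings. A tiny self-contained check on a toy
# graph; the graph, embedding size and values are made up.
def _example_dot_predictor():
    toy_g = dgl.graph(([0, 1, 2], [1, 2, 0]))          # 3 nodes, 3 directed edges
    toy_h = torch.randn(toy_g.number_of_nodes(), 8)    # random 8-d node embeddings
    scores = DotPredictor()(toy_g, toy_h)              # one scalar score per edge
    assert scores.shape == (toy_g.number_of_edges(),)
    return scores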
def compute_loss(pos_score, neg_score):
scores = torch.cat([pos_score, neg_score])
labels = torch.cat([torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])])
return F.binary_cross_entropy_with_logits(scores.cpu(), labels)
def compute_auc(pos_score, neg_score):
scores = torch.cat([pos_score, neg_score]).numpy()
labels = torch.cat(
[torch.ones(pos_score.shape[0]), torch.zeros(neg_score.shape[0])]).numpy()
return roc_auc_score(labels, scores)
def get_link_labels(pos_edge_index, neg_edge_index):
E = pos_edge_index.size(1) + neg_edge_index.size(1)
link_labels = torch.zeros(E, dtype=torch.float, device=device)
link_labels[: pos_edge_index.size(1)] = 1.0
return link_labels
@torch.no_grad()
def evaluate(model, data, mask):
model.eval()
if mask == "val": offset = 3
else: offset = 5
z = model.lp_encode(data[0])
link_logits = model.lp_decode(
z, torch.stack(data[offset].edges()), torch.stack(data[offset + 1].edges())
)
link_probs = link_logits.sigmoid()
link_labels = get_link_labels(
torch.stack(data[offset].edges()), torch.stack(data[offset + 1].edges())
)
result = roc_auc_score(link_labels.cpu().numpy(), link_probs.cpu().numpy())
return result
res = []
model_hp, _ = get_encoder_decoder_hp(args.model, decoupled=False)
for seed in tqdm(range(1234, 1234+args.repeat)):
setup_seed(seed)
g = dataset[0]
splitted = list(split_train_valid_test(g))
if args.model == 'gcn' or args.model == 'gat':
splitted[0] = dgl.add_self_loop(splitted[0])
splitted = [g.to(device) for g in splitted]
if args.model == 'gcn':
model = AutoGCN(
input_dimension=splitted[0].ndata['feat'].shape[1],
output_dimension=2,
device=args.device,
).from_hyper_parameter(model_hp).model
elif args.model == 'gat':
model = AutoGAT(
input_dimension=splitted[0].ndata['feat'].shape[1],
output_dimension=2,
device=args.device,
).from_hyper_parameter(model_hp).model
elif args.model == 'sage':
model = AutoSAGE(
num_features=splitted[0].ndata['feat'].shape[1],
num_classes=2,
device=args.device
).from_hyper_parameter(model_hp).model
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
best_auc = 0.
for epoch in range(100):
model.train()
optimizer.zero_grad()
z = model.lp_encode(splitted[0])
link_logits = model.lp_decode(
z, torch.stack(splitted[1].edges()), torch.stack(splitted[2].edges())
)
link_labels = get_link_labels(
torch.stack(splitted[1].edges()), torch.stack(splitted[2].edges())
)
loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)
loss.backward()
optimizer.step()
auc_val = evaluate(model, splitted, "val")
if auc_val > best_auc:
best_auc = auc_val
best_parameters = pickle.dumps(model.state_dict())
model.load_state_dict(pickle.loads(best_parameters))
res.append(evaluate(model, splitted, "test"))
print("{:.2f} ~ {:.2f}".format(np.mean(res) * 100, np.std(res) * 100))
| 36.865979
| 127
| 0.680928
|
bf4ec80bfaa68d5b950ac516e8cd918166b42cc2
| 5,660
|
py
|
Python
|
mujoco/ppo_main.py
|
mrbermell/seed_rl
|
9562e178fb8c16d2551d9e5d59594a7f908655dd
|
[
"Apache-2.0"
] | 733
|
2019-10-14T11:38:22.000Z
|
2022-03-24T14:55:50.000Z
|
mujoco/ppo_main.py
|
mrbermell/seed_rl
|
9562e178fb8c16d2551d9e5d59594a7f908655dd
|
[
"Apache-2.0"
] | 76
|
2019-10-30T14:18:17.000Z
|
2021-12-10T11:52:15.000Z
|
mujoco/ppo_main.py
|
mrbermell/seed_rl
|
9562e178fb8c16d2551d9e5d59594a7f908655dd
|
[
"Apache-2.0"
] | 141
|
2019-10-14T11:38:25.000Z
|
2022-02-27T10:36:56.000Z
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO learner for Mujoco."""
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import gin
import gin.tf.external_configurables
from seed_rl.agents.policy_gradient import learner
from seed_rl.agents.policy_gradient import learner_flags
from seed_rl.agents.policy_gradient.modules import continuous_control_agent
from seed_rl.agents.policy_gradient.modules import popart
from seed_rl.common import actor
from seed_rl.common import common_flags
from seed_rl.common import parametric_distribution
from seed_rl.common import utils
from seed_rl.mujoco import env
import tensorflow as tf
gin.external_configurable(tf.keras.initializers.Orthogonal,
name='Orthogonal', module='tf.keras.initializers')
# Enable configuring parametric action distribution before Gin configs are read
# and locked.
continuous_action_config = gin.external_configurable(
parametric_distribution.continuous_action_config)
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.0003, 'Learning rate.')
flags.DEFINE_float('lr_decay_multiplier', 0.,
                   'Fraction of original learning rate to decay to.')
flags.DEFINE_string('env_name', 'HalfCheetah-v2',
'Name of the environment from OpenAI Gym.')
flags.DEFINE_string('mujoco_model',
None,
'Optional path to the xml mujoco model to use. It must be '
'compatible with the environment class selected with '
'--env_name.')
flags.DEFINE_string('gin_config', '', 'A path to a config file.')
flags.DEFINE_multi_string('gin_bindings', [],
'Newline separated list of Gin parameter bindings.')
gin.external_configurable(tf.exp, name='exp', module='tf')
gin.external_configurable(tf.keras.layers.LayerNormalization,
'LayerNormalization', module='tf.keras.layers')
gin.external_configurable(tf.keras.initializers.VarianceScaling)
gin.external_configurable(tf.keras.initializers.GlorotUniform)
gin.external_configurable(tf.keras.initializers.GlorotNormal)
gin.external_configurable(tf.keras.initializers.lecun_normal, 'lecun_normal')
gin.external_configurable(tf.keras.initializers.lecun_uniform, 'lecun_uniform')
gin.external_configurable(tf.keras.initializers.he_normal, 'he_normal')
gin.external_configurable(tf.keras.initializers.he_uniform, 'he_uniform')
gin.external_configurable(tf.keras.initializers.TruncatedNormal)
@gin.configurable
def orthogonal_gain_sqrt2():
return tf.keras.initializers.Orthogonal(1.41421356237)
@gin.configurable
def orthogonal_gain_0dot01():
return tf.keras.initializers.Orthogonal(0.01)
@gin.configurable
def create_optimizer(final_iteration, optimizer_fn):
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
FLAGS.learning_rate, final_iteration,
FLAGS.lr_decay_multiplier * FLAGS.learning_rate)
optimizer = optimizer_fn(learning_rate_fn)
return optimizer, learning_rate_fn
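# Hedged illustration (added; not part of the original learner): how the PolynomialDecay
# schedule built above behaves. With the default power of 1.0 and lr_decay_multiplier=0 the
# learning rate falls linearly from FLAGS.learning_rate to 0 over final_iteration steps.
# The numbers below are made up for demonstration.
def _example_lr_schedule(final_iteration=1000):
    schedule = tf.keras.optimizers.schedules.PolynomialDecay(3e-4, final_iteration, 0.0)
    # Roughly [3e-4, 1.5e-4, 0.0] at the start, midpoint and end of training.
    return [float(schedule(step)) for step in (0, final_iteration // 2, final_iteration)]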
def create_agent(unused_action_space, unused_observation_space,
parametric_action_distribution):
return continuous_control_agent.ContinuousControlAgent(
parametric_action_distribution=parametric_action_distribution)
def main(unused_argv):
# Save the string flags now as we modify them later.
string_flags = FLAGS.flags_into_string()
gin.parse_config_files_and_bindings(
[FLAGS.gin_config] if FLAGS.gin_config else [],
# Gin uses slashes to denote scopes but XM doesn't allow slashes in
# parameter names so we use __ instead and convert it to slashes here.
[s.replace('__', '/') for s in FLAGS.gin_bindings])
gym_kwargs = {}
if FLAGS.mujoco_model:
local_mujoco_model = tempfile.mkstemp(
prefix='mujoco_model', suffix='.xml')[1]
logging.info('Copying remote model %s to local file %s', FLAGS.mujoco_model,
local_mujoco_model)
tf.io.gfile.copy(FLAGS.mujoco_model, local_mujoco_model, overwrite=True)
gym_kwargs['model_path'] = local_mujoco_model
create_environment = lambda task, config: env.create_environment(
env_name=config.env_name,
discretization='none',
n_actions_per_dim=11,
action_ratio=30,
gym_kwargs=gym_kwargs)
if FLAGS.run_mode == 'actor':
actor.actor_loop(create_environment)
elif FLAGS.run_mode == 'learner':
logdir = FLAGS.logdir
settings = utils.init_learner_multi_host(FLAGS.num_training_tpus)
learner.learner_loop(
create_environment,
create_agent,
create_optimizer,
learner_flags.training_config_from_flags(),
settings,
action_distribution_config=continuous_action_config())
with tf.io.gfile.GFile(os.path.join(logdir, 'learner_flags.txt'), 'w') as f:
f.write(string_flags)
with tf.io.gfile.GFile(os.path.join(logdir, 'learner.gin'), 'w') as f:
f.write(gin.operative_config_str())
else:
raise ValueError('Unsupported run mode {}'.format(FLAGS.run_mode))
if __name__ == '__main__':
app.run(main)
| 38.767123
| 80
| 0.748057
|
be6d29424a1823b6f23173b26c2aa298ca7ab03e
| 369
|
py
|
Python
|
Intro-Bites/101.py
|
joaopalmeiro/pybites
|
54a70689c14149a8b927a004e70f1b07642ec766
|
[
"MIT"
] | null | null | null |
Intro-Bites/101.py
|
joaopalmeiro/pybites
|
54a70689c14149a8b927a004e70f1b07642ec766
|
[
"MIT"
] | null | null | null |
Intro-Bites/101.py
|
joaopalmeiro/pybites
|
54a70689c14149a8b927a004e70f1b07642ec766
|
[
"MIT"
] | null | null | null |
MIN_DRIVING_AGE = 18
def allowed_driving(name, age):
"""Print '{name} is allowed to drive' or '{name} is not allowed to drive'
checking the passed in age against the MIN_DRIVING_AGE constant"""
not_str = " " if age >= MIN_DRIVING_AGE else " not "
print(f"{name} is{not_str}allowed to drive")
allowed_driving("tim", 17)
allowed_driving("bob", 18)
| 28.384615
| 77
| 0.688347
|
0436d3a43fae5a4664ef8ae9aebe90b696f4d1fa
| 7,201
|
py
|
Python
|
HoundSploit/searcher/engine/version_comparator.py
|
nicolas-carolo/houndsplo
|
a44b02559588ec2ae44af3529cc8a58371fa15c8
|
[
"BSD-3-Clause"
] | 85
|
2019-12-18T08:11:51.000Z
|
2022-02-25T05:45:48.000Z
|
HoundSploit/searcher/engine/version_comparator.py
|
juan157/houndsploit
|
12210481d8fa5880265e4b342f816a53d93e4637
|
[
"BSD-3-Clause"
] | 2
|
2020-04-21T13:33:14.000Z
|
2020-04-30T12:39:50.000Z
|
HoundSploit/searcher/engine/version_comparator.py
|
juan157/houndsploit
|
12210481d8fa5880265e4b342f816a53d93e4637
|
[
"BSD-3-Clause"
] | 11
|
2020-04-20T09:49:30.000Z
|
2022-02-01T15:29:17.000Z
|
import re
from pkg_resources import parse_version
def get_num_version(software_name, description):
"""
Get the number of the version of the software contained in a description of a vulnerability without '<' char.
:param software_name: the name of the software we want to get the number of version.
:param description: the description of the vulnerability from which we want to get the number of the version.
:return: the number of version if it is possible to get it, None else.
"""
software_name = software_name.upper()
description = description.upper()
regex = re.search(software_name + r' \d+((\.\d+)+)?', description)
try:
software = regex.group(0)
regex = re.search(r'\d+((\.\d+)+)?', software)
try:
return regex.group(0)
except AttributeError:
return
except AttributeError:
return
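# Hedged illustration (added; not part of the original module): the parser above extracts the
# version number that directly follows the software name. The vulnerability title is made up.
def _example_get_num_version():
    description = "ExampleCMS 4.7.1 - Remote Code Execution"   # illustrative title
    return get_num_version("ExampleCMS", description)          # -> '4.7.1'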
def get_num_version_with_comparator(software_name, description):
"""
Get the number of the version of the software contained in a description of a vulnerability containing '<' char.
:param software_name: the name of the software we want to get the number of version.
:param description: the description of the vulnerability from which we want to get the number of the version.
:return: the number of version if it is possible to get it, None else.
"""
software_name = software_name.upper()
description = description.upper()
regex = re.search(software_name + r' < \d+((\.\d+)+)?', description)
try:
software = regex.group(0)
regex = re.search(r'\d+((\.\d+)+)?', software)
try:
return regex.group(0)
except AttributeError:
return
except AttributeError:
return
def is_lte_with_comparator_x(num_version, software_name, description):
"""
Check if the vulnerability's description contains the number of version (with comparator and the x) of the software
searched by the user.
:param num_version: the number of version searched by the user.
:param software_name: the name of the software searched by the user.
:param description: the vulnerability's description to check.
:return: True if the vulnerability's description contains the number of version of the software searched by
the user, False else.
"""
software_name = software_name.upper()
description = description.upper()
regex = re.search(software_name + r' < \d+((\.\d+)+)?', description)
try:
software = regex.group(0)
regex = re.search(r'\d+((\.\d+)+)?', software)
try:
num_to_compare = regex.group(0)
version_precision = str(num_to_compare).count('.')
except AttributeError:
return False
except AttributeError:
return False
try:
regex = re.search(r'\d+(\.\d+){0,%d}' % version_precision, num_version)
num_version = regex.group()
except AttributeError:
return False
if parse_version(num_version) <= parse_version(num_to_compare):
return True
else:
return False
def is_in_version_range(num_version, software_name, description):
"""
Check if the number of version (without x) of the software searched by the user is contained in the range of
version in the vulnerability's description.
:param num_version: the number of version searched by the user.
:param software_name: the name of the software searched by the user.
:param description: the vulnerability's description in which to do the check.
:return: True if the number of version (without x) of the software searched by the user is contained in the range of
version in the vulnerability's description, False else.
"""
software_name = software_name.upper()
description = description.upper()
regex = re.search(software_name + r' \d+((\.\d+)+)? < \d+((\.\d+)+)?', description)
try:
software = regex.group(0)
regex = re.search(r'(?P<from_version>\d+((\.\d+)+)?) < (?P<to_version>\d+((\.\d+)+)?)', software)
if parse_version(num_version) >= parse_version(regex.group('from_version')) and parse_version(
num_version) <= parse_version(regex.group('to_version')):
return True
else:
return False
except AttributeError:
return False
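# Hedged illustration (added; not part of the original module): a made-up description using
# the 'NAME <from> < <to>' pattern that the range check above understands.
def _example_is_in_version_range():
    description = "ExampleCMS 2.0 < 2.2 - SQL Injection"              # illustrative title
    return is_in_version_range("2.1.5", "ExampleCMS", description)    # -> True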
def is_in_version_range_with_x(num_version, software_name, description):
"""
Check if the number of version (with x) of the software searched by the user is contained in the range of
version in the vulnerability's description.
:param num_version: the number of version searched by the user.
:param software_name: the name of the software searched by the user.
:param description: the vulnerability's description in which to do the check.
    :return: True if the number of version (with x) of the software searched by the user is contained in the range of
    version in the vulnerability's description, False else.
"""
software_name = software_name.upper()
description = description.upper()
regex = re.search(software_name + r' \w+((\.\w+)+)?(\.x)? < \w+((\.\w+)+)?(\.x)?', description)
try:
software = regex.group(0)
regex = re.search(
r'(?P<from_version>\d+((\.\d+)+)?)(\.X)? < (?P<to_version>\d+((\.\d+)+)?(\.X)?)',
software)
from_version = regex.group('from_version')
to_version = regex.group('to_version')
regex = re.search(r'(?P<base>.+)\.(?P<least_digit>\d+)($|\.X)', to_version)
if to_version.__contains__('X'):
least_digit = int(regex.group('least_digit')) + 1
x_flag = True
else:
least_digit = int(regex.group('least_digit'))
x_flag = False
to_version = regex.group('base') + '.' + str(least_digit)
if (parse_version(from_version) <= parse_version(num_version) <= parse_version(to_version) and x_flag is False)\
or (parse_version(from_version) <= parse_version(num_version) < parse_version(to_version)
and x_flag is True):
return True
else:
return False
except AttributeError:
return False
def is_equal_with_x(num_version, num_to_compare):
"""
Check if the number of version searched by the user is equal to the number of version (with x) of the software
contained in the vulnerability's description.
:param num_version: the number of version searched by the user.
:param num_to_compare: the number of version (containing the x) in the vulnerability's description.
    :return: True if the number of version searched by the user is equal to the number of version (with x) of the
    software contained in the vulnerability's description, False else.
"""
version_precision = str(num_to_compare).count('.')
try:
regex = re.search(r'\d+(\.\d+){0,%d}' % version_precision, num_version)
num_version = regex.group()
except AttributeError:
pass
if parse_version(num_version) == parse_version(num_to_compare):
return True
else:
return False
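# Hedged illustration (added; not part of the original module): is_equal_with_x first truncates
# the searched version to the precision of the number taken from the description, then compares.
# The version strings are made up.
def _example_is_equal_with_x():
    # '5.4.2' is truncated to '5.4' to match the precision of the second argument.
    return is_equal_with_x("5.4.2", "5.4")   # -> True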
| 44.177914
| 120
| 0.653243
|
04a1821b1ce755bd4c7dcbb62c71a1f5261819f2
| 7,206
|
py
|
Python
|
Mac/Modules/res/ressupport.py
|
deadsnakes/python2.4
|
f493d5415b662e99a73d017bcafe2148c5bc8fb5
|
[
"PSF-2.0"
] | null | null | null |
Mac/Modules/res/ressupport.py
|
deadsnakes/python2.4
|
f493d5415b662e99a73d017bcafe2148c5bc8fb5
|
[
"PSF-2.0"
] | null | null | null |
Mac/Modules/res/ressupport.py
|
deadsnakes/python2.4
|
f493d5415b662e99a73d017bcafe2148c5bc8fb5
|
[
"PSF-2.0"
] | null | null | null |
# This script will generate the Resources interface for Python.
# It uses the "bgen" package to generate C code.
# It execs the file resgen.py which contains the function definitions
# (resgen.py was generated by resscan.py, scanning the <Resources.h> header file).
from macsupport import *
class ResMixIn:
def checkit(self):
if self.returntype.__class__ != OSErrType:
OutLbrace()
Output("OSErr _err = ResError();")
Output("if (_err != noErr) return PyMac_Error(_err);")
OutRbrace()
FunctionGenerator.checkit(self) # XXX
class ResFunction(ResMixIn, OSErrWeakLinkFunctionGenerator): pass
class ResMethod(ResMixIn, OSErrWeakLinkMethodGenerator): pass
RsrcChainLocation = Type("RsrcChainLocation", "h")
FSCatalogInfoBitmap = FakeType("0") # Type("FSCatalogInfoBitmap", "l")
FSCatalogInfo_ptr = FakeType("(FSCatalogInfo *)0")
# includestuff etc. are imported from macsupport
includestuff = includestuff + """
#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern PyObject *_ResObj_New(Handle);
extern int _ResObj_Convert(PyObject *, Handle *);
extern PyObject *_OptResObj_New(Handle);
extern int _OptResObj_Convert(PyObject *, Handle *);
#define ResObj_New _ResObj_New
#define ResObj_Convert _ResObj_Convert
#define OptResObj_New _OptResObj_New
#define OptResObj_Convert _OptResObj_Convert
#endif
/* Function to dispose a resource, with a "normal" calling sequence */
static void
PyMac_AutoDisposeHandle(Handle h)
{
DisposeHandle(h);
}
"""
finalstuff = finalstuff + """
/* Alternative version of ResObj_New, which returns None for null argument */
PyObject *OptResObj_New(Handle itself)
{
if (itself == NULL) {
Py_INCREF(Py_None);
return Py_None;
}
return ResObj_New(itself);
}
int OptResObj_Convert(PyObject *v, Handle *p_itself)
{
PyObject *tmp;
if ( v == Py_None ) {
*p_itself = NULL;
return 1;
}
if (ResObj_Check(v))
{
*p_itself = ((ResourceObject *)v)->ob_itself;
return 1;
}
/* If it isn't a resource yet see whether it is convertible */
if ( (tmp=PyObject_CallMethod(v, "as_Resource", "")) ) {
*p_itself = ((ResourceObject *)tmp)->ob_itself;
Py_DECREF(tmp);
return 1;
}
PyErr_Clear();
PyErr_SetString(PyExc_TypeError, "Resource required");
return 0;
}
"""
initstuff = initstuff + """
PyMac_INIT_TOOLBOX_OBJECT_NEW(Handle, ResObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(Handle, ResObj_Convert);
PyMac_INIT_TOOLBOX_OBJECT_NEW(Handle, OptResObj_New);
PyMac_INIT_TOOLBOX_OBJECT_CONVERT(Handle, OptResObj_Convert);
"""
module = MacModule('_Res', 'Res', includestuff, finalstuff, initstuff)
class ResDefinition(PEP253Mixin, GlobalObjectDefinition):
getsetlist = [
('data',
"""
PyObject *res;
char state;
state = HGetState(self->ob_itself);
HLock(self->ob_itself);
res = PyString_FromStringAndSize(
*self->ob_itself,
GetHandleSize(self->ob_itself));
HUnlock(self->ob_itself);
HSetState(self->ob_itself, state);
return res;
""",
"""
char *data;
long size;
if ( v == NULL )
return -1;
if ( !PyString_Check(v) )
return -1;
size = PyString_Size(v);
data = PyString_AsString(v);
/* XXXX Do I need the GetState/SetState calls? */
SetHandleSize(self->ob_itself, size);
if ( MemError())
return -1;
HLock(self->ob_itself);
memcpy((char *)*self->ob_itself, data, size);
HUnlock(self->ob_itself);
        /* XXXX Should I do the Changed call immediately? */
return 0;
""",
'The resource data'
), (
'size',
'return PyInt_FromLong(GetHandleSize(self->ob_itself));',
None,
'The length of the resource data'
)]
def outputCheckNewArg(self):
Output("if (itself == NULL) return PyMac_Error(resNotFound);")
def outputCheckConvertArg(self):
# if it isn't a resource we may be able to coerce it
Output("if (!%s_Check(v))", self.prefix)
OutLbrace()
Output("PyObject *tmp;")
Output('if ( (tmp=PyObject_CallMethod(v, "as_Resource", "")) )')
OutLbrace()
Output("*p_itself = ((ResourceObject *)tmp)->ob_itself;")
Output("Py_DECREF(tmp);")
Output("return 1;")
OutRbrace()
Output("PyErr_Clear();")
OutRbrace()
def outputStructMembers(self):
GlobalObjectDefinition.outputStructMembers(self)
Output("void (*ob_freeit)(%s ptr);", self.itselftype)
def outputInitStructMembers(self):
GlobalObjectDefinition.outputInitStructMembers(self)
Output("it->ob_freeit = NULL;")
def outputCleanupStructMembers(self):
Output("if (self->ob_freeit && self->ob_itself)")
OutLbrace()
Output("self->ob_freeit(self->ob_itself);")
OutRbrace()
Output("self->ob_itself = NULL;")
def output_tp_newBody(self):
Output("PyObject *self;")
Output
Output("if ((self = type->tp_alloc(type, 0)) == NULL) return NULL;")
Output("((%s *)self)->ob_itself = NULL;", self.objecttype)
Output("((%s *)self)->ob_freeit = NULL;", self.objecttype)
Output("return self;")
def output_tp_initBody(self):
Output("char *srcdata = NULL;")
Output("int srclen = 0;")
Output("%s itself;", self.itselftype);
Output("char *kw[] = {\"itself\", 0};")
Output()
Output("if (PyArg_ParseTupleAndKeywords(args, kwds, \"O&\", kw, %s_Convert, &itself))",
self.prefix);
OutLbrace()
Output("((%s *)self)->ob_itself = itself;", self.objecttype)
Output("return 0;")
OutRbrace()
Output("PyErr_Clear();")
Output("if (!PyArg_ParseTupleAndKeywords(args, kwds, \"|s#\", kw, &srcdata, &srclen)) return -1;")
Output("if ((itself = NewHandle(srclen)) == NULL)")
OutLbrace()
Output("PyErr_NoMemory();")
Output("return 0;")
OutRbrace()
Output("((%s *)self)->ob_itself = itself;", self.objecttype)
# XXXX Output("((%s *)self)->ob_freeit = PyMac_AutoDisposeHandle;")
Output("if (srclen && srcdata)")
OutLbrace()
Output("HLock(itself);")
Output("memcpy(*itself, srcdata, srclen);")
Output("HUnlock(itself);")
OutRbrace()
Output("return 0;")
resobject = ResDefinition('Resource', 'ResObj', 'Handle')
module.addobject(resobject)
functions = []
resmethods = []
execfile('resgen.py')
execfile('resedit.py')
for f in functions: module.add(f)
for f in resmethods: resobject.add(f)
SetOutputFileName('_Resmodule.c')
module.generate()
| 32.459459
| 106
| 0.594505
|
fe388dde68ec4f5245a1d09598f40caf18e35407
| 6,314
|
py
|
Python
|
gluon/contrib/login_methods/oauth10a_account.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | 1
|
2019-05-16T17:25:57.000Z
|
2019-05-16T17:25:57.000Z
|
gluon/contrib/login_methods/oauth10a_account.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/contrib/login_methods/oauth10a_account.py
|
spiffytech/MobileBlur
|
f9d2469caa05f0fe5c05c2ec83d1480cf6b770d8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <mcm@glisco.it>
License: GPL v3
Adds support for OAuth1.0a authentication to web2py.
Dependencies:
- python-oauth2 (http://github.com/simplegeo/python-oauth2)
"""
import oauth2 as oauth
import cgi
from urllib2 import urlopen
import urllib2
from urllib import urlencode
class OAuthAccount(object):
"""
Login will be done via OAuth Framework, instead of web2py's
login form.
Include in your model (eg db.py)::
# define the auth_table before call to auth.define_tables()
auth_table = db.define_table(
auth.settings.table_user_name,
Field('first_name', length=128, default=""),
Field('last_name', length=128, default=""),
Field('username', length=128, default="", unique=True),
Field('password', 'password', length=256,
readable=False, label='Password'),
Field('registration_key', length=128, default= "",
writable=False, readable=False))
auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
.
.
.
auth.define_tables()
.
.
.
CLIENT_ID=\"<put your fb application id here>\"
CLIENT_SECRET=\"<put your fb application secret here>\"
AUTH_URL="..."
TOKEN_URL="..."
ACCESS_TOKEN_URL="..."
from gluon.contrib.login_methods.oauth10a_account import OAuthAccount
auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL)
"""
def __redirect_uri(self, next=None):
"""Build the uri used by the authenticating server to redirect
the client back to the page originating the auth request.
Appends the _next action to the generated url so the flows continues.
"""
r = self.request
http_host=r.env.http_x_forwarded_for
if not http_host: http_host=r.env.http_host
url_scheme = r.env.wsgi_url_scheme
if next:
path_info = next
else:
path_info = r.env.path_info
uri = '%s://%s%s' %(url_scheme, http_host, path_info)
if r.get_vars and not next:
uri += '?' + urlencode(r.get_vars)
return uri
def accessToken(self):
"""Return the access token generated by the authenticating server.
If token is already in the session that one will be used.
Otherwise the token is fetched from the auth server.
"""
if self.session.access_token:
# return the token (TODO: does it expire?)
return self.session.access_token
if self.session.request_token:
# Exchange the request token with an authorization token.
token = self.session.request_token
self.session.request_token = None
# Build an authorized client
# OAuth1.0a put the verifier!
token.set_verifier(self.request.vars.oauth_verifier)
client = oauth.Client(self.consumer, token)
resp, content = client.request(self.access_token_url, "POST")
if str(resp['status']) != '200':
self.session.request_token = None
self.globals['redirect'](self.globals['URL'](f='user',args='logout'))
self.session.access_token = oauth.Token.from_string(content)
return self.session.access_token
self.session.access_token = None
return None
def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url):
self.globals = g
self.client_id = client_id
self.client_secret = client_secret
self.code = None
self.request = g['request']
self.session = g['session']
self.auth_url = auth_url
self.token_url = token_url
self.access_token_url = access_token_url
# consumer init
self.consumer = oauth.Consumer(self.client_id, self.client_secret)
def login_url(self, next="/"):
self.__oauth_login(next)
return next
def logout_url(self, next="/"):
self.session.request_token = None
self.session.access_token = None
return next
def get_user(self):
'''Get user data.
Since OAuth does not specify what a user
is, this function must be implemented for the specific
provider.
'''
raise NotImplementedError, "Must override get_user()"
def __oauth_login(self, next):
'''This method redirects the user to the authenticating form
on authentication server if the authentication code
and the authentication token are not available to the
application yet.
Once the authentication code has been received this method is
called to set the access token into the session by calling
accessToken()
'''
if not self.accessToken():
# setup the client
client = oauth.Client(self.consumer, None)
# Get a request token.
# oauth_callback *is REQUIRED* for OAuth1.0a
# putting it in the body seems to work.
callback_url = self.__redirect_uri(next)
data = urlencode(dict(oauth_callback=callback_url))
resp, content = client.request(self.token_url, "POST", body=data)
if resp['status'] != '200':
self.session.request_token = None
self.globals['redirect'](self.globals['URL'](f='user',args='logout'))
# Store the request token in session.
request_token = self.session.request_token = oauth.Token.from_string(content)
# Redirect the user to the authentication URL and pass the callback url.
data = urlencode(dict(oauth_token=request_token.key,
oauth_callback=callback_url))
auth_request_url = self.auth_url + '?' +data
HTTP = self.globals['HTTP']
raise HTTP(307,
"You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
Location=auth_request_url)
return None
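# Hedged sketch (added for illustration; not part of the original module): get_user() must be
# overridden for a concrete provider, so a subclass would look roughly like this. The returned
# dict keys mirror the auth table fields shown in the class docstring; the provider call that
# would normally fetch the profile is omitted and is an assumption about the provider's API.
class ExampleProviderAccount(OAuthAccount):
    """Illustrative OAuth1.0a provider subclass."""
    def get_user(self):
        if not self.accessToken():
            return None
        # A real implementation would issue a signed request for the user's profile here.
        return dict(first_name='Example', last_name='User', username='example_user')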
| 33.057592
| 146
| 0.617041
|
e03dcb8ce21706c05fff84ea41ab08b8bc6175c3
| 571
|
py
|
Python
|
nlp/Lib/site-packages/spacy/tests/lang/ne/test_text.py
|
zacandcheese/nsfw-gang
|
bba0f208780019ca211d9300e76d405ce3882fc0
|
[
"MIT"
] | null | null | null |
nlp/Lib/site-packages/spacy/tests/lang/ne/test_text.py
|
zacandcheese/nsfw-gang
|
bba0f208780019ca211d9300e76d405ce3882fc0
|
[
"MIT"
] | 1
|
2022-02-14T21:09:15.000Z
|
2022-02-14T21:09:15.000Z
|
nlp/Lib/site-packages/spacy/tests/lang/ne/test_text.py
|
zacandcheese/nsfw-gang
|
bba0f208780019ca211d9300e76d405ce3882fc0
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import pytest
def test_ne_tokenizer_handlers_long_text(ne_tokenizer):
text = """मैले पाएको सर्टिफिकेटलाई म त बोक्रो सम्झन्छु र अभ्यास तब सुरु भयो, जब मैले कलेज पार गरेँ र जीवनको पढाइ सुरु गरेँ ।"""
tokens = ne_tokenizer(text)
assert len(tokens) == 24
@pytest.mark.parametrize(
"text,length",
[("समय जान कति पनि बेर लाग्दैन ।", 7), ("म ठूलो हुँदै थिएँ ।", 5)],
)
def test_ne_tokenizer_handles_cnts(ne_tokenizer, text, length):
tokens = ne_tokenizer(text)
assert len(tokens) == length
| 30.052632
| 131
| 0.633975
|
e9eb869466d29aea929ed0ed51a6a7e734683777
| 4,744
|
py
|
Python
|
portfolio/Python/scrapy/sagemcom/curryscouk_sagemcom.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/sagemcom/curryscouk_sagemcom.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/sagemcom/curryscouk_sagemcom.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
__author__ = 'juraseg'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from product_spiders.items import Product, ProductLoader
import logging
import re
class CurrysCoUkSpiderSagemcom(BaseSpider):
name = 'currys.co.uk_sagemcom'
allowed_domains = ['currys.co.uk']
start_urls = (
'http://www.currys.co.uk/',
)
search_url = 'http://www.currys.co.uk/gbuk/s_action/search_keywords/index.html'
keywords = ['Sagemcom']
products = [
'http://www.currys.co.uk/gbuk/humax-hdr-fox-t2-freeview-hd-recorder-500gb-07289192-pdt.html',
'http://www.currys.co.uk/gbuk/humax-hdr-fox-t2-freeview-hd-recorder-1tb-11502291-pdt.html',
'http://www.currys.co.uk/gbuk/humax-foxsat-hdr-freesat-hd-recorder-500gb-09785361-pdt.html',
'http://www.currys.co.uk/gbuk/panasonic-dmr-hw100-freeview-hd-recorder-320gb-10112707-pdt.html',
'http://www.currys.co.uk/gbuk/samsung-smt-s7800-freesat-hd-recorder-500gb-09933610-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti-90-320-freeview-hd-recorder-320gb-05326751-pdt.html',
'http://www.currys.co.uk/gbuk/humax-pvr-9300t-500-freeview-recorder-500-gb-12290868-pdt.html',
'http://www.currys.co.uk/gbuk/sony-svr-hdt500-freeview-hd-recorder-500gb-10209414-pdt.html',
'http://www.currys.co.uk/gbuk/philips-picopix-ppx2480-pico-projector-12127328-pdt.html',
'http://www.currys.co.uk/gbuk/philips-picopix-ppx2055-pico-projector-12127320-pdt.html',
'http://www.currys.co.uk/gbuk/microvision-showwx-hdmi-pico-projector-12041449-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti-95-320-freeview-hd-recorder-320-gb-14134720-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti95-500-freeview-hd-recorder-500-gb-13406864-pdt.html',
'http://www.currys.co.uk/gbuk/philips-hdtp-8530-freeview-hd-recorder-500-gb-13985229-pdt.html',
]
def start_requests(self):
for keyword in self.keywords:
data = {
'subaction': 'keyword_search',
'search-field': keyword
}
url = self.search_url
request = FormRequest(url, formdata=data, callback=self.parse_search)
yield request
for url in self.products:
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
name = hxs.select("//h1[@class='pageTitle']/span/text()").extract()
if not name:
logging.error("ERROR! NO NAME! %s" % url)
return
name = " ".join(name)
        name = re.sub(r"[\s]+", " ", name)
price = hxs.select("//div[contains(@class, 'productDetail')]//span[contains(@class, 'currentPrice')]/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! %s %s" % (url, name))
return
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
def parse_search(self, response):
hxs = HtmlXPathSelector(response)
# parse pages
pages = hxs.select("//ul[@class='pagination']//a/@href").extract()
for page in pages:
if page != '#':
request = Request(page, callback=self.parse_search)
yield request
# parse products
items = hxs.select("//article[contains(@class, 'product')]/div[contains(@class, 'desc')]")
for item in items:
name = item.select(".//div/header[@class='productTitle']/a/text()").extract()
if not name:
continue
name = name[0].strip()
            name = re.sub(r"[\s]+", " ", name)
url = item.select(".//div/header[@class='productTitle']/a/@href").extract()
if not url:
logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
continue
url = url[0]
price = item.select(".//div//span[@class='currentPrice']/ins/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! URL: %s. NAME: %s" % (response.url, name))
continue
price = price[0].strip()
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
| 41.982301
| 127
| 0.604553
|
cb2472aabe4b148f89fea5325a8d3fffbc0b6e23
| 5,968
|
py
|
Python
|
plugins/pglogical/plugins/pglogical/plugin.py
|
OmniDB/plugins
|
2c951f2f0100a551da86714cffde22d2baeef29a
|
[
"MIT"
] | 6
|
2018-06-09T15:40:30.000Z
|
2020-03-28T14:41:14.000Z
|
plugins/pglogical/plugins/pglogical/plugin.py
|
OmniDB/plugins
|
2c951f2f0100a551da86714cffde22d2baeef29a
|
[
"MIT"
] | null | null | null |
plugins/pglogical/plugins/pglogical/plugin.py
|
OmniDB/plugins
|
2c951f2f0100a551da86714cffde22d2baeef29a
|
[
"MIT"
] | 8
|
2018-07-27T00:48:05.000Z
|
2021-07-04T05:48:42.000Z
|
from . import metadata as metadata
def get_pglogical_version(p_database_object, p_data):
return { 'pglogical_version': metadata.GetPglogicalVersion(p_database_object) }
def get_pglogical_templates(p_database_object, p_data):
return {
'pglogical_create_node': metadata.TemplatePglogicalCreateNode(p_database_object).v_text,
'pglogical_drop_node': metadata.TemplatePglogicalDropNode(p_database_object).v_text,
'pglogical_add_interface': metadata.TemplatePglogicalNodeAddInterface(p_database_object).v_text,
'pglogical_drop_interface': metadata.TemplatePglogicalNodeDropInterface(p_database_object).v_text,
'pglogical_create_repset': metadata.TemplatePglogicalCreateReplicationSet(p_database_object).v_text,
'pglogical_alter_repset': metadata.TemplatePglogicalAlterReplicationSet(p_database_object).v_text,
'pglogical_drop_repset': metadata.TemplatePglogicalDropReplicationSet(p_database_object).v_text,
'pglogical_repset_add_table': metadata.TemplatePglogicalReplicationSetAddTable(p_database_object).v_text,
'pglogical_repset_add_all_tables': metadata.TemplatePglogicalReplicationSetAddAllTables(p_database_object).v_text,
'pglogical_repset_remove_table': metadata.TemplatePglogicalReplicationSetRemoveTable(p_database_object).v_text,
'pglogical_repset_add_seq': metadata.TemplatePglogicalReplicationSetAddSequence(p_database_object).v_text,
'pglogical_repset_add_all_seqs': metadata.TemplatePglogicalReplicationSetAddAllSequences(p_database_object).v_text,
'pglogical_repset_remove_seq': metadata.TemplatePglogicalReplicationSetRemoveSequence(p_database_object).v_text,
'pglogical_create_sub': metadata.TemplatePglogicalCreateSubscription(p_database_object).v_text,
'pglogical_enable_sub': metadata.TemplatePglogicalEnableSubscription(p_database_object).v_text,
'pglogical_disable_sub': metadata.TemplatePglogicalDisableSubscription(p_database_object).v_text,
'pglogical_sync_sub': metadata.TemplatePglogicalSynchronizeSubscription(p_database_object).v_text,
'pglogical_drop_sub': metadata.TemplatePglogicalDropSubscription(p_database_object).v_text,
'pglogical_sub_add_repset': metadata.TemplatePglogicalSubscriptionAddReplicationSet(p_database_object).v_text,
'pglogical_sub_remove_repset': metadata.TemplatePglogicalSubscriptionRemoveReplicationSet(p_database_object).v_text,
}
def get_pglogical_nodes(p_database_object, p_data):
try:
v_list_nodes = []
v_nodes = metadata.QueryPglogicalNodes(p_database_object)
for v_node in v_nodes.Rows:
v_node_data = {
'v_name': v_node['node_name']
}
v_list_nodes.append(v_node_data)
return v_list_nodes
except Exception as exc:
raise exc
def get_pglogical_interfaces(p_database_object, p_data):
try:
v_list_ifaces = []
v_ifaces = metadata.QueryPglogicalNodeInterfaces(p_database_object, p_data['p_node'])
for v_iface in v_ifaces.Rows:
v_iface_data = {
'v_name': v_iface['if_name'],
'v_dsn': v_iface['if_dsn']
}
v_list_ifaces.append(v_iface_data)
return v_list_ifaces
except Exception as exc:
raise exc
def get_pglogical_replicationsets(p_database_object, p_data):
try:
v_list_repsets = []
v_repsets = metadata.QueryPglogicalReplicationSets(p_database_object)
for v_repset in v_repsets.Rows:
v_repset_data = {
'v_name': v_repset['set_name'],
'v_insert': v_repset['replicate_insert'],
'v_update': v_repset['replicate_update'],
'v_delete': v_repset['replicate_delete'],
'v_truncate': v_repset['replicate_truncate']
}
v_list_repsets.append(v_repset_data)
return v_list_repsets
except Exception as exc:
raise exc
def get_pglogical_repset_tables(p_database_object, p_data):
try:
v_list_tables = []
v_tables = metadata.QueryPglogicalReplicationSetTables(p_database_object, p_data['p_repset'])
for v_table in v_tables.Rows:
v_table_data = {
'v_name': v_table['table_name']
}
v_list_tables.append(v_table_data)
return v_list_tables
except Exception as exc:
raise exc
def get_pglogical_repset_seqs(p_database_object, p_data):
try:
v_list_seqs = []
v_seqs = metadata.QueryPglogicalReplicationSetSequences(p_database_object, p_data['p_repset'])
for v_seq in v_seqs.Rows:
v_seq_data = {
'v_name': v_seq['sequence_name']
}
v_list_seqs.append(v_seq_data)
return v_list_seqs
except Exception as exc:
raise exc
def get_pglogical_subscriptions(p_database_object, p_data):
try:
v_list_subs = []
v_subs = metadata.QueryPglogicalSubscriptions(p_database_object)
for v_sub in v_subs.Rows:
v_sub_data = {
'v_name': v_sub['sub_name'],
'v_status': v_sub['sub_status'],
'v_origin': v_sub['sub_origin'],
'v_enabled': v_sub['sub_enabled'],
'v_delay': v_sub['sub_apply_delay']
}
v_list_subs.append(v_sub_data)
return v_list_subs
except Exception as exc:
raise exc
def get_pglogical_subscription_repsets(p_database_object, p_data):
try:
v_list_repsets = []
v_repsets = metadata.QueryPglogicalSubscriptionReplicationSets(p_database_object, p_data['p_sub'])
for v_repset in v_repsets.Rows:
v_repset_data = {
'v_name': v_repset['set_name']
}
v_list_repsets.append(v_repset_data)
return v_list_repsets
except Exception as exc:
raise exc
| 46.263566
| 124
| 0.702245
|
d903683fd5d56ac5d7c8a036f7db2df0e592fc1f
| 839
|
py
|
Python
|
google/colab/_kernel.py
|
zbkruturaj/colabtools
|
73c8127fac51ce0509a4a6a994ec64ed5bb70422
|
[
"Apache-2.0"
] | 5
|
2020-03-08T05:39:03.000Z
|
2021-10-29T22:40:43.000Z
|
google/colab/_kernel.py
|
zbkruturaj/colabtools
|
73c8127fac51ce0509a4a6a994ec64ed5bb70422
|
[
"Apache-2.0"
] | null | null | null |
google/colab/_kernel.py
|
zbkruturaj/colabtools
|
73c8127fac51ce0509a4a6a994ec64ed5bb70422
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab-specific kernel customizations."""
from ipykernel import ipkernel
from google.colab import _shell
class Kernel(ipkernel.IPythonKernel):
"""Kernel with additional Colab-specific features."""
def _shell_class_default(self):
return _shell.Shell
| 32.269231
| 74
| 0.765197
|
4e6ef8ebb4ca75246c1cd9f805d0384c9522069c
| 1,015
|
py
|
Python
|
pymote/utils/visualization.py
|
darbula/pymote
|
fd581cc12fcd10beec7e4d72e90bf6250ac17699
|
[
"BSD-3-Clause"
] | 16
|
2015-01-22T11:52:48.000Z
|
2021-01-31T23:30:45.000Z
|
pymote/utils/visualization.py
|
nkitic/pymote
|
84d852c922b39afc64950e967b8309ccd8faf6a5
|
[
"BSD-3-Clause"
] | 5
|
2015-10-30T10:45:04.000Z
|
2017-06-28T17:42:04.000Z
|
pymote/utils/visualization.py
|
nkitic/pymote
|
84d852c922b39afc64950e967b8309ccd8faf6a5
|
[
"BSD-3-Clause"
] | 17
|
2015-06-14T08:53:55.000Z
|
2021-08-25T00:53:07.000Z
|
from pymote.utils.tree import get_root_node
def show_mst(net, treeKey='mst'):
"""
Show tree representation of network.
treeKey -- key in nodes memory (dictionary) where parent and
children data is stored in format:
{'parent': parent_node,
'children': [child_node1, child_node2 ...]}
"""
nodesToCheck = [(get_root_node(net, treeKey), 0)]
edgelist = []
levels = [0]*len(net.nodes()) # level of node in tree, root is 0
while nodesToCheck:
(node, level) = nodesToCheck.pop()
edgelist += [(node, child)
for child in node.memory[treeKey]['children']]
levels[net.nodes().index(node)] = level
nodesToCheck += [(child, level+1)
for child in node.memory[treeKey]['children']]
net.show(edgelist=edgelist, nodeColor=levels)
from matplotlib.pyplot import gca
gca().set_title('Minimum spanning tree in memory[\'%s\']' % treeKey)
| 39.038462
| 73
| 0.585222
|
a16af96e25978801c69ceda7070daff0fd9dd1e6
| 2,037
|
py
|
Python
|
day11.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
day11.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
day11.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
from day11_input import INPUT
class Coordinates(object):
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def travel(self, direction):
if direction == 'n':
self.y += 1
elif direction == 'ne':
self.y += 1
self.x += 1
elif direction == 'se':
self.y -= 1
self.x += 1
elif direction == 's':
self.y -= 1
elif direction == 'sw':
self.y -= 1
self.x -= 1
elif direction == 'nw':
self.y += 1
self.x -= 1
def at_origo(self):
if self.x == 0 and self.y == 0:
return True
return False
def direction_home(self):
direction = ''
        # first find the y-axis direction
if self.y > 0:
direction += 's'
else:
direction += 'n'
        # find the x-axis direction
if self.x > 0:
direction += 'w'
elif self.x < 0:
direction += 'e'
return direction
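# Illustrative round trip using the delta table in Coordinates.travel() above
# (worked by hand, shown only as a usage sketch):
#   c = Coordinates()
#   c.travel('ne'); c.travel('ne')   # x == 2, y == 2
#   c.direction_home()               # -> 'sw'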
class HexFinder(object):
def __init__(self, route):
self._route = route.split(',')
self._coords = Coordinates()
self.furthest_away = 0
def travel(self):
for direction in self._route:
self._coords.travel(direction)
route = self._find_route_back()
if self.furthest_away < len(route):
self.furthest_away = len(route)
def _find_route_back(self):
route = list()
coords = Coordinates(self._coords.x, self._coords.y)
while not coords.at_origo():
dir_home = coords.direction_home()
coords.travel(dir_home)
route.append(dir_home)
return route
def steps_to_origo(self):
route = self._find_route_back()
return len(route)
def main():
hex_finder = HexFinder(INPUT)
hex_finder.travel()
print(hex_finder.steps_to_origo())
print(hex_finder.furthest_away)
if __name__=='__main__':
main()
| 24.542169
| 60
| 0.517919
|
14ea378de01c7652a54a0280f040591066c23905
| 373
|
py
|
Python
|
core/migrations/0002_contato_data.py
|
capmayer/construameumaponte
|
2ef44dab8130597f1ff16b32bfbe35ac44761d48
|
[
"MIT"
] | null | null | null |
core/migrations/0002_contato_data.py
|
capmayer/construameumaponte
|
2ef44dab8130597f1ff16b32bfbe35ac44761d48
|
[
"MIT"
] | null | null | null |
core/migrations/0002_contato_data.py
|
capmayer/construameumaponte
|
2ef44dab8130597f1ff16b32bfbe35ac44761d48
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-03-28 15:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='contato',
name='data',
field=models.DateTimeField(auto_now=True),
),
]
| 19.631579
| 54
| 0.58445
|
5e3ac032966703355ba975cf6e4230084de6fae8
| 980
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ATI/vertex_attrib_array_object.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ATI/vertex_attrib_array_object.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ATI/vertex_attrib_array_object.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_vertex_attrib_array_object'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_vertex_attrib_array_object',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetVertexAttribArrayObjectfvATI(index,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetVertexAttribArrayObjectivATI(index,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLboolean,_cs.GLsizei,_cs.GLuint,_cs.GLuint)
def glVertexAttribArrayObjectATI(index,size,type,normalized,stride,buffer,offset):pass
| 40.833333
| 128
| 0.797959
|
12aeeb883bbb50b230023ccc79a424ef5c1c7157
| 9,490
|
py
|
Python
|
src/config/utils/provision_control.py
|
casek14/contrail-controller
|
18e2572635370b3cb6da2731af049cbeb934f2bb
|
[
"Apache-2.0"
] | 1
|
2019-01-11T06:16:10.000Z
|
2019-01-11T06:16:10.000Z
|
src/config/utils/provision_control.py
|
casek14/contrail-controller
|
18e2572635370b3cb6da2731af049cbeb934f2bb
|
[
"Apache-2.0"
] | null | null | null |
src/config/utils/provision_control.py
|
casek14/contrail-controller
|
18e2572635370b3cb6da2731af049cbeb934f2bb
|
[
"Apache-2.0"
] | 1
|
2020-06-08T11:50:36.000Z
|
2020-06-08T11:50:36.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import argparse
import ConfigParser
from provision_bgp import BgpProvisioner
from vnc_api.vnc_api import *
class ControlProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
if self._args.router_asn and not self._args.oper:
self._vnc_lib = VncApi(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
api_server_use_ssl=self._args.api_server_use_ssl)
# Update global system config also with this ASN
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc_obj.set_autonomous_system(self._args.router_asn)
if self._args.ibgp_auto_mesh is not None:
gsc_obj.set_ibgp_auto_mesh(self._args.ibgp_auto_mesh)
if self._args.set_graceful_restart_parameters == True:
gr_params = GracefulRestartParametersType()
gr_params.set_restart_time(
int(self._args.graceful_restart_time))
gr_params.set_long_lived_restart_time(
int(self._args.long_lived_graceful_restart_time))
gr_params.set_end_of_rib_timeout(
int(self._args.end_of_rib_timeout))
gr_params.set_enable(self._args.graceful_restart_enable)
gr_params.set_bgp_helper_enable(
self._args.graceful_restart_bgp_helper_enable)
gr_params.set_xmpp_helper_enable(
self._args.graceful_restart_xmpp_helper_enable)
gsc_obj.set_graceful_restart_parameters(gr_params)
self._vnc_lib.global_system_config_update(gsc_obj)
return
bp_obj = BgpProvisioner(
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip, self._args.api_server_port,
api_server_use_ssl=self._args.api_server_use_ssl)
if self._args.oper == 'add':
bp_obj.add_bgp_router('control-node', self._args.host_name,
self._args.host_ip, self._args.router_asn,
self._args.address_families, self._args.md5)
elif self._args.oper == 'del':
bp_obj.del_bgp_router(self._args.host_name)
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def gr_time_type(self, value):
time = int(value)
if time < 0 or time > 4095:
raise argparse.ArgumentTypeError("graceful_restart_time %s must be in range (0..4095)" % value)
return time
def llgr_time_type(self, value):
time = int(value)
if time < 0 or time > 16777215:
raise argparse.ArgumentTypeError("long_lived_graceful_restart_time %s must be in range (0..16777215)" % value)
return time
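    # Illustrative behaviour of the two validators above (values picked by hand):
    #   gr_time_type("100")          -> 100 (within 0..4095)
    #   llgr_time_type("99999999")   -> argparse.ArgumentTypeError (max 16777215)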
def _parse_args(self, args_str):
'''
Eg. python provision_control.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--router_asn 64512
--ibgp_auto_mesh|--no_ibgp_auto_mesh
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--oper <add | del>
--md5 <key value>|None(optional)
--graceful_restart_time 100
--long_lived_graceful_restart_time 100
--end_of_rib_timeout 300
--set_graceful_restart_parameters False
--graceful_restart_bgp_helper_enable False
--graceful_restart_xmpp_helper_enable False
--graceful_restart_enable False
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'router_asn': '64512',
'ibgp_auto_mesh': None,
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': None,
'admin_user': None,
'admin_password': None,
'admin_tenant_name': None,
'md5' : None,
'graceful_restart_time': 60,
'long_lived_graceful_restart_time': 300,
'end_of_rib_timeout': 300,
'graceful_restart_bgp_helper_enable': False,
'graceful_restart_xmpp_helper_enable': False,
'graceful_restart_enable': False,
'set_graceful_restart_parameters': False,
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of control-node")
parser.add_argument("--host_ip", help="IP address of control-node")
parser.add_argument(
"--router_asn", help="AS Number the control-node is in", required=True)
parser.add_argument(
"--address_families", help="Address family list",
choices=["route-target", "inet-vpn", "e-vpn", "erm-vpn", "inet6-vpn"],
nargs="*", default=[])
parser.add_argument(
"--md5", help="Md5 config for the node")
parser.add_argument(
"--ibgp_auto_mesh", help="Create iBGP mesh automatically", dest='ibgp_auto_mesh', action='store_true')
parser.add_argument(
"--no_ibgp_auto_mesh", help="Don't create iBGP mesh automatically", dest='ibgp_auto_mesh', action='store_false')
parser.add_argument(
"--api_server_ip", help="IP address of api server", required=True)
parser.add_argument("--api_server_port", help="Port of api server", required=True)
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--oper",
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--graceful_restart_time",
help="Graceful Restart Time in seconds (0..4095)",
type=self.gr_time_type, default=60,
required=False)
parser.add_argument(
"--long_lived_graceful_restart_time",
help="Long Lived Graceful Restart Time in seconds (0..16777215)",
type=self.llgr_time_type, default=300,
required=False)
parser.add_argument(
"--end_of_rib_timeout",
help="EndOfRib timeout value in seconds (0..4095)",
type=self.gr_time_type, default=300,
required=False)
parser.add_argument("--graceful_restart_bgp_helper_enable",
action='store_true',
help="Enable helper mode for BGP graceful restart")
parser.add_argument("--graceful_restart_xmpp_helper_enable",
action='store_true',
help="Enable helper mode for XMPP graceful restart")
parser.add_argument("--graceful_restart_enable",
action='store_true',
help="Enable Graceful Restart")
parser.add_argument("--set_graceful_restart_parameters",
action='store_true',
help="Set Graceful Restart Parameters")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
# end class ControlProvisioner
def main(args_str=None):
ControlProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
| 43.53211
| 124
| 0.577871
|
80ac95e83ab7f54b765cdaaed2771acc446cc54b
| 4,993
|
py
|
Python
|
chumpy/optimization.py
|
jeffhsu3/chumpy
|
00e83955ea45cd129d55a3017c08f89045842224
|
[
"MIT"
] | null | null | null |
chumpy/optimization.py
|
jeffhsu3/chumpy
|
00e83955ea45cd129d55a3017c08f89045842224
|
[
"MIT"
] | null | null | null |
chumpy/optimization.py
|
jeffhsu3/chumpy
|
00e83955ea45cd129d55a3017c08f89045842224
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
__all__ = ['minimize']
import numpy as np
from . import ch
import scipy.sparse as sp
import scipy.optimize
from .optimization_internal import minimize_dogleg
#from memory_profiler import profile, memory_usage
# def disable_cache_for_single_parent_node(node):
# if hasattr(node, '_parents') and len(node._parents.keys()) == 1:
# node.want_cache = False
# Nelder-Mead
# Powell
# CG
# BFGS
# Newton-CG
# Anneal
# L-BFGS-B
# TNC
# COBYLA
# SLSQP
# dogleg
# trust-ncg
def minimize(fun, x0, method='dogleg', bounds=None, constraints=(), tol=None, callback=None, options=None):
if method == 'dogleg':
if options is None: options = {}
return minimize_dogleg(fun, free_variables=x0, on_step=callback, **options)
if isinstance(fun, list) or isinstance(fun, tuple):
fun = ch.concatenate([f.ravel() for f in fun])
if isinstance(fun, dict):
fun = ch.concatenate([f.ravel() for f in fun.values()])
obj = fun
free_variables = x0
    from .ch import SumOfSquares
hessp = None
hess = None
if obj.size == 1:
obj_scalar = obj
else:
obj_scalar = SumOfSquares(obj)
def hessp(vs, p,obj, obj_scalar, free_variables):
changevars(vs,obj,obj_scalar,free_variables)
if not hasattr(hessp, 'vs'):
hessp.vs = vs*0+1e16
if np.max(np.abs(vs-hessp.vs)) > 0:
J = ns_jacfunc(vs,obj,obj_scalar,free_variables)
hessp.J = J
hessp.H = 2. * J.T.dot(J)
hessp.vs = vs
return np.array(hessp.H.dot(p)).ravel()
#return 2*np.array(hessp.J.T.dot(hessp.J.dot(p))).ravel()
if method.lower() != 'newton-cg':
def hess(vs, obj, obj_scalar, free_variables):
changevars(vs,obj,obj_scalar,free_variables)
if not hasattr(hessp, 'vs'):
hessp.vs = vs*0+1e16
if np.max(np.abs(vs-hessp.vs)) > 0:
J = ns_jacfunc(vs,obj,obj_scalar,free_variables)
hessp.H = 2. * J.T.dot(J)
return hessp.H
def changevars(vs, obj, obj_scalar, free_variables):
cur = 0
changed = False
for idx, freevar in enumerate(free_variables):
sz = freevar.r.size
newvals = vs[cur:cur+sz].copy().reshape(free_variables[idx].shape)
if np.max(np.abs(newvals-free_variables[idx]).ravel()) > 0:
free_variables[idx][:] = newvals
changed = True
cur += sz
methods_without_callback = ('anneal', 'powell', 'cobyla', 'slsqp')
if callback is not None and changed and method.lower() in methods_without_callback:
callback(None)
return changed
def residuals(vs,obj, obj_scalar, free_variables):
changevars(vs, obj, obj_scalar, free_variables)
residuals = obj_scalar.r.ravel()[0]
return residuals
def scalar_jacfunc(vs,obj, obj_scalar, free_variables):
if not hasattr(scalar_jacfunc, 'vs'):
scalar_jacfunc.vs = vs*0+1e16
if np.max(np.abs(vs-scalar_jacfunc.vs)) == 0:
return scalar_jacfunc.J
changevars(vs, obj, obj_scalar, free_variables)
if True: # faster, at least on some problems
result = np.concatenate([np.array(obj_scalar.lop(wrt, np.array([[1]]))).ravel() for wrt in free_variables])
else:
jacs = [obj_scalar.dr_wrt(wrt) for wrt in free_variables]
for idx, jac in enumerate(jacs):
if sp.issparse(jac):
jacs[idx] = jacs[idx].todense()
result = np.concatenate([jac.ravel() for jac in jacs])
scalar_jacfunc.J = result
scalar_jacfunc.vs = vs
return result.ravel()
def ns_jacfunc(vs,obj, obj_scalar, free_variables):
if not hasattr(ns_jacfunc, 'vs'):
ns_jacfunc.vs = vs*0+1e16
if np.max(np.abs(vs-ns_jacfunc.vs)) == 0:
return ns_jacfunc.J
changevars(vs, obj, obj_scalar, free_variables)
jacs = [obj.dr_wrt(wrt) for wrt in free_variables]
        # hstack is not defined in this module; stack sparse or dense Jacobians accordingly
        result = sp.hstack(jacs, format='csc') if any(sp.issparse(j) for j in jacs) else np.hstack(jacs)
ns_jacfunc.J = result
ns_jacfunc.vs = vs
return result
x1 = scipy.optimize.minimize(
method=method,
fun=residuals,
callback=callback,
x0=np.concatenate([free_variable.r.ravel() for free_variable in free_variables]),
jac=scalar_jacfunc,
hessp=hessp, hess=hess, args=(obj, obj_scalar, free_variables),
bounds=bounds, constraints=constraints, tol=tol, options=options).x
changevars(x1, obj, obj_scalar, free_variables)
return free_variables
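# Hedged usage sketch of minimize() above. The names below are illustrative:
# `x` is assumed to be an existing chumpy free variable and `target` a constant
# array of the same shape.
#   residuals = x - target
#   minimize(residuals, x0=[x], method='dogleg')
# method='dogleg' dispatches to minimize_dogleg; other method names are passed
# through to scipy.optimize.minimize.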
def main():
pass
if __name__ == '__main__':
main()
| 30.820988
| 131
| 0.589425
|
8c6af9efd2eeade0fe53ab90e047019362d26513
| 2,608
|
py
|
Python
|
ai_lab/modelloader.py
|
ZachisGit/MultidirectionalAutoencoders
|
49257d9912e3de056034bb8315b7b2574cfcb030
|
[
"Apache-2.0"
] | 3
|
2018-03-29T22:49:37.000Z
|
2019-01-18T15:13:51.000Z
|
ai_lab/modelloader.py
|
ZachisGit/MultidirectionalAutoencoders
|
49257d9912e3de056034bb8315b7b2574cfcb030
|
[
"Apache-2.0"
] | null | null | null |
ai_lab/modelloader.py
|
ZachisGit/MultidirectionalAutoencoders
|
49257d9912e3de056034bb8315b7b2574cfcb030
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import imp
from ai_lab import storagemanager as sm
'''
load_model(path[string])
Description:
	Returns the loaded model class, based on
	BaseModel (ModelBaseStructure). The class should be
	derived from BaseModel; otherwise NotImplementedError
	is raised.
model(class):
|_ class_name = model
|_ abstracted from BaseModel
'''
def load_model(model_path):
model_path = sm.check_file_path(model_path)
if model_path == None:
return None
#if model_path[-3:] == ".py":
# model_path = model_path[:-3]
model = imp.load_source("model",model_path).model(model_path)
return model
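# Hedged usage sketch (the path and hyper-parameter names are illustrative and
# depend on the concrete model file; see BaseModel below for the call order):
#   model = load_model("models/my_model.py")
#   model.assign_dataset(dataset)
#   model.set_hyper_params({"learning_rate": 1e-3})
#   model.init()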
'''
BaseModel(class)
Description:
Container for ModelBaseStructure, holds base functionality
(like abstract class) of the standard model.
'''
class BaseModel:
'''
Procedure:
-__init__
-assign_dataset
-set_hyper_params
-init
-get_...
'''
# Model Parameters
hyper_params = None
hyper_param_dict = None
dataset = None
# Model Infos
model_path = None
model_name = None
model_group = None
model_compatibility = None
# Assigns a dataset if dataset_loaded = True
# else sets self.dataset to None
# Ret: -
# class-vars: dataset
def assign_dataset(self, dataset):
if dataset.dataset_loaded == True:
self.dataset = dataset
else:
self.dataset = None
'''
Set the hyper-parameters based on the hyper_param_dict
if the dict is None raise NotImplementedError
	just ignore hyper_parameters not set in hyper_param_dict
class-vars: hyper_params, hyper_param_dict
Ret: -
'''
def set_hyper_params(self, hyper_params):
if self.hyper_param_dict == None:
raise NotImplementedError()
# Set all parameters defined in hyper_params,
# for the rest use the predefined values in
# self.hyper_param_dict.
n_hyper_params = {}
for key,value in self.hyper_param_dict.iteritems():
if key in hyper_params:
n_hyper_params[key] = hyper_params[key]
else:
n_hyper_params[key] = value
self.hyper_params = n_hyper_params
# Ret: -
# class-vars: model (tflearn model),etc
def init(self):
raise NotImplementedError()
# Ret: ds.get_batch(batch_size,validation=validation)
# class-vars: dataset
def get_training_batch(self,validation=False):
raise NotImplementedError()
# Ret: test_data,test_labels,cell_counts
# class-vars: dataset
def get_evaluation_batch(self):
raise NotImplementedError()
# Ret: data,labels
# class-vars: dataset
def get_livedisplay_batch(self):
raise NotImplementedError()
# Ret: tflearn model
# class-vars: model (tflearn model)
def get_model(self):
raise NotImplementedError()
| 20.864
| 62
| 0.732362
|
fde0df824f3f664297d7a9098986cb40c2a45740
| 6,273
|
py
|
Python
|
railrl/launchers/contextual_env_launcher_util.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
railrl/launchers/contextual_env_launcher_util.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
railrl/launchers/contextual_env_launcher_util.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
from functools import partial
import numpy as np
import railrl.samplers.rollout_functions as rf
import railrl.torch.pytorch_util as ptu
from railrl.data_management.contextual_replay_buffer import (
ContextualRelabelingReplayBuffer,
SelectKeyFn,
)
from railrl.envs.contextual import ContextualEnv
from railrl.envs.contextual.goal_conditioned import (
GoalDistributionFromMultitaskEnv,
ContextualRewardFnFromMultitaskEnv,
)
from railrl.samplers.data_collector.contextual_path_collector import (
ContextualPathCollector
)
from railrl.visualization.video import get_save_video_function
from railrl.torch.networks import FlattenMlp
from railrl.torch.sac.policies import MakeDeterministic
from railrl.torch.sac.policies import TanhGaussianPolicy
from railrl.torch.sac.sac import SACTrainer
from railrl.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
def goal_conditioned_sac_experiment(
max_path_length,
qf_kwargs,
sac_trainer_kwargs,
replay_buffer_kwargs,
policy_kwargs,
algo_kwargs,
env_id=None,
env_class=None,
env_kwargs=None,
observation_key='state_observation',
desired_goal_key='state_desired_goal',
achieved_goal_key='state_achieved_goal',
contextual_env_kwargs=None,
evaluation_goal_sampling_mode=None,
exploration_goal_sampling_mode=None,
# Video parameters
save_video=True,
save_video_kwargs=None,
):
if contextual_env_kwargs is None:
contextual_env_kwargs = {}
if not save_video_kwargs:
save_video_kwargs = {}
def contextual_env_distrib_and_reward(
env_id, env_class, env_kwargs, goal_sampling_mode
):
env = get_gym_env(env_id, env_class=env_class, env_kwargs=env_kwargs)
env.goal_sampling_mode = goal_sampling_mode
goal_distribution = GoalDistributionFromMultitaskEnv(
env,
desired_goal_key=desired_goal_key,
)
reward_fn = ContextualRewardFnFromMultitaskEnv(
env=env,
desired_goal_key=desired_goal_key,
achieved_goal_key=achieved_goal_key,
)
env = ContextualEnv(
env,
context_distribution=goal_distribution,
reward_fn=reward_fn,
observation_key=observation_key,
**contextual_env_kwargs,
)
return env, goal_distribution, reward_fn
expl_env, expl_context_distrib, expl_reward = contextual_env_distrib_and_reward(
env_id, env_class, env_kwargs, exploration_goal_sampling_mode
)
eval_env, eval_context_distrib, eval_reward = contextual_env_distrib_and_reward(
env_id, env_class, env_kwargs, evaluation_goal_sampling_mode
)
context_key = desired_goal_key
obs_dim = (
expl_env.observation_space.spaces[observation_key].low.size
+ expl_env.observation_space.spaces[desired_goal_key].low.size
)
action_dim = expl_env.action_space.low.size
def create_qf():
return FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
**qf_kwargs
)
qf1 = create_qf()
qf2 = create_qf()
target_qf1 = create_qf()
target_qf2 = create_qf()
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
**policy_kwargs
)
ob_keys_to_save = [
observation_key,
desired_goal_key,
achieved_goal_key,
]
def concat_context_to_obs(batch):
obs = batch['observations']
next_obs = batch['next_observations']
context = batch['contexts']
batch['observations'] = np.concatenate([obs, context], axis=1)
batch['next_observations'] = np.concatenate([next_obs, context], axis=1)
return batch
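    # Note (illustrative numbers): with, e.g., a 6-dim state observation and a
    # 3-dim goal, the networks defined above consume 9-dim inputs, since obs_dim
    # already includes the concatenated goal context.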
replay_buffer = ContextualRelabelingReplayBuffer(
env=eval_env,
context_key=desired_goal_key,
context_distribution=eval_context_distrib,
sample_context_from_obs_dict_fn=SelectKeyFn(achieved_goal_key),
ob_keys_to_save=ob_keys_to_save,
reward_fn=eval_reward,
post_process_batch_fn=concat_context_to_obs,
**replay_buffer_kwargs
)
trainer = SACTrainer(
env=expl_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**sac_trainer_kwargs
)
eval_path_collector = ContextualPathCollector(
eval_env,
MakeDeterministic(policy),
observation_key=observation_key,
context_key=context_key,
)
expl_path_collector = ContextualPathCollector(
expl_env,
policy,
observation_key=observation_key,
context_key=context_key,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
max_path_length=max_path_length,
**algo_kwargs
)
algorithm.to(ptu.device)
if save_video:
rollout_function = partial(
rf.contextual_rollout,
max_path_length=max_path_length,
observation_key=observation_key,
context_key=context_key,
)
eval_video_func = get_save_video_function(
rollout_function,
eval_env,
MakeDeterministic(policy),
tag="eval",
**save_video_kwargs
)
train_video_func = get_save_video_function(
rollout_function,
expl_env,
policy,
tag="train",
**save_video_kwargs
)
algorithm.post_train_funcs.append(eval_video_func)
algorithm.post_train_funcs.append(train_video_func)
algorithm.train()
def get_gym_env(env_id, env_class=None, env_kwargs=None):
if env_kwargs is None:
env_kwargs = {}
assert env_id or env_class
if env_id:
import gym
import multiworld
multiworld.register_all_envs()
env = gym.make(env_id)
else:
env = env_class(**env_kwargs)
return env
| 30.75
| 84
| 0.676391
|
ee8ec999fa8b5c6a1d8c6e509ef788e5072983fd
| 3,445
|
py
|
Python
|
clients/oathkeeper/python/ory_oathkeeper_client/models/swagger_rule_response.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 77
|
2020-02-14T17:27:36.000Z
|
2022-03-25T08:44:52.000Z
|
clients/oathkeeper/python/ory_oathkeeper_client/models/swagger_rule_response.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 125
|
2020-02-07T21:45:52.000Z
|
2022-03-31T12:54:24.000Z
|
clients/oathkeeper/python/ory_oathkeeper_client/models/swagger_rule_response.py
|
russelg/sdk
|
2515b35981784319bd7d58fcf0b5ab85b501b62f
|
[
"Apache-2.0"
] | 44
|
2020-01-31T22:05:47.000Z
|
2022-03-09T14:41:22.000Z
|
# coding: utf-8
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.37
Contact: hi@ory.am
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ory_oathkeeper_client.configuration import Configuration
class SwaggerRuleResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'body': 'SwaggerRule'
}
attribute_map = {
'body': 'Body'
}
def __init__(self, body=None, local_vars_configuration=None): # noqa: E501
"""SwaggerRuleResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this SwaggerRuleResponse. # noqa: E501
:return: The body of this SwaggerRuleResponse. # noqa: E501
:rtype: SwaggerRule
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this SwaggerRuleResponse.
:param body: The body of this SwaggerRuleResponse. # noqa: E501
:type: SwaggerRule
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SwaggerRuleResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SwaggerRuleResponse):
return True
return self.to_dict() != other.to_dict()
| 28.237705
| 187
| 0.583454
|
fe413fdae7533f0c9f9626347e191dacc0c4e485
| 28,271
|
py
|
Python
|
Text To Speech/strange_sound.py
|
poly451/Tutorials
|
8b6746394a8b651c9b746adae11e768bb9c74a38
|
[
"MIT"
] | 10
|
2020-10-17T18:40:05.000Z
|
2022-02-21T17:42:44.000Z
|
Text To Speech/strange_sound.py
|
Mimsadi/Tutorials
|
ffd3b6eba3cecc4c30984cd1b33d7944bb4e0317
|
[
"MIT"
] | null | null | null |
Text To Speech/strange_sound.py
|
Mimsadi/Tutorials
|
ffd3b6eba3cecc4c30984cd1b33d7944bb4e0317
|
[
"MIT"
] | 18
|
2020-10-22T09:00:33.000Z
|
2022-03-29T17:52:14.000Z
|
import pyttsx3
import os, sys
import utils
"""
rate — Integer speech rate in words per minute. The base value is 200.
voice — String identifier of the active voice
volume — Floating point volume in the range of 0.0 to 1.0 inclusive
voices — List of pyttsx3.voice.Voice descriptor objects
"""
# ----------------------------------------------------------
# class Voices
# ----------------------------------------------------------
class Voice:
def __init__(self, mydict):
# print(mydict)
self.name = mydict["name"]
self.id = mydict["id"]
self.languages = mydict["voice languages"]
self.gender = mydict["voice gender"]
self.age = mydict["voice age"]
# ----------------------------------------------------------
# class Voices
# ----------------------------------------------------------
class Voices:
def __init__(self):
self.inner = []
# ----
filepath = os.path.join("data", "voices.txt")
with open(filepath, "r") as f:
mylines = f.readlines()
mylines = [i.strip() for i in mylines if len(i.strip()) > 0]
for i in range(0, len(mylines), 5):
mydict = {}
for j in range(5):
# print(mylines[i+j])
mydict = utils.strip_value(mylines[i+j], mydict)
new_voice = Voice(mydict)
self.inner.append(new_voice)
# -----------------------------------------------
def get_voice_by_name(self, name):
name = name.strip()
for elem in self.inner:
# print("elem.name: {}, name: {}".format(elem.name, name))
if elem.name == name:
return elem
return None
def get_voices_by_language(self, language):
language = language.lower().strip()
mylist = []
for elem in self.inner:
if elem.languages == language:
mylist.append(elem)
return mylist
def get_voices_by_gender(self, gender):
# ['voicegendermale', 'voicegenderneuter', 'voicegenderfemale']
this_gender = ""
gender = gender.lower().strip()
if gender in ["female", "f", "voicegenderfemale"]:
this_gender = 'voicegenderfemale'
elif gender in ["male", "m", "voicegendermale"]:
this_gender = 'voicegendermale'
elif gender in ["neuter", "n", "voicegenderneuter"]:
this_gender = 'voicegenderneuter'
else:
s = "I don't recognize this: {} ({})".format(gender, type(gender))
raise ValueError(s)
# ----
mylist = []
for elem in self.inner:
if elem.gender == this_gender:
mylist.append(elem)
return mylist
def get_voices_by_age(self, age):
mylist = []
for elem in self.inner:
# print("age: {} ({}) == {} ({})".format(elem.age, type(elem.age), age, type(age)))
if elem.age == age:
mylist.append(elem)
return mylist
def filter_voices(self, age, gender, langauge):
ages_filtered = self.get_voices_by_age(age)
genders_filtered = self.get_voices_by_gender(gender)
langages_filtered = self.get_voices_by_language(langauge)
intersection = list(set(ages_filtered + genders_filtered))
intersection = list(set(intersection + langages_filtered))
if len(intersection) == 0:
raise ValueError("Error!")
return intersection
# -----------------------------------------------
def get_names(self):
mylist = []
for elem in self.inner:
mylist.append(elem.name)
return mylist
def get_ids(self):
mylist = []
for elem in self.inner:
mylist.append(elem.id)
return mylist
def get_languages(self):
mylist = []
for elem in self.inner:
mylist.append(elem.languages)
return list(set(mylist))
def get_genders(self):
mylist = []
for elem in self.inner:
mylist.append(elem.gender)
return list(set(mylist))
def get_gender(self):
mylist = []
for elem in self.inner:
mylist.append(elem.gender)
return list(set(mylist))
def get_ages(self):
mylist = []
for elem in self.inner:
mylist.append(elem.age)
return list(set(mylist))
def update_init_file(self, this_voice, init_filepath):
name = "name: {}\n".format(this_voice.name)
id = "id: {}\n".format(this_voice.id)
languages = "langauges: {}\n".format(this_voice.languages)
gender = "gender: {}\n".format(this_voice.gender)
age = "age: {}\n".format(this_voice.age)
with open(init_filepath, "w") as f:
f.write(name)
f.write(id)
f.write(languages)
f.write(gender)
f.write(age)
# ----------------------------------------------------------
# class PySound
# ----------------------------------------------------------
class StrangeSound:
def __init__(self):
self.engine = pyttsx3.init()
self.keep_looping = True
# ------------------------
self.load_initialization_file()
def load_initialization_file(self):
with open(os.path.join("data", "init.txt"), "r") as f:
mylines = f.readlines()
mylines = [i.strip() for i in mylines if len(i.strip()) > 0]
mydict = {}
for i in range(5):
mydict = utils.strip_value(mylines[i], mydict)
self.engine.setProperty("voice", mydict["id"])
def change_voice_by_name(self):
print("Here are names of the voices I know. Which one would you like?\n")
voices = Voices()
names = voices.get_names()
utils.print_elements(names, 8)
user_input = ""
while not user_input in names:
user_input = input("> ").strip()
if user_input == "quit": return False
this_voice = voices.get_voice_by_name(user_input)
filepath = os.path.join("data", "init.txt")
self.engine.setProperty("voice", this_voice.id)
voices.update_init_file(this_voice, filepath)
self.speak("Hi! My name is {}.".format(this_voice.name))
def change_volume(self):
print("The defaut for 'volume' is 1. What would you like to change it to? (0 to 3)")
user_input = ""
while not user_input in ["0", "1", "2", "3"]:
user_input = input("> ").lower().strip()
user_input = int(user_input)
self.set_sound_property("volume", user_input)
self.speak("The volume has been chnaged")
def play_script(self):
line_length = 60
indy = "com.apple.speech.synthesis.voice.daniel.premium"
satipo = "com.apple.speech.synthesis.voice.Alex"
serena = "com.apple.speech.synthesis.voice.moira.premium"
# ------------------------------------------
text = "Indy and Satipo fan out to fight their way through the entwined "
text += "trees that guard the temple. Visibility is cut to five feet "
text += "in the heavy mist. Satipo extracts a short, native dart from "
text += "a tree and examines the point gingerly. ... As Satipo shows Indy the dart, he says,"
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "The Hovitos are near. The poison is still fresh...three days. "
text += "I tell you, they’re following us."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", satipo)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 400)
self.speak(text)
# -----
text = "If they knew we were here, they would have killed us already."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", indy)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "In the undergrowth, there is a slithering movement. Their eyes "
text += "are drawn to one of the trees that surround them. There is something ..."
text += "A dessicated corpse is attached to the trunk of the tree with arrows. Sapito screams."
text += "Indy examins the corpse."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "So this is where Forestal cashed in."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
self.engine.setProperty("voice", indy)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "A friend of yours?"
print(text)
print("")
self.engine.setProperty("voice", satipo)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 400)
self.speak(text)
# -----
text = "Competitor. He was good, very good."
print(text)
print("")
self.engine.setProperty("voice", indy)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "No one has ever come out of there alive. Why should we put our faith in you?"
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", satipo)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 400)
self.speak(text)
# -----
text = "Indy shows Satipo his map. Satipo looks at it with naked greed. "
text += "Indy spreads out the map on the grass. Minutes go by as the men study it. "
text += "Satipo leans closer. He says,"
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "It's the floor plan of the temple! It has to be!"
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", satipo)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 400)
self.speak(text)
# -----
text = "Indy nodded."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
print("")
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "That's how I figure it."
text_list = utils.separate_text_into_lines(text, line_length)
[print(i) for i in text_list]
self.engine.setProperty("voice", indy)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "No one else had this ... Only us."
print(text)
self.engine.setProperty("voice", satipo)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 400)
self.speak(text)
# -----
text = "Will Indy survive the dangers that await him in the tunnel, "
text += "or will he die an agonizing death like all those before him?"
print(text)
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
# -----
text = "The end."
print(text)
self.engine.setProperty("voice", serena)
self.engine.setProperty("volume", 1)
self.engine.setProperty("rate", 200)
self.speak(text)
def print_out_properties(self):
print("rate: ", self.engine.getProperty('rate'))
print("volume: ", self.engine.getProperty('volume'))
def print_out_all_voices(self):
voices = self.engine.getProperty('voices')
for voice in voices:
print("Voice name: {}".format(voice.name))
print("Voice ID: {}".format(voice.id))
print("Voice Languages: {}".format(voice.languages))
print("Voice Gender: {}".format(voice.gender))
print("Voice Age: {}".format(voice.age))
print("-" * 40)
def write_voices_to_file(self):
filepath = os.path.join("data", "voices_text.txt")
voices = self.engine.getProperty('voices')
mylist = []
for voice in voices:
language = ''.join(voice.languages).strip()
s = "name: {}\n".format(voice.name)
s += "ID: {}\n".format(voice.id)
s += "Voice Languages: {}\n".format(language)
s += "Voice Gender: {}\n".format(voice.gender)
s += "Voice Age: {}\n".format(voice.age)
s += "\n"
mylist.append(s)
# [print(i) for i in mylist]
with open(filepath, "w") as f:
for elem in mylist:
f.write(elem)
def set_properties(self):
self.engine.setProperty('volume', 1)
self.engine.setProperty('rate', 200)
voices = self.engine.getProperty('voices')
# self.engine.setProperty("voice", voices[1].id)
id = "com.apple.speech.synthesis.voice.BadNews"
id = "com.apple.speech.synthesis.voice.Bahh"
id = "com.apple.speech.synthesis.voice.diego"
id = "com.apple.speech.synthesis.voice.daniel.premium"
id = "com.apple.speech.synthesis.voice.carmit"
id = "com.apple.speech.synthesis.voice.GoodNews"
self.engine.setProperty("voice", id)
# self.engine.setProperty('age', 300)
def test(self):
self.speak()
self.set_properties()
self.print_out_properties()
self.speak()
self.print_out_properties()
def print_voices_to_file(self):
self.write_voices_to_file()
# ************************************************
def speak(self, text=""):
# this_text = "Though I speak with the tongues of men and of angels, and have not charity, I am become as sounding brass, or a tinkling cymbal."
this_text = "The cat is on the mat."
if len(text) == 0:
self.engine.say(this_text)
else:
self.engine.say(text)
self.engine.runAndWait()
# file_to_save = os.path.join("data", "exp_my_exp")
# engine.save_to_file('the text I want to save as audio', file_to_save)
def cycle_through_voices(self):
myvoices = Voices()
ids = myvoices.get_ids()
for id in ids:
print("playing {} ...".format(id))
self.engine.setProperty("voice", id)
self.speak()
print("-" * 40)
def speak_some_text(self):
def is_valid(mystring, mylist):
if not utils.is_int(mystring): return False
myint = int(mystring)
for i in range(len(mylist)):
if myint - 1 == i:
return True
return False
# -----------------------------------------
first_choice = "enter the text you would like me to speak"
second_choice = "Enter the filename you would like me to read"
choices = [first_choice, second_choice, "quit"]
text = ""
default_text = """Though I speak with the tongues of men and of angels,
and have not charity, I am become as sounding brass,
or a tinkling cymbal."""
# -----------------------------------------
for count, elem in enumerate(choices):
print("{}) {}".format(count + 1, elem))
user_input = input("> ").lower().strip()
while not is_valid(user_input, choices):
user_input = input("> ").lower().strip()
if user_input == "quit": return False
user_input = int(user_input)
        user_choice = choices[user_input - 1].strip()
# -----------------------------------------
if user_choice == first_choice:
print("Enter the text you would like me to speak:")
user_input = input("> ").lower().strip()
while len(user_input) == 0:
user_input = input("> ").lower().strip()
if user_input == "quit": sys.exit()
self.engine.say(user_input)
self.engine.runAndWait()
print("Finished speaking.")
elif user_choice == second_choice:
print("Entering the filename you would like me to read :..")
filepath = os.path.join("data", "text_files")
files = os.listdir(filepath)
for count, file in enumerate(files):
print("{}) {}".format(count + 1, file))
user_input = input("> ").lower().strip()
while not utils.is_int(user_input):
user_input = input("> ").lower().strip()
if user_input == "quit": sys.exit()
filename = files[int(user_input)-1]
print("Reading file: ", filename)
filepath = os.path.join("data", "text_files", filename)
mytext = ""
with open(filepath, "r") as f:
mytext = f.read()
# -----------------------------------------
self.engine.say(mytext)
self.engine.runAndWait()
print("Finished speaking.")
elif user_choice == "quit":
pass
else:
raise ValueError("I don't recognize that: {}".format(user_choice))
def choose_by_age(self):
def is_valid(mystring, name_list):
if not mystring in name_list:
return False
return True
myvoices = Voices()
age_list = myvoices.get_ages()
utils.print_elements(age_list, 6)
# ----
print("What age would you like to choose?")
age = input("> ").lower().strip()
while len(age) == 0:
age = input("> ").lower().strip()
age = int(age)
voices_found = myvoices.get_voices_by_age(age)
if len(voices_found) == 0:
raise ValueError("Error!")
[print(i.name) for i in voices_found]
# ----
print("What name would you like to choose?")
the_name = input("> ").strip()
while not is_valid(the_name, myvoices.get_names()):
the_name = input("> ").strip()
this_voice = myvoices.get_voice_by_name(the_name)
# print("This is the voice id: {}".format(this_voice.id))
# ----
self.engine.setProperty("voice", this_voice.id)
self.speak()
def set_sound_property(self, property, value):
def is_valid_volume(mystring):
if not utils.is_int(mystring): return False
myint = int(mystring)
if myint >= 0 and myint <= 2:
return True
return False
# ------------------------------------
if not property in ["volume", "rate"]:
raise ValueError("Error!")
if value < 0:
raise ValueError("Error!")
# ---------------------------------------
if property == "volume":
if not is_valid_volume(value):
raise ValueError("Error")
self.engine.setProperty('volume', value)
elif property == "rate":
if not (value <= 400 and value >= 0):
raise ValueError("Error")
self.engine.setProperty('rate', value)
else:
raise ValueError("Error")
def select_voice(self):
def is_valid(mystring, thelist):
# print("the name: {}".format(mystring))
# print("the list: {}".format(thelist))
if len(mystring) == 0: return False
if not mystring in thelist:
return False
return True
# ----
print("Select by 1) Name, 2) Language, 3) Gender, 4) Age")
user_input = input("> ").strip()
while not user_input in ["1", "2", "3", "4", "5"]:
user_input = input("> ").strip()
if user_input == "quit": return False
user_input = int(user_input)
# -----------------------------------------
if user_input == 1: # Name
myvoices = Voices()
name_list = myvoices.get_names()
utils.print_elements(name_list, 6)
# ----
print("What name would you like to choose?")
the_name = input("> ").strip()
while not is_valid(the_name, name_list):
the_name = input("> ").strip()
if the_name == "quit": sys.exit()
this_voice = myvoices.get_voice_by_name(the_name)
# this_voice_id = utils.format_voice_id(this_voice.name)
self.engine.setProperty("voice", this_voice.id)
# self.engine.setProperty("voice", "com.apple.speech.synthesis.voice.Hysterical")
self.speak()
elif user_input == 2: # language
myvoices = Voices()
languages_list = myvoices.get_languages()
utils.print_elements(languages_list, 6)
# ----
print("What language would you like to choose?")
langauge = input("> ").lower().strip()
while len(langauge) == 0:
langauge = input("> ").lower().strip()
voices_found = myvoices.get_voices_by_language(langauge)
if len(voices_found) == 0:
raise ValueError("Error!")
[print(i.name) for i in voices_found]
# ----
print("What name would you like to choose?")
the_name = input("> ").lower().strip()
while len(the_name) == 0:
the_name = input("> ").lower().strip()
this_voice = myvoices.get_voice_by_name(the_name)
# ----
self.engine.setProperty("voice", utils.format_voice_id(this_voice.name))
self.speak()
elif user_input == 3: # gender
# ['voicegendermale', 'voicegenderneuter', 'voicegenderfemale']
myvoices = Voices()
gender_list = myvoices.get_genders()
utils.print_elements(gender_list, 6)
# ----
print("What gender would you like to choose?")
gender = input("> ").lower().strip()
while len(gender) == 0:
gender = input("> ").lower().strip()
voices_found = myvoices.get_voices_by_gender(gender)
if len(voices_found) == 0:
raise ValueError("Error!")
[print(i.name) for i in voices_found]
# ----
print("What name would you like to choose?")
the_name = input("> ").lower().strip()
while len(the_name) == 0:
the_name = input("> ").lower().strip()
this_voice = myvoices.get_voice_by_name(the_name)
print("This is the voice id: {}".format(this_voice.id))
print("This is the new id: {}".format(utils.format_voice_id(this_voice.name)))
# ----
self.engine.setProperty("voice", utils.format_voice_id(this_voice.name))
self.speak()
elif user_input == 4: # age
self.choose_by_age()
else:
raise ValueError("Error!")
def change_program_properties(self):
def is_valid(mystring, mybeg, myend):
if not utils.is_int(mystring): return False
myint = int(mystring)
if not (myint >= mybeg and myint <= myend):
return False
return True
# -----------------------------------------
print("Entering procedure change_program_properties")
voice_properties = ["change rate", "change volume", "change voice", "quit"]
for count, elem in enumerate(voice_properties):
print("{}) {}".format(count + 1, elem))
user_input = input("> ").lower().strip()
while not utils.is_valid(user_input, voice_properties):
user_input = input("> ").lower().strip()
if user_input == "quit": sys.exit()
print("You chose: {}".format(user_input))
user_input = int(user_input)
user_choice = voice_properties[user_input-1]
print(user_choice)
# -----------------------------------------
if user_choice == "change rate":
print("The defaut for 'rate' is 200. What would you like to change it to? (1 to 400)")
user_input = input("> ").lower().strip()
while not is_valid(user_input, mybeg=1,myend=400):
user_input = input("> ").lower().strip()
self.engine.setProperty('rate', int(user_input))
self.speak()
elif user_choice == "change volume":
print("The defaut for 'volume' is 1. What would you like to change it to? (0 to 3)")
user_input = input("> ").lower().strip()
while not is_valid(user_input, 0, 3):
user_input = input("> ").lower().strip()
self.engine.setProperty('volume', int(user_input))
self.speak()
elif user_choice == "change voice":
print("You are about to change the voice.")
# print("Would you like more information about the available voices? (y/n)")
# user_input = ""
# while not user_input in ["y", "yes", "n", "no"]:
# user_input = input("> ").lower().strip()
# if user_input in ["y", "yes"]:
self.select_voice()
elif user_choice == "quit":
raise NotImplementedError
else:
raise ValueError("Error!")
def main(self):
choices = ["Speak some text"]
choices += ["cycle through voices"]
choices += ["Settings: Change one or more program properties"]
choices += ["quit"]
while self.keep_looping:
print("What would you like to do?")
for count, elem in enumerate(choices):
print("{}) {}".format(count + 1, elem))
user_input = ""
while not user_input in ["1", "2", "3", "4"]:
user_input = input("Main menu > ").lower().strip()
if user_input == "quit": return False
user_input = choices[int(user_input) - 1]
# -----------------------------------------
if user_input == "Speak some text":
self.speak_some_text()
elif user_input == "Settings: Change one or more program properties":
print("In main menu: Calling change_program_properites")
self.change_program_properties()
elif user_input == "cycle through voices":
self.cycle_through_voices()
elif user_input == "quit":
print("Quitting ...")
self.keep_looping = False
else:
raise ValueError("I don't recognize this: {}".format(user_input))
print("Goodtype! Have a great day! 😀😸")
# ******************************************************
if __name__ == "__main__":
# voices = Voices()
mysound = StrangeSound()
# mysound.main()
mysound.play_script()
| 40.795094
| 152
| 0.538113
|
21ee60fdb68489d8e9e72fc3dfa5d7f95ff73a3a
| 305
|
py
|
Python
|
.history/ClassFiles/Functions/Functions_20210101230056.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/Functions/Functions_20210101230056.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/Functions/Functions_20210101230056.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
''' FUNCTIONS
Functions are pieces (blocks) of code that do something.
They are reusable.
They execute or run when called by their name.
They can have parameters (variables) and arguments (values).
They can return data as a result.
def function_name():
print("Hello World!")
'''
def sum(x, y): return x + y
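# Minimal usage sketch (illustrative addition, not part of the original snapshot),
# showing the function above returning data as described in the docstring:
# print(sum(2, 3))  # prints 5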
| 20.333333
| 57
| 0.721311
|
62bfd5b4af628fc92590900f81738e4c5f75056e
| 1,373
|
py
|
Python
|
tests/test_arithmetic.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | null | null | null |
tests/test_arithmetic.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | 2
|
2020-03-24T18:30:15.000Z
|
2020-03-31T10:57:37.000Z
|
tests/test_arithmetic.py
|
thomasmatecki/parsley
|
0c51e9c37759fbc1c723519619952248c83e4642
|
[
"MIT"
] | null | null | null |
from examples.arithmetic import *
import pytest
@pytest.mark.parametrize(
"input_expression,expected_parsed",
[("+", ("+",)), ("-", ("-",)), ("*", ("*",)), ("/", ("/",)),],
)
def test_operator(input_expression, expected_parsed):
parsed, remaining = OPERATOR.match(input_expression)
assert parsed == expected_parsed
assert remaining == ""
@pytest.mark.parametrize(
"input_expression,expected_parsed",
[("+2", ("+", 2)), ("-2", ("-", 2)), ("*2", ("*", 2)), ("/2", ("/", 2))],
)
def test_applied_operator(input_expression, expected_parsed):
parsed, remaining = APPLIED_OPERATOR_EXPR.match(input_expression)
assert parsed == expected_parsed
assert remaining == ""
@pytest.mark.parametrize(
"input_expression,expected_parsed",
[
("1+2", (1, "+", 2)),
("1 + 2", (1, "+", 2)),
("1+2 + 3", (1, "+", 2, "+", 3)),
("1", (1, )),
],
)
def test_binary_operation_expr(input_expression, expected_parsed):
parsed, remaining = BINARY_OPERATION_EXPR.match(input_expression)
assert parsed == expected_parsed
assert remaining == ""
@pytest.mark.parametrize(
"input_expression",
[
"1+2",
"1 + 2",
"1+2 + 3",
"1",
],
)
def test_binary_operation(input_expression):
parsed, remaining = BINARY_OPERATION.match(input_expression)
print(parsed)
| 26.403846
| 77
| 0.592862
|
e37704948b7de91929eecc0139ec204049041653
| 1,756
|
py
|
Python
|
ci/models/cpu-example/model.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 213
|
2015-02-24T22:26:46.000Z
|
2021-11-29T14:17:52.000Z
|
ci/models/cpu-example/model.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 150
|
2015-10-22T21:59:37.000Z
|
2022-03-10T00:55:19.000Z
|
ci/models/cpu-example/model.py
|
emattia/sigopt-python
|
e6b4e5240261ddbdc84a3b4061b8935873612c23
|
[
"MIT"
] | 66
|
2016-01-01T18:25:11.000Z
|
2020-10-27T03:07:07.000Z
|
import sigopt
from data_and_model_setup import LoadTransformData, log_inference_metrics
import time
import platform
from xgboost.sklearn import XGBClassifier
def train_xgboost_model(dataset, random_state=1):
print("loading and transforming data")
load_transform_data = LoadTransformData()
trainX, testX, trainY, testY = load_transform_data.load_split_dataset(dataset)
# model architecture
sigopt.log_model("XGBClassifier") # model_keras.__class__
sigopt.log_dataset('Unscaled')
sigopt.log_metadata('Training Records', len(trainX))
sigopt.log_metadata('Testing Records', len(testX))
sigopt.log_metadata("Platform", platform.uname())
parameters = {
'objective': 'binary:logistic',
'learning_rate': sigopt.get_parameter('learning_rate', default=0.3),
'n_estimators': sigopt.get_parameter('n_estimators', default=20),
'max_depth': sigopt.get_parameter('max_depth', default=5),
'gamma': sigopt.get_parameter('gamma', default=0),
'min_child_weight': sigopt.get_parameter('min_child_weight', default=1),
'random_state': random_state,
'importance_type': 'gain',
'missing': None,
'verbosity': 2}
model = XGBClassifier(**parameters)
modelfit = model.fit(trainX, trainY)
# Collect model metrics
start = time.perf_counter()
prediction = modelfit.predict(testX)
sigopt.log_metric("Inference Time", time.perf_counter() - start)
probability = modelfit.predict_proba(testX)[:, 1]
log_inference_metrics(prediction, probability, testY, testX)
if __name__ == "__main__":
dataset_file = 'https://www.dropbox.com/s/437qdt4yjj64sxd/Fraud_Detection_SigOpt_dataset.csv?dl=1'
train_xgboost_model(dataset_file)
| 37.361702
| 102
| 0.722665
|
1f049b7ebb0287bf39be52c4c98671cba7b766c8
| 15,982
|
py
|
Python
|
Plugins/Classes/Weapons.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 25
|
2018-12-10T12:52:11.000Z
|
2022-01-29T14:42:57.000Z
|
Plugins/Classes/Weapons.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 4
|
2019-08-01T19:09:11.000Z
|
2022-01-02T01:47:42.000Z
|
Plugins/Classes/Weapons.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 6
|
2019-02-16T08:39:04.000Z
|
2021-12-21T06:11:58.000Z
|
from .Macro import Macro
from .Connection import Connection
from .Component import Component
from Framework import File_System
from .Shared import Physics_Properties
__all__ = [
'Weapon_System',
'Bullet',
'Missile',
'Bomb',
'Mine',
]
class Weapon_System(Macro):
'''
Weapon system, linking to a bullet. Defines model, rotation speed, etc.
Most weapon properties are part of the bullet.
* class_name
- String, one of: bomblauncher, missilelauncher, missileturret, turret,
weapon.
'''
def __init__(self, xml_node, *args, **kwargs):
super().__init__(xml_node, *args, **kwargs)
# Every weapon links to a bullet (laser or missile) that has most
# of the interesting info.
self.bullet_macro_name = self.Get('./properties/bullet', 'class')
return
def Get_Bullet(self):
'''
Returns the bullet macro.
'''
if not hasattr(self, '_bullet_macro'):
self._bullet_macro = self.database.Get_Macro(self.bullet_macro_name)
return self._bullet_macro
# TODO: fields of interest.
class Generic_Bullet(Macro):
'''
Parent class for bullet, missile, bomb.
'''
def Get_Ammo_Reload_Time(self):
value = self.Get('./properties/ammunition','reload')
return float(value) if value else None
def Get_Ammo_Rounds(self):
value = self.Get('./properties/ammunition','value')
return float(value) if value else None
def Get_Reload_Time(self):
value = self.Get('./properties/reload','time')
return float(value) if value else None
def Get_Reload_Rate(self):
value = self.Get('./properties/reload','rate')
return float(value) if value else None
def Set_Ammo_Reload_Time(self, value):
self.Set('./properties/ammunition','reload', f'{value:.3f}')
def Set_Reload_Time(self, value):
self.Set('./properties/reload','time', f'{value:.3f}')
def Set_Reload_Rate(self, value):
self.Set('./properties/reload','rate', f'{value:.3f}')
def Get_Rate_Of_Fire(self):
'''
Returns the rate of fire, in shots per second.
'''
ammo_reload_time = self.Get_Ammo_Reload_Time()
ammo_rounds = self.Get_Ammo_Rounds()
reload_time = self.Get_Reload_Time()
reload_rate = self.Get_Reload_Rate()
# Reload rate and time seem to be alternatives to each other.
# Standardize to rate.
if reload_time and not reload_rate:
reload_rate = 1 / reload_time
# If not an ammo weapon, use the above.
if not ammo_reload_time:
return reload_rate
# If this is set up for bursting but only 1 shot per burst,
# it may not have a reload_rate; default reload_rate to 1
# in this case so something can be computed easily below.
if ammo_rounds == 1 and not reload_rate:
reload_rate = 1
# If reload_rate and ammo_reload_time available, mix them
# for a burst weapon.
if (reload_rate and ammo_reload_time and ammo_rounds):
# Note: game calculates this wrongly as of ~1.5, multiplying
# the ammo_rounds-1 by reload_rate instead of 1/reload_rate.
# This will do it correctly (hopefully).
# Update: in 3.0 game computes ammo_rounds/reload_rate instead of
# subtracting one round, again incorrect.
# Test: 1 round burst, 1 reload_rate, 1 reload_time => 1 round/sec, enc says 1.
# Test: 2 round burst, 1 reload_rate, 2 reload_time => 2 round/3 sec, enc says 0.5
# So, this calc is correct, enc is wrong (in latter case).
burst_time = 1/reload_rate * (ammo_rounds -1)
time = ammo_reload_time + burst_time
return ammo_rounds / time
raise Exception()
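# Worked example of the burst calculation above (hypothetical numbers, for illustration only):
# a 2-round burst with reload_rate = 1 and ammo_reload_time = 2 gives
# burst_time = 1/1 * (2 - 1) = 1, time = 2 + 1 = 3, so rate of fire = 2/3 ≈ 0.67 shots/sec,
# matching the "2 round burst ... => 2 round/3 sec" test case noted in the comments.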
def Set_Rate_Of_Fire(self, new_rof):
'''
Set the rate of fire, in shots per second.
'''
old_rof = self.Get_Rate_Of_Fire()
multiplier = new_rof / old_rof
# See notes above on rate of fire calculation.
# In short, need to edit the 'reload rate', 'reload time',
# and 'ammunition reload' fields to cover both normal weapons
# and burst weapons (where bursting rate is a combination of
# ammo reload and reload rate).
ammo_reload_time = self.Get_Ammo_Reload_Time()
reload_time = self.Get_Reload_Time()
reload_rate = self.Get_Reload_Rate()
if ammo_reload_time:
# Invert the multiplier to reduce reload time.
self.Set_Ammo_Reload_Time(ammo_reload_time / multiplier)
if reload_time:
# Invert the multiplier to reduce reload time.
self.Set_Reload_Time(reload_time / multiplier)
if reload_rate:
# Keep multiplier as-is.
self.Set_Reload_Rate(reload_rate * multiplier)
return
# TODO: should this method be split for bullets/missiles?
# May lead to redundancy between missiles/bombs/mines, unless broken
# out into a separate shared class.
def Adjust_Damage(self, multiplier):
'Adjust all types of bullet damage.'
# Bullet fields. Missiles won't use these.
for field in ['value', 'shield', 'hull', 'repair']:
value = self.Get('./properties/damage', field)
if not value:
continue
value = float(value)
new_value = value * multiplier
self.Set('./properties/damage', field, f'{new_value:.3f}')
# Missile fields. Also for bombs/mines.
# Possible bullets might use these? Unknown.
for field in ['value', 'shield', 'hull']:
value = self.Get('./properties/explosiondamage', field)
if not value:
continue
value = float(value)
new_value = value * multiplier
self.Set('./properties/explosiondamage', field, f'{new_value:.3f}')
return
# TODO: should this method be only for bullets?
def Adjust_Heat(self, multiplier):
'Adjust heat produced.'
value = self.Get('./properties/heat', 'value')
if not value:
return
value = float(value)
new_value = value * multiplier
self.Set('./properties/heat', 'value', f'{new_value:.3f}')
def Adjust_Range(self, multiplier):
'Change weapon range.'
# Note: range works somewhat differently for different bullet
# types.
# - Beams have range, lifetime, and speed; edit just speed.
# - Missiles have range and lifetime; edit lifetime?
# - Others have lifetime and speed; edit speed and adjust lifetime.
for tag in ['bullet','missile']:
range = self.Get(f'./properties/{tag}', 'range')
lifetime = self.Get(f'./properties/{tag}', 'lifetime')
# If it has range, edit that.
if range:
value = float(range)
new_value = value * multiplier
self.Set(f'./properties/{tag}', 'range', f'{new_value:.3f}')
# Otherwise, edit lifetime.
elif lifetime:
value = float(lifetime)
new_value = value * multiplier
self.Set(f'./properties/{tag}', 'lifetime', f'{new_value:.3f}')
return
class Bullet(Generic_Bullet):
'''
Bullet macro, such as shot from a laser. Not a missile/bomb.
'''
def Adjust_Speed(self, multiplier):
'Change bullet speed.'
#range = self.Get('./properties/bullet', 'range')
lifetime = self.Get('./properties/bullet', 'lifetime')
# Note: missiles are more complex, with a thrust and such; TODO
speed = self.Get('./properties/bullet', 'speed')
# Check for speed and lifetime, indicating a beam.
if speed and lifetime:
speed = float(speed)
lifetime = float(lifetime)
# Bump speed, decrease lifetime, beam shoots out faster
# but for same range.
self.Set(f'./properties/bullet', 'speed', f'{speed * multiplier:.3f}')
self.Set(f'./properties/bullet', 'lifetime', f'{lifetime / multiplier:.3f}')
elif speed:
speed = float(speed)
# Edit just speed.
self.Set(f'./properties/bullet', 'speed', f'{speed * multiplier:.3f}')
else:
assert False
class Missile(Generic_Bullet, Physics_Properties):
'''
Missile macro.
'''
# Speed adjustment comes from physics properties.
class Bomb(Generic_Bullet, Physics_Properties):
'''
Bomb macro. Only used by spacesuit bombs.
'''
class Mine(Macro, Physics_Properties):
'''
Mine macro. TODO: add engines/etc.
'''
# Tracker mines have physics, though others don't.
def Adjust_Speed(self, multiplier):
# Skip if no physics, else use physics method.
if not self.xml_node.xpath('./properties/physics'):
return
Physics_Properties.Adjust_Speed(self, multiplier)
'''
Fields of interest (copied from live editor):
weapon_item_macros = [
E('rotation_speed' , './/rotationspeed' , 'max' , 'Rot. Speed', ''),
E('rotation_acceleration', './/rotationacceleration' , 'max' , 'Rot. Accel.', ''),
E('heat_overheat' , './/heat' , 'overheat' , 'Overheat', 'Max heat before firing stops'),
E('heat_cool_delay' , './/heat' , 'cooldelay' , 'Cool Delay', ''),
E('heat_cool_rate' , './/heat' , 'coolrate' , 'Cool Rate', ''),
E('heat_reenable' , './/heat' , 'reenable' , 'Reenable', 'Time to start firing again after overheating'),
E('reload_rate' , './/reload' , 'rate' , 'Reload Rate', ''),
E('reload_time' , './/reload' , 'time' , 'Reload Time', ''),
E('hull' , './/hull' , 'max' , 'Hull', ''),
E('ammunition_tags' , './/ammunition' , 'tags' , 'Ammo Tags', ''),
E('storage_capacity' , './/storage' , 'capacity' , 'Storage', ''),
E('bullet_codename' , './/bullet' , 'class' , 'Bullet Codename (ref)', '', is_reference = True),
]
# Shared item types between bullets and missiles.
reload_macros = [
D('fire_rate' , Display_Update_RoF , 'Rate of Fire', ''),
E('reload_rate' , './/reload' , 'rate' , 'Reload Rate', 'For burst weapons, inverse of time between shots in the burst'),
E('reload_time' , './/reload' , 'time' , 'Reload Time', 'For non-burst weapons, time between shots'),
E('ammunition_rounds' , './/ammunition' , 'value' , 'Burst Rounds', 'For burst weapons, number of shots per burst.'),
E('ammunition_reload_time' , './/ammunition' , 'reload' , 'Interburst Time', 'For burst weapons, time from the end of a burst to the start of the next.'),
]
bullet_item_macros = [
D('dps_base' , Display_Update_Bullet_Burst_DPS , 'DPS', ''),
D('dps_shield' , Display_Update_Bullet_Burst_DPS_Shield, '+Shield', ''),
D('dps_hull' , Display_Update_Bullet_Burst_DPS_Hull , '+Hull', ''),
D('repair_rate' , Display_Update_Bullet_Repair_Rate , 'Repair Rate', ''),
D('range' , Display_Update_Bullet_Range , 'Range', ''),
*reload_macros,
E('damage' , './/damage' , 'value' , 'Damage', ''),
E('damage_shield' , './/damage' , 'shield' , '+Shield', ''),
E('damage_hull' , './/damage' , 'hull' , '+Hull', ''),
E('damage_repair' , './/damage' , 'repair' , 'Repair', 'Set to 1 to flip to repairing.'),
E('bullet_speed' , './/bullet' , 'speed' , 'Bullet Speed', ''),
E('bullet_lifetime' , './/bullet' , 'lifetime' , 'Bullet Lifetime', ''),
E('bullet_range' , './/bullet' , 'range' , 'Bullet Range', ''),
E('bullet_amount' , './/bullet' , 'amount' , 'Bullet Amount', ''),
E('barrel_amount' , './/bullet' , 'barrelamount' , 'Bullet Barrel Amount', ''),
E('bullet_timediff' , './/bullet' , 'timediff' , 'Bullet Time Diff', ''),
E('bullet_angle' , './/bullet' , 'angle' , 'Bullet Angle', ''),
E('bullet_max_hits' , './/bullet' , 'maxhits' , 'Bullet Max Hits', ''),
E('bullet_ricochet' , './/bullet' , 'ricochet' , 'Bullet Ricochet', ''),
E('bullet_scale' , './/bullet' , 'scale' , 'Bullet Scale', ''),
E('bullet_attach' , './/bullet' , 'attach' , 'Bullet Attach', ''),
E('bullet_sticktime' , './/bullet' , 'sticktime' , 'Bullet Stick Time', ''),
E('heat' , './/heat' , 'value' , '+Heat', 'Heat added per bullet (or burst of bullets)'),
]
missile_item_macros = [
# No heat on these, so don't bother with burst dps for now.
D('dps_base' , Display_Update_Missile_DPS , 'DPS', ''),
D('dps_shield' , Display_Update_Missile_DPS_Shield , '+Shield', ''),
D('dps_hull' , Display_Update_Missile_DPS_Hull , '+Hull', ''),
D('speed' , Display_Update_Missile_Speed , 'Speed', ''),
D('effective_range' , Display_Update_Missile_Range , 'Effective Range', ''),
*reload_macros,
E('weapon_system' , './/weapon' , 'system' , 'Weapon System' , ''),
E('explosion_damage' , './/explosiondamage' , 'value' , 'Explosion Damage' , ''),
E('explosion_damage_shield' , './/explosiondamage' , 'shield' , 'Explosion +Shield' , ''),
E('explosion_damage_hull' , './/explosiondamage' , 'hull' , 'Explosion +Hull' , ''),
E('missile_amount' , './/missile' , 'amount' , 'Amount' , ''),
E('missile_barrel_amount' , './/missile' , 'barrelamount' , 'Barrel Amount' , ''),
E('missile_lifetime' , './/missile' , 'lifetime' , 'Lifetime' , ''),
E('missile_range' , './/missile' , 'range' , 'Range' , ''),
E('missile_guided' , './/missile' , 'guided' , 'Guided' , ''),
E('missile_retarget' , './/missile' , 'retarget' , 'Retarget' , ''),
E('missile_hull' , './/hull' , 'max' , 'Hull' , ''),
E('lock_time' , './/lock' , 'time' , 'Lock Time' , ''),
E('lock_range' , './/lock' , 'range' , 'Lock Range' , ''),
E('lock_angle' , './/lock' , 'angle' , 'Lock Angle' , ''),
E('counter_resilience' , './/countermeasure' , 'resilience' , 'Counter Resiliance' , 'Missile resiliance against countermeasures'),
E('longrangescan' , './/longrangescan' , 'minlevel' , 'Long Range Scan' , ''),
*physics_item_macros,
*connection_item_macros,
]
'''
| 45.403409
| 172
| 0.534539
|
c384fde523727d27ee366ee013f08c97e6065bc3
| 9,685
|
py
|
Python
|
stream_alert/apps/_apps/onelogin.py
|
opsbay/streamalert
|
557fb3f604661cdd9bd36486cccc8ce3a34bd1f1
|
[
"Apache-2.0"
] | 7
|
2018-12-26T14:38:08.000Z
|
2022-03-09T13:21:00.000Z
|
stream_alert/apps/_apps/onelogin.py
|
opsbay/streamalert
|
557fb3f604661cdd9bd36486cccc8ce3a34bd1f1
|
[
"Apache-2.0"
] | 14
|
2018-05-09T19:18:15.000Z
|
2021-06-02T02:34:09.000Z
|
stream_alert/apps/_apps/onelogin.py
|
opsbay/streamalert
|
557fb3f604661cdd9bd36486cccc8ce3a34bd1f1
|
[
"Apache-2.0"
] | 1
|
2018-12-06T20:51:58.000Z
|
2018-12-06T20:51:58.000Z
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from . import AppIntegration, StreamAlertApp, get_logger
LOGGER = get_logger(__name__)
@StreamAlertApp
class OneLoginApp(AppIntegration):
"""OneLogin StreamAlert App"""
_ONELOGIN_EVENTS_URL = 'https://api.{}.onelogin.com/api/1/events'
_ONELOGIN_TOKEN_URL = 'https://api.{}.onelogin.com/auth/oauth2/v2/token'
_ONELOGIN_RATE_LIMIT_URL = 'https://api.{}.onelogin.com/auth/rate_limit'
# OneLogin API returns 50 events per page
_MAX_EVENTS_LIMIT = 50
# Define our authorization headers variable
def __init__(self, event, config):
super(OneLoginApp, self).__init__(event, config)
self._auth_headers = None
self._next_page_url = None
self._rate_limit_sleep = 0
@classmethod
def _type(cls):
return 'events'
@classmethod
def service(cls):
return 'onelogin'
@classmethod
def date_formatter(cls):
"""OneLogin API expects the ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ"""
return '%Y-%m-%dT%H:%M:%SZ'
def _token_endpoint(self):
"""Get the endpoint URL to retrieve tokens
Returns:
str: Full URL to generate tokens for the OneLogin API
"""
return self._ONELOGIN_TOKEN_URL.format(self._config.auth['region'])
def _events_endpoint(self):
"""Get the endpoint URL to retrieve events
Returns:
str: Full URL to retrieve events in the OneLogin API
"""
return self._ONELOGIN_EVENTS_URL.format(self._config.auth['region'])
def _rate_limit_endpoint(self):
"""Get the endpoint URL to retrieve rate limit details
Returns:
str: Full URL to retrieve rate limit details in the OneLogin API
"""
return self._ONELOGIN_RATE_LIMIT_URL.format(self._config.auth['region'])
def _generate_headers(self):
"""Each request will request a new token to call the resources APIs.
More details to be found here:
https://developers.onelogin.com/api-docs/1/oauth20-tokens/generate-tokens-2
Returns:
bool: True if the authorization headers were generated (or already present), False otherwise
"""
if self._auth_headers:
return True
authorization = 'client_id: {}, client_secret: {}'.format(
self._config.auth['client_id'], self._config.auth['client_secret'])
headers_token = {'Authorization': authorization,
'Content-Type': 'application/json'}
result, response = self._make_post_request(self._token_endpoint(),
headers_token,
{'grant_type':'client_credentials'})
if not result:
return False
if not response:
LOGGER.error('[%s] Response invalid, could not generate headers', self)
return False
bearer = 'bearer:{}'.format(response.get('access_token'))
self._auth_headers = {'Authorization': bearer}
return True
def _gather_logs(self):
"""Gather the authentication log events."""
if not self._generate_headers():
return False
return self._get_onelogin_events()
def _set_rate_limit_sleep(self):
"""Get the number of seconds we need to sleep until we are clear to continue"""
# Make sure we have authentication headers
if not self._auth_headers:
self._rate_limit_sleep = 0
LOGGER.error('[%s] No authentication headers set', self)
return
result, response = self._make_get_request(self._rate_limit_endpoint(),
self._auth_headers)
if not result:
self._rate_limit_sleep = 0
return
# Making sure we have a valid response
if not response:
LOGGER.error('[%s] Response invalid, could not get rate limit info', self)
self._rate_limit_sleep = 0
return
self._rate_limit_sleep = response.get('data')['X-RateLimit-Reset']
LOGGER.info('[%s] Rate limit sleep set: %d', self, self._rate_limit_sleep)
def _get_onelogin_events(self):
"""Get all events from the endpoint for this timeframe
Returns:
[
{
'id': <int:id>,
'created_at': <str:created_at>,
'account_id': <int:account_id>,
'user_id': <int:user_id>,
'event_type_id': <int:event_type_id>,
'notes': <str:notes>,
'ipaddr': <str:ipaddr>,
'actor_user_id': <int:actor_user_id>,
'assuming_acting_user_id': null,
'role_id': <int:role_id>,
'app_id': <int:app_id>,
'group_id': <int:group_id>,
'otp_device_id': <int:otp_device_id>,
'policy_id': <int:policy_id>,
'actor_system': <str:actor_system>,
'custom_message': <str:custom_message>,
'role_name': <str:role_name>,
'app_name': <str:app_name>,
'group_name': <str:group_name>,
'actor_user_name': <str:actor_user_name>,
'user_name': <str:user_name>,
'policy_name': <str:policy_name>,
'otp_device_name': <str:otp_device_name>,
'operation_name': <str:operation_name>,
'directory_sync_run_id': <int:directory_sync_run_id>,
'directory_id': <int:directory_id>,
'resolution': <str:resolution>,
'client_id': <int:client_id>,
'resource_type_id': <int:resource_type_id>,
'error_description': <str:error_description>
}
]
"""
# Make sure we have authentication headers
if not self._auth_headers:
LOGGER.error('[%s] No authentication headers set', self)
return False
# Are we just getting events or getting paginated events?
if self._next_page_url:
params = None
request_url = self._next_page_url
else:
params = {'since': self._last_timestamp}
request_url = self._events_endpoint()
result, response = self._make_get_request(request_url, self._auth_headers, params)
if not result:
# If we hit the rate limit, update the sleep time
if response and response.get('status'):
r_status = response.get('status')
if r_status['code'] == 400 and r_status['message'] == 'rate_limit_exceeded':
self._set_rate_limit_sleep()
return False
# Fail if response is invalid
if not response:
LOGGER.error('[%s] Received invalid response', self)
return False
# Set pagination link, if there is any
self._next_page_url = response['pagination']['next_link']
self._more_to_poll = bool(self._next_page_url)
# Adjust the last seen event, if the events list is not empty
if not response['data']:
LOGGER.info('[%s] Received empty list of events', self)
return False
self._last_timestamp = response['data'][-1]['created_at']
# Return the list of events to the caller so they can be send to the batcher
return response['data']
@classmethod
def _required_auth_info(cls):
return {
'region':
{
'description': ('the region for the OneLogin API. This should be '
'just "en" or "us".'),
'format': re.compile(r'^(en|us)$')
},
'client_secret':
{
'description': ('the client secret for the OneLogin API. This '
'should be a string of 64 alphanumeric characters'),
'format': re.compile(r'^[a-z0-9]{64}$')
},
'client_id':
{
'description': ('the client id for the OneLogin API. This '
'should be a string of 64 alphanumeric characters'),
'format': re.compile(r'^[a-z0-9]{64}$')
}
}
def _sleep_seconds(self):
"""Return the number of seconds this polling function should sleep for
between requests to avoid failed requests. OneLogin tokens allows for 5000 requests
every hour, but if the rate limit is reached, we can retrieve how long until we are clear.
More information about this here:
https://developers.onelogin.com/api-docs/1/oauth20-tokens/get-rate-limit
Returns:
int: Number of seconds that this function should sleep for between requests
"""
return self._rate_limit_sleep
| 37.393822
| 98
| 0.575736
|
6cd5b0f835680a60264aa58f86885e84821beac8
| 5,583
|
py
|
Python
|
metrics/perplexity/perplexity.py
|
MitchellTesla/datasets
|
bf08ea3f95e8209a7afd2b50410ad5db51409d11
|
[
"Apache-2.0"
] | 1
|
2022-03-22T16:49:47.000Z
|
2022-03-22T16:49:47.000Z
|
metrics/perplexity/perplexity.py
|
MitchellTesla/datasets
|
bf08ea3f95e8209a7afd2b50410ad5db51409d11
|
[
"Apache-2.0"
] | null | null | null |
metrics/perplexity/perplexity.py
|
MitchellTesla/datasets
|
bf08ea3f95e8209a7afd2b50410ad5db51409d11
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perplexity Metric."""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry. Perplexity returned will be an average of
the perplexity for each list entry.
stride (int): stride size, defaults to 512
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the average perplexity score for the text
in the input list.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts,
... stride=1)
>>> round(results["perplexity"], 1)
78.2
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:10] # doctest:+ELLIPSIS
[...]
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts,
... stride=256)
>>> round(results["perplexity"], 1)
117.9
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"input_texts": datasets.Value("string"),
}
),
reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
)
def _compute(self, input_texts, model_id, stride=512, device=None):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
device = "cuda"
else:
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="<PAD>")
encodings = tokenizer(input_texts, padding=True, return_tensors="pt", return_special_tokens_mask=True).to(
device
)
encoded_texts = encodings["input_ids"]
special_tokens_masks = encodings["special_tokens_mask"]
max_model_length = model.config.n_positions
ppls = []
for text_index in logging.tqdm(range(0, len(encoded_texts))):
encoded_text = encoded_texts[text_index]
special_tokens_mask = special_tokens_masks[text_index]
encoded_text_length = len(encoded_text) - special_tokens_mask.sum()
nlls = []
target_index = max(1, min(stride - 1, encoded_text_length - 1))
while target_index < encoded_text_length:
start_index = max(0, target_index - (max_model_length - 1))
input_ids = encoded_text[start_index : target_index + 1]
target_ids = input_ids.clone()
target_ids[:-1] = -100
attn_mask = torch.ones(len(input_ids)).to(device)
attn_mask[-1] = 0
with torch.no_grad():
outputs = model(input_ids, labels=target_ids, attention_mask=attn_mask)
neg_log_likelihood = outputs[0]
nlls.append(neg_log_likelihood)
target_index += stride
if len(nlls) > 0:
ppls.append(torch.exp2(torch.mean(torch.stack(nlls))))
ppl = torch.mean(torch.stack(ppls))
return {"perplexity": float(ppl)}
| 36.97351
| 121
| 0.617589
|
ce6d666b6e78b5a16bc11f4ea1e9d2aa076e5151
| 10,753
|
py
|
Python
|
TrainingHelper.py
|
meet-minimalist/Learn-pytorch-in-one-example
|
6a5093b964a5ad756bba5bbc5b6d7613fb8c41e6
|
[
"MIT"
] | null | null | null |
TrainingHelper.py
|
meet-minimalist/Learn-pytorch-in-one-example
|
6a5093b964a5ad756bba5bbc5b6d7613fb8c41e6
|
[
"MIT"
] | null | null | null |
TrainingHelper.py
|
meet-minimalist/Learn-pytorch-in-one-example
|
6a5093b964a5ad756bba5bbc5b6d7613fb8c41e6
|
[
"MIT"
] | null | null | null |
import os
import time
import torch
import datetime
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import config
from LRHelper import LRHelper
from DatasetHelper import get_train_loader, get_test_loader
from utils.Logger import Logger
from utils.CheckpointHandler import CheckpointHandler
from utils.SummaryHelper import SummaryHelper
from utils.misc import init_training, np_cpu, LossAverager
cuda = torch.device('cuda:0')
cpu = torch.device("cpu:0")
class TrainingHelper:
def __init__(self):
self.log, self.exp_path = init_training()
self.lr_helper = LRHelper()
ckpt_folder = self.exp_path + "/ckpt/"
os.makedirs(ckpt_folder, exist_ok=True)
ckpt_path = ckpt_folder + "model.pth"
self.ckpt_handler = CheckpointHandler(ckpt_path, max_to_keep=3)
def get_loss_and_accuracy(self, labels, logits, model, ce_loss_fn):
# labels : [N] dims tensor
# logits : [N x C] dims tensor
loss_reg = torch.tensor(0, dtype=torch.float32, device=cuda, requires_grad=False)
for layer in model.modules():
if isinstance(layer,torch.nn.Conv2d):
for p in layer.named_parameters():
if 'weight' in p[0]:
loss_reg += torch.sum((torch.square(p[1]) / 2))
loss_reg *= config.l2_weight_decay
loss_cls = ce_loss_fn(logits, labels)
loss_total = loss_cls + loss_reg
sm_outputs = F.softmax(logits.detach(), dim=-1)
accuracy = (torch.argmax(sm_outputs, dim=1) == labels).sum() * 100 / labels.size(0)
return loss_total, loss_cls, loss_reg, accuracy
def get_model(self):
if config.model_type == 'simplecnn':
from models.SimpleCNN import ConvModel
model = ConvModel(num_classes=config.num_classes).to(cuda, non_blocking=True)
elif config.model_type == 'resnet18':
from models.ResNetModel import ResNetModel
model = ResNetModel(num_classes=config.num_classes, freeze_backend=config.freeze_backend).to(cuda, non_blocking=True)
else:
print("Unsupported model type.")
exit()
return model
def train(self, resume=False, resume_ckpt=None, pretrained_ckpt=None):
model = self.get_model()
model_stats = summary(model, (3, config.input_size[0], config.input_size[1]))
for line in str(model_stats).split('\n'):
self.log(line)
ce_loss_fn = nn.CrossEntropyLoss()
# Why opt for nn.CrossEntropyLoss over nn.functional.cross_entropy
# Ref : https://discuss.pytorch.org/t/f-cross-entropy-vs-torch-nn-cross-entropy-loss/25505/2
opt = torch.optim.Adam(model.parameters(), lr=0.0, weight_decay=0.0)
# Setting lr equal to 0.0 here so that it wont work as per this line.
# But we will explicitly set lr for each weights dynamically, at every step.
# Same is case for weight_decay, We will calculate L2_regularization_loss on our own separately.
scaler = torch.cuda.amp.GradScaler(enabled=config.use_amp)
if resume:
checkpoint = torch.load(resume_ckpt)
model.load_state_dict(checkpoint['model'])
opt.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scalar'])
resume_g_step = checkpoint['global_step']
resume_eps = checkpoint['epoch']
self.log("Resuming training from {} epochs.".format(resume_eps))
elif pretrained_ckpt is not None and config.model_type == 'resnet18':
self.log("Using pre-trained checkpoint from :".format(pretrained_ckpt))
checkpoint = torch.load(pretrained_ckpt)
filtered_checkpoint = {}
self.log("\nFollowing variables will be restored:")
for var_name, var_value in checkpoint.items():
if var_name == 'fc.weight' or var_name == 'fc.bias':
# As these layers change due to change in num classes
continue
new_var_name = 'resnet_feat.' + var_name
# why this prefix? This comes as the model that we created contains a variable resnet_feat
# which is sequential group of layers containing resnet layers. So all the layers and parameters
# within it are prefixed with resnet_feat and for restoring resnet pretrained weights
# we need to update the statedict according to the model architectural definition.
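# For example (assuming a standard torchvision resnet18 state dict), a key such as
# 'conv1.weight' would be restored here as 'resnet_feat.conv1.weight'.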
self.log(f"{new_var_name} : {list(var_value.size())}")
filtered_checkpoint[new_var_name] = var_value
self.log("\n\nFollowing variables will be initialized:")
remaining_vars = model.load_state_dict(filtered_checkpoint, strict=False)
for var_name in remaining_vars.missing_keys:
self.log(var_name)
resume_g_step = 0
resume_eps = 0
else:
resume_g_step = 0
resume_eps = 0
train_writer = SummaryHelper(self.exp_path + "/train/")
test_writer = SummaryHelper(self.exp_path + "/test/")
input_x = torch.randn((1,3, config.input_size[0], config.input_size[1])).to(cuda, non_blocking=True)
train_writer.add_graph(model, input_x)
g_step = max(0, resume_g_step)
for eps in range(resume_eps, config.epochs):
# I hope you noticed one particular statement in the code, to which I assigned a comment “What is this?!?” — model.train().
# In PyTorch, models have a train() method which, somewhat disappointingly, does NOT perform a training step.
# Its only purpose is to set the model to training mode. Why is this important? Some models may use mechanisms like Dropout,
# for instance, which have distinct behaviors in training and evaluation phases.
# Ref: https://towardsdatascience.com/understanding-pytorch-with-an-example-a-step-by-step-tutorial-81fc5f8c4e8e
model.train()
train_loader = get_train_loader()
train_iter = iter(train_loader) # This is creating issues sometimes. Check required.
self.log("Epoch: {} Started".format(eps+1))
for batch_num in tqdm(range(config.train_steps)):
start = time.time()
batch = next(train_iter)
opt.zero_grad() # Zeroing out gradients before backprop
# We can avoid zeroing out if we want to accumulate gradients over
# multiple forward passes and a single backward pass.
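# Accumulation sketch (hypothetical, not used in this loop): call opt.zero_grad() only
# every N batches and divide loss_total by N, so N backward passes contribute to one
# optimizer step with an effective batch size of N * batch.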
with torch.cuda.amp.autocast(enabled=config.use_amp):
logits = model(batch['image'].to(cuda, non_blocking=True))
loss_total, loss_cls, loss_reg, accuracy = self.get_loss_and_accuracy(batch['label'].to(cuda, non_blocking=True), logits, model, ce_loss_fn)
#loss_total.backward() # Used for normal training without AMP
scaler.scale(loss_total).backward() # Used when AMP is applied. The enabled flag will trigger normal FP32 behaviour or Mixed precision behaviour
scaler.step(opt)
scaler.update()
lr = self.lr_helper.step(g_step, opt)
# Note: scaler.step(opt) above already performs the optimizer update, so a separate opt.step() would double-apply the gradients.
delta = (time.time() - start) * 1000 # in milliseconds
print("Time: {:.2f} ms".format(delta))
if (batch_num+1) % config.loss_logging_frequency == 0:
self.log("Epoch: {}/{}, Batch No.: {}/{}, Total Loss: {:.4f}, Loss Cls: {:.4f}, Loss Reg: {:.4f}, Accuracy: {:.2f}".format(\
eps+1, config.epochs, batch_num+1, config.train_steps, np_cpu(loss_total), \
np_cpu(loss_cls), np_cpu(loss_reg), np_cpu(accuracy)))
train_writer.add_summary({'total_loss' : np_cpu(loss_total),
'loss_cls' : np_cpu(loss_cls),
'loss_reg' : np_cpu(loss_reg),
'accuracy' : np_cpu(accuracy),
'lr' : lr}, g_step)
g_step += 1
model.eval() # Putting model in eval mode so that batch normalization and dropout will work in inference mode.
test_loader = get_test_loader()
test_iter = iter(test_loader)
test_losses = LossAverager(num_elements=4)
with torch.no_grad(): # Disabling the gradient calculations will reduce the calculation overhead.
for batch_num in tqdm(range(config.test_steps)):
batch = next(test_iter)
logits = model(batch['image'].to(cuda))
loss_total, loss_cls, loss_reg, accuracy = self.get_loss_and_accuracy(batch['label'].to(cuda, non_blocking=True), logits, model, ce_loss_fn)
test_losses([np_cpu(loss_total), np_cpu(loss_cls), np_cpu(loss_reg), np_cpu(accuracy)])
self.log("Epoch: {}/{} Completed, Test Total Loss: {:.4f}, Loss Cls: {:.4f}, Loss Reg: {:.4f}, Accuracy: {:.2f}".format(\
eps+1, config.epochs, test_losses.avg[0], test_losses.avg[1], test_losses.avg[2], test_losses.avg[3]))
test_writer.add_summary({'total_loss' : test_losses.avg[0],
'loss_cls' : test_losses.avg[1],
'loss_reg' : test_losses.avg[2],
'accuracy' : test_losses.avg[3]}, g_step)
checkpoint = {
'epoch': eps + 1,
'global_step': g_step,
'test_loss': test_losses.avg[0],
'model': model.state_dict(),
'optimizer': opt.state_dict(),
'scalar': scaler.state_dict()
}
# Above code taken from : https://towardsdatascience.com/how-to-save-and-load-a-model-in-pytorch-with-a-complete-example-c2920e617dee
self.ckpt_handler.save(checkpoint)
self.log("Epoch {} completed. Checkpoint saved.".format(eps+1))
print("Training Completed.")
train_writer.close()
test_writer.close()
| 47.791111
| 161
| 0.588208
|
2bbb57c65f3322b61809286a9b09a44b212118cb
| 2,711
|
py
|
Python
|
formatBibTeXidentifiers.py
|
LWollatz/mendeley2bibtex
|
eadda877fc08c51e4572bb7498021fa2e324c050
|
[
"Apache-2.0"
] | null | null | null |
formatBibTeXidentifiers.py
|
LWollatz/mendeley2bibtex
|
eadda877fc08c51e4572bb7498021fa2e324c050
|
[
"Apache-2.0"
] | null | null | null |
formatBibTeXidentifiers.py
|
LWollatz/mendeley2bibtex
|
eadda877fc08c51e4572bb7498021fa2e324c050
|
[
"Apache-2.0"
] | null | null | null |
def _idgroup(number):
first_digit = int(number[0])
if first_digit <= 5 or first_digit == 7:
group = number[0]
elif first_digit == 6:
group = number[:3]
elif first_digit == 8:
group = number[:2]
elif int(number[:3]) == 999:
group = number[:5]
elif int(number[:2]) == 99:
group = number[:4]
else:
if int(number[1]) <= 4:
group = number[:2]
else:
group = number[:3]
return group
def _idpublisher(number):
if int(number[:2]) < 20:
publisher = number[0:2]
elif int(number[:3]) < 700:
publisher = number[:3]
elif int(number[:4]) < 8500:
publisher = number[:4]
elif int(number[:5]) < 90000:
publisher = number[:5]
elif int(number[:6]) < 950000:
publisher = number[:6]
else:
publisher = number[:7]
return publisher
def formatISBN10(ISBNt):
#print "ISBN10"
IDG = _idgroup(ISBNt)
ISBNt = ISBNt[len(IDG):]
IDP = _idpublisher(ISBNt)
ISBNt = ISBNt[len(IDP):]
ISBNout = IDG+"-"+IDP+"-"+ISBNt[:-1]+"-"+ISBNt[-1]
return ISBNout
def formatISBN13(ISBNt):
#print "ISBN13"
if ISBNt[:3] == "978":
IDE = "978"
elif ISBNt[:3] == "979":
IDE = "979"
else:
print "WARNING strange ISBN13",str(ISBN).replace("-","")
IDE = ISBNt[:3]
ISBNt = ISBNt[len(IDE):]
IDG = _idgroup(ISBNt)
ISBNt = ISBNt[len(IDG):]
IDP = _idpublisher(ISBNt)
ISBNt = ISBNt[len(IDP):]
ISBNout = IDE+"-"+IDG+"-"+IDP+"-"+ISBNt[:-1]+"-"+ISBNt[-1]
return ISBNout
def formatISBN(ISBN):
"""takes an ISBN and returns a string that splits the ISBN correctly"""
ISBNt = str(ISBN)
ISBNt = ISBNt.replace("ISBN","")
ISBNt = ISBNt.replace(":","")
ISBNt = ISBNt.replace(" ","")
if(ISBNt.find("-") >= 0):
return ISBNt
ISBNt = ISBNt.replace("-","")
if len(ISBNt) == 10:
ISBN = formatISBN10(ISBNt)
elif len(ISBNt) == 13:
ISBN = formatISBN13(ISBNt)
else:
raise Exception("ISBN %s is of wrong length (%d), expected 10 or 13" % (ISBNt,len(ISBNt)))
return str(ISBN)
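# Example (illustrative sketch): formatISBN("9783161484100") returns "978-3-16-148410-0".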
def formatURL(url):
url = url.replace("{\\_}","\\_").replace("{\\&}","\\&").replace("{\\#}","\\#")
urls = url.split(" ")
url = urls[0]
return url
def formatDOI(doi):
doi = str(doi)
doi = doi.replace("http://","")
doi = doi.replace("https://","")
doi = doi.replace("dx.doi.org/","")
doi = doi.replace("dx.doi.org","")
doi = doi.replace("org","")
doi = doi.replace("doi:","")
doi = doi.replace("doi","")
doi = doi.replace(" ","")
doi = doi.lstrip(".")
doi = doi.lstrip("/")
return doi
| 27.948454
| 98
| 0.533383
|
7e9a490a8b8446ab33a58c78483e8895717b5867
| 13,080
|
py
|
Python
|
falkon/kernels/distance_kernel.py
|
mathurinm/falkon
|
bb2b3a0f17a3fa5968212d3c1fefb5e1f66257b5
|
[
"MIT"
] | null | null | null |
falkon/kernels/distance_kernel.py
|
mathurinm/falkon
|
bb2b3a0f17a3fa5968212d3c1fefb5e1f66257b5
|
[
"MIT"
] | null | null | null |
falkon/kernels/distance_kernel.py
|
mathurinm/falkon
|
bb2b3a0f17a3fa5968212d3c1fefb5e1f66257b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 24 21:49:21 2017
@author: alessandro
"""
import collections
import functools
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from falkon.options import BaseOptions, FalkonOptions
from falkon.sparse import sparse_ops
from falkon.sparse.sparse_tensor import SparseTensor
from falkon.kernels import Kernel, KeopsKernelMixin
DistKerContainer = collections.namedtuple('DistKerContainer', ['sq1', 'sq2'])
class L2DistanceKernel(Kernel, ABC):
"""Base class for L2-based kernels
Such kernels are characterized by the squared norm of the difference between each input
sample. This involves computing the squared norm in `_prepare`, and a simple matrix
multiplication in `_apply`.
In `_finalize` the squared norm and matrix multiplication are added together to form
the kernel matrix.
Subclasses should implement the `_transform` method which applies additional elementwise
transformations to the kernel matrix. `_transform` is called after `_finalize`.
This class supports sparse data.
Parameters
----------
name : str
Descriptive name of the specialized kernel
opt : CompOpt or dict or None
Options which will be passed to the kernel operations
Notes
------
To efficiently compute kernels of the form k(x, x') = ||x - x'||^2 between two matrices of
data-points we decompose the squared norm of the difference into 3 terms:
||X||^2 and -2*XX'^T and ||X'||^2
The first and third term are calculated in the `_prepare` method while the second is
calculated in the `_apply` method. Finally the three terms are combined in the `_finalize`
method.
"""
kernel_type = "l2distance"
def __init__(self, name, opt: Optional[FalkonOptions] = None):
super().__init__(name, self.kernel_type, opt)
def _prepare(self, X1: torch.Tensor, X2: torch.Tensor) -> DistKerContainer:
return DistKerContainer(
sq1=torch.norm(X1, p=2, dim=1, keepdim=True).pow_(2),
sq2=torch.norm(X2, p=2, dim=1, keepdim=True).pow_(2)
)
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor) -> DistKerContainer:
sq1 = torch.empty(X1.size(0), dtype=X1.dtype, device=X1.device)
sparse_ops.sparse_square_norm(X1, sq1)
sq2 = torch.empty(X2.size(0), dtype=X1.dtype, device=X1.device)
sparse_ops.sparse_square_norm(X2, sq2)
return DistKerContainer(
sq1=sq1.reshape(-1, 1), sq2=sq2.reshape(-1, 1)
)
def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor) -> None:
out.addmm_(X1, X2)
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor) -> torch.Tensor:
return sparse_ops.sparse_matmul(X1, X2, out)
def _finalize(self, A: torch.Tensor, d: DistKerContainer) -> torch.Tensor:
A.mul_(-2.0)
A.add_(d.sq1.to(A))
A.add_(d.sq2.to(A).t())
A.clamp_min_(0)
return self._transform(A)
@abstractmethod
def _transform(self, A: torch.Tensor):
pass
class GaussianKernel(L2DistanceKernel, KeopsKernelMixin):
"""Class for computing the Gaussian kernel and related kernel-vector products
The Gaussian kernel is one of the most common and effective kernel embeddings
since it is infinite dimensional, and governed by a single parameter. The kernel length-scale
determines the width of the Gaussian distribution which is placed on top of each point.
A larger sigma corresponds to a wide Gaussian, so that the relative influence of far away
points will be high for computing the kernel at a given datum.
On the opposite side of the spectrum, a small sigma means that only nearby points will
influence the kernel.
Parameters
-----------
sigma
The length-scale of the kernel.
This can be a scalar, and then it corresponds to the standard deviation
of Gaussian distribution from which the kernel is derived.
It can also be a vector of size `d` or a matrix of size `d*d` where `d`
is the dimensionality of the data which will be used with the kernel.
In this case sigma will be the inverse square of the standard deviation,
so for example converting from the vectorial sigma to the scalar sigma can
be done by `vec_sigma = -1/(sigma**2)`
opt
Additional options to be forwarded to the matrix-vector multiplication
routines.
Examples
--------
Creating a Gaussian kernel with a single length-scale. Operations on this kernel will not
use KeOps.
>>> K = GaussianKernel(sigma=3.0, opt=FalkonOptions(no_keops=True))
Creating a Gaussian kernel with a different length-scale per dimension
>>> K = GaussianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))
Creating a Gaussian kernel object with full covariance matrix (randomly chosen)
>>> mat = torch.randn(3, 3, dtype=torch.float64)
>>> sym_mat = mat @ mat.T
>>> K = GaussianKernel(sigma=sym_mat)
>>> K
GaussianKernel(sigma=tensor([[ 2.0909, 0.0253, -0.2490],
[ 0.0253, 0.3399, -0.5158],
[-0.2490, -0.5158, 4.4922]], dtype=torch.float64)) #random
Notes
-----
The Gaussian kernel with a single length-scale follows
.. math::
k(x, x') = \\exp{-\\dfrac{\\lVert x - x' \\rVert^2}{2\\sigma^2}}
When the length-scales are specified as a matrix, the RBF kernel is determined by
.. math::
k(x, x') = \\exp{-\\dfrac{1}{2}x\\Sigma^{-1}x'}
In both cases, the actual computation follows a different path, working on the expanded
norm. The KeOps implementation is fully contained in the :meth:`_keops_mmv_impl` method,
while our implementation uses methods :meth:`_prepare`, :meth:`_apply`, and :meth:`_transform`
and is driven by the functions in :mod:`falkon.mmv_ops`.
"""
kernel_name = "gaussian"
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
super().__init__(self.kernel_name, opt)
self.sigma, self.gaussian_type = self._get_sigma_kt(sigma)
if self.gaussian_type == 'single':
self.gamma = torch.tensor(
-0.5 / (self.sigma.item() ** 2), dtype=torch.float64).item()
else:
self.gamma = torch.cholesky(self.sigma, upper=False)
self.kernel_type = "l2-multi-distance"
@staticmethod
def _get_sigma_kt(sigma):
if isinstance(sigma, torch.Tensor):
try:
# tensor.item() works if tensor is a scalar, otherwise it throws
# a value error.
sigma.item()
return sigma, "single"
except ValueError:
pass
if sigma.dim() == 1 or sigma.size(1) == 1:
return torch.diagflat(sigma), "multi"
if sigma.dim() != 2:
raise TypeError("Sigma can be specified as a 1D or a 2D tensor. "
"Found %dD tensor" % (sigma.dim()))
if sigma.size(0) != sigma.size(1):
raise TypeError("Sigma passed as a 2D matrix must be square. "
"Found dimensions %s" % (sigma.size()))
return sigma, "multi"
else:
try:
sigma = float(sigma)
return torch.tensor(sigma, dtype=torch.float64), "single"
except TypeError:
raise TypeError("Sigma must be a scalar or a tensor.")
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
if self.gaussian_type == 'single':
formula = 'Exp(g * SqDist(x1, x2)) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(1)'
]
other_vars = [torch.tensor([self.gamma]).to(dtype=X1.dtype)]
else:
dim = self.gamma.shape[0]
formula = (
'Exp( -IntInv(2) * SqDist('
f'TensorDot(x1, g, Ind({dim}), Ind({dim}, {dim}), Ind(0), Ind(0)), '
f'TensorDot(x2, g, Ind({dim}), Ind({dim}, {dim}), Ind(0), Ind(0)))) * v'
)
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (dim ** 2)
]
other_vars = [self.gamma.reshape(-1).to(dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _decide_mmv_impl(self, X1, X2, v, opt: FalkonOptions):
if self.keops_can_handle_mmv(X1, X2, v, opt):
return self._keops_mmv_impl
else:
return super()._decide_mmv_impl(X1, X2, v, opt)
def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
else:
return super()._decide_dmmv_impl(X1, X2, v, w, opt)
def _prepare(self, X1, X2):
if self.gaussian_type == "single":
return super()._prepare(X1, X2)
else:
return self.prepare_multisigma(X1, X2)
def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
if self.gaussian_type != "single":
raise NotImplementedError(
"Sparse Gaussian kernel only implemented with scalar sigma.")
return super()._prepare_sparse(X1, X2)
def _apply(self, X1, X2, out):
if self.gaussian_type == "single":
return super()._apply(X1, X2, out)
else:
return self.apply_multisigma(X1, X2, out)
def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
if self.gaussian_type != "single":
raise NotImplementedError(
"Sparse Gaussian kernel only implemented with scalar sigma.")
return super()._apply_sparse(X1, X2, out)
def _transform(self, A) -> torch.Tensor:
if self.gaussian_type == "single":
return self.transform_singlesigma(A)
else:
return self.transform_multisigma(A)
def prepare_multisigma(self, X1, X2):
chol = self.gamma.to(X1)
return DistKerContainer(
sq1=torch.norm(X1 @ chol, p=2, dim=1, keepdim=True).pow_(2),
sq2=torch.norm(X2 @ chol, p=2, dim=1, keepdim=True).pow_(2)
)
def apply_multisigma(self, X1, X2, out):
sigma = self.sigma.to(X1)
out.addmm_(X1 @ sigma, X2)
# noinspection PyMethodMayBeStatic
def transform_multisigma(self, A: torch.Tensor) -> torch.Tensor:
A.mul_(-0.5)
A.exp_()
return A
def transform_singlesigma(self, A: torch.Tensor) -> torch.Tensor:
A.mul_(self.gamma)
A.exp_()
return A
def __repr__(self):
return f"GaussianKernel(sigma={self.sigma})"
def __str__(self):
return f"Gaussian kernel<{self.sigma}>"
class LaplacianKernel(GaussianKernel):
"""Class for computing the Laplacian kernel, and related kernel-vector products.
The Laplacian kernel is similar to the Gaussian kernel, but less sensitive to changes
in the parameter `sigma`.
Parameters
----------
sigma
The length-scale of the Laplacian kernel
Notes
-----
The Laplacian kernel is determined by the following formula
.. math::
k(x, x') = \\exp\\left( -\\frac{\\lVert x - x' \\rVert}{\\sigma} \\right)
"""
def __init__(self, sigma: float, opt: Optional[BaseOptions] = None):
# With respect to the Gaussian kernel we need to change the value of gamma,
# and from squared norm to norm. The latter change requires a different impl. of
# the `_prepare` methods, and of the keops formula.
self.kernel_name = "laplacian"
super().__init__(sigma, opt)
self.gaussian_type = 'single'
self.gamma = torch.tensor(
-1 / self.sigma.item(), dtype=torch.float64).item()
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(g * Sqrt(SqDist(x1, x2))) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(1)'
]
other_vars = [torch.tensor([self.gamma]).to(dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def _finalize(self, A: torch.Tensor, d: DistKerContainer) -> torch.Tensor:
A.mul_(-2.0)
A.add_(d.sq1.to(A))
A.add_(d.sq2.to(A).t())
A.clamp_min_(0)
A.sqrt_()
return self._transform(A)
def __repr__(self):
return f"LaplacianKernel(sigma={self.sigma})"
| 37.053824
| 99
| 0.610168
|
ff6fc7f51343b624f7f43dd1540b445f30178f44
| 7,099
|
py
|
Python
|
main.py
|
JamesK2754/DnD-Tool
|
6603ce9ceee4c05acce0969ce18ae2830a4ea23e
|
[
"MIT"
] | null | null | null |
main.py
|
JamesK2754/DnD-Tool
|
6603ce9ceee4c05acce0969ce18ae2830a4ea23e
|
[
"MIT"
] | null | null | null |
main.py
|
JamesK2754/DnD-Tool
|
6603ce9ceee4c05acce0969ce18ae2830a4ea23e
|
[
"MIT"
] | null | null | null |
import random
import time
import os
playerls = []
clear = lambda: os.system('clear')
print('''
=================
| DnD Dice Tool |
| by James King |
| MIT Licence |
| V2.0 |
=================''')
time.sleep(2)
clear()
v = '2.0'
vint = int(2)
def mainrun():
def set_players():
clear()
set_p = True
print("Exit code is !x")
while set_p:
input_name = input("Name: ")
if input_name == "!x":
set_p = False
menu()
playerls.append(input_name)
def config_wiz():
clear()
print("Welcome to the Config Wizard.\nPlease select from one of the following options:\n1) Load Config\n2) Create config from current settings\n3) Create new config\n4) Exit config")
config_command = ()
while config_command not in range(1,5):
config_command = int(input("DnD> "))
if config_command == 1:
print("To allow the tool to find and load the config, move to config text file into the same directory as this tool. Then, enter the name of the file - without the .dndtoolconf.txt extention.")
file_name = input("configwiz> ")
print("Searching...")
try:
config_file = open(f"./{file_name}.dndtoolconf.txt")
print("Reading...")
config_conts = config_file.read()
config_file.close()
config_conts = config_conts.replace("=", "\n")
config_ls = config_conts.split("\n")
config_ls.pop(0)
wplayersls = config_ls[5].split("~")
print("Players located!")
versionconf = config_ls[1].split(".")
print(versionconf)
input()
if versionconf[0] == "β":
print("!Warning! This config was generated by a beta version of this tool - some features may not work.")
nversionconf = int(versionconf[1])
else:
nversionconf = int(versionconf[0])
if nversionconf > vint:
print("!Warning! This config was made by a newer version of this tool than the one you are running. It is reccomended that you update your tool to avoid issues.")
if nversionconf < vint:
print("!Warning! This config was made by an older version of this tool than the one you are running, some features may not work. It is reccomended that you regenerate the config to avoid issues.")
for x in range(len(wplayersls)):
print(wplayersls[x])
confirm = ''
while confirm not in ('y', 'Y', 'n', 'N'):
confirm = input("Is that correct (y/n)? ")
if confirm in ('y', 'Y'):
for x in range(len(wplayersls)):
playerls.append(wplayersls[x])
else:
clear()
menu()
except:
input("Something went wrong...")
menu()
if config_command == 2:
filename = input("What should we call the file? ")
groupname = input("And what is the name of the group? ")
configfile = open(f"./{filename}.dndtoolconf.txt", "w")
joinchar = "~"
playerstring = joinchar.join(playerls)
configfile.write(f'''!! This is a config file generated by DnD Dice Tools. To use, move to the same directory as the tool file then navigate to the Config Wizard and select option 1
v={v}
name={groupname}
players={playerstring}''')
configfile.close()
input("File made!")
menu()
if config_command == 3:
tplayerls = []
filename = input("What should we call the file? ")
groupname = input("And what is the name of the group? ")
configfile = open(f"./{filename}.dndtoolconf.txt", "w")
joinchar = "~"
set_p = True
print("Enter the names of the players\nThe exit code is !x")
while set_p:
input_name = input("Name: ")
if input_name == "!x":
set_p = False
break
tplayerls.append(input_name)
playerstring = joinchar.join(tplayerls)
configfile.write(f'''!! This is a config file generated by DnD Dice Tools. To use, move to the same directory as the tool file then navigate to the Config Wizard and select option 1
v={v}
name={groupname}
players={playerstring}''')
configfile.close()
input("File made!")
menu()
if config_command == 4:
clear()
menu()
def menu():
if len(playerls) == 0:
print("!! No players have been added yet. Please run option run before rolling any group dice to avoid errors. !!")
print("DnD Dice Tool\nPlease select from one of the following options:\n1) Set players\n2) Run D20 for all\n3) Run D20 for one\n4) Run Dx for all\n5) Run Dx for one\n6) Print players\n7) Clear terminal\n8) Config Wizard\n9) About\n0) Exit")
menu_option = ()
while menu_option not in range(0,10):  # accept 0-9; option 0 is Exit
menu_option = int(input("DnD> "))
if menu_option == 1:
set_players()
if menu_option == 2:
for x in range(len(playerls)):
num = random.randint(1,20)
print(f"{playerls[x]} {num}")
input()
menu()
if menu_option == 3:
print(random.randint(1,20))
input()
menu()
if menu_option == 4:
d = int(input("roll a d"))
for x in range(len(playerls)):
num = random.randint(1,d)
print(f"{playerls[x]} {num}")
input()
menu()
if menu_option == 5:
d = int(input("roll a d"))
print(random.randint(1, d))
input()
menu()
if menu_option == 6:
if len(playerls) > 0:
print("players are...")
for x in range(len(playerls)):
print(playerls[x])
else:
print("No players were found in list.")
input()
menu()
if menu_option == 7:
clear()
menu()
if menu_option == 8:
clear()
config_wiz()
menu()
if menu_option == 9:
print(f'''
*=======================================================*
| DnDTool by James King |
| Version {v} |
| Provided under the MIT licence, distributed on GitHub |
| www.JamesDev.co.gg |
*=======================================================*
''')
input()
clear()
menu()
if menu_option == 0:
exit()
menu()
mainrun()
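# ---------------------------------------------------------------------------
# Example of the .dndtoolconf.txt format written and parsed by config_wiz()
# above (group and player names here are made up). The loader replaces "=" with
# newlines, drops the leading comment line, and then reads index 1 (the version
# value) and index 5 (the "~"-separated player list):
#
#   !! This is a config file generated by DnD Dice Tools. To use, move to the same directory as the tool file then navigate to the Config Wizard and select option 1
#   v=2.0
#   name=The Party
#   players=Alice~Bob~Carol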
| 39.659218
| 248
| 0.498098
|
ad2dfacc4019673d83ddc70bbfbec8409e4ac883
| 21,799
|
py
|
Python
|
pysoa/test/plugins/pytest/plans.py
|
viniciusfeitosa/pysoa
|
925ca0c662f2e2f2943f33f1f30f9aae3742334f
|
[
"Apache-2.0"
] | null | null | null |
pysoa/test/plugins/pytest/plans.py
|
viniciusfeitosa/pysoa
|
925ca0c662f2e2f2943f33f1f30f9aae3742334f
|
[
"Apache-2.0"
] | null | null | null |
pysoa/test/plugins/pytest/plans.py
|
viniciusfeitosa/pysoa
|
925ca0c662f2e2f2943f33f1f30f9aae3742334f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import (
absolute_import,
unicode_literals,
)
from functools import wraps
import re
import sys
from unittest import SkipTest
from _pytest._code.code import TracebackEntry
from _pytest._code.source import Source
from _pytest.mark import MARK_GEN
from _pytest.unittest import (
TestCaseFunction,
UnitTestCase,
)
import py
import six
try:
import pyparsing
TEST_PLANS_ENABLED = True
except ImportError:
pyparsing = None
TEST_PLANS_ENABLED = False
__test_plan_prune_traceback = True # ensure code in this file is not included in failure stack traces
def _get_unpacked_marks(obj):
"""
Copied/modified from _pytest.mark.structures, which is not available on all platforms
"""
# noinspection SpellCheckingInspection
mark_list = getattr(obj, 'pytestmark', [])
if not isinstance(mark_list, list):
mark_list = [mark_list]
return (getattr(mark, 'mark', mark) for mark in mark_list)
PLUGIN_STATISTICS = {
'fixture_tests_collected': 0,
'fixture_tests_executed': 0,
'fixture_tests_skipped': 0,
}
def pytest_addoption(parser):
"""
A hook called by the PyTest plugin system to add configuration options before the command line arguments are parsed.
We use this to add all of the ``--pysoa-*`` command line options.
:param parser: The PyTest wrapper around the ``argparse`` library parser
"""
if not TEST_PLANS_ENABLED:
return
group = parser.getgroup('pysoa', 'pysoa test plans')
group.addoption(
'--pysoa-fixture',
action='append',
dest='pysoa_fixture',
metavar='fixture',
default=[],
help='Only run tests in this fixture filename (multiple uses allowed)',
)
group.addoption(
'--pysoa-test-case',
action='append',
dest='pysoa_test_case',
metavar='plan',
default=[],
help='Only run the test case or cases with this name or description (multiple uses allowed); matches tests in '
'any fixture (unless --pysoa-fixture); mutually exclusive with --pysoa-test-case-regex',
)
group.addoption(
'--pysoa-test-case-regex',
action='append',
dest='pysoa_test_case_regex',
metavar='pattern',
default=None,
type=lambda pattern: None if not pattern else re.compile('^{}'.format(pattern)),
help='Only run the test case or cases whose name or description matches this pattern (multiple uses allowed); '
'matches tests in any fixture (unless --pysoa-fixture); mutually exclusive with --pysoa-test-case',
)
group.addoption(
'--pysoa-disable-tb-prune',
action='store_true',
dest='pysoa_disable_tb_prune',
default=False,
help='By default, traceback frames containing PySOA test plan parsing and execution code are pruned from the '
'error report before display, giving you a less cluttered view when errors occur. This behavior can make '
'it difficult to track down bugs in the PySOA test plan code itself. Setting this option disables this '
'pruning, giving you the full stacktrace.',
)
# noinspection PyProtectedMember
parser_class = type(parser._getparser())
original_parse_args = parser_class.parse_args
@wraps(parser_class.parse_args)
def parse_args(self, args=None, namespace=None):
# Parse wrapper to raise error for mutually-exclusive arguments at the correct time
args = original_parse_args(self, args=args, namespace=namespace)
if args.pysoa_test_case and args.pysoa_test_case_regex:
self.error('use of mutually exclusive arguments: --pysoa-test-case, --pysoa-test-case-regex')
return args
parser_class.parse_args = parse_args
# noinspection SpellCheckingInspection
def pytest_pycollect_makeitem(collector, name, obj):
"""
A hook called by the PyTest main collector when collecting test plans. We use this to find all classes extending
``ServicePlanTestCase`` and return new, custom collector objects for them.
:param collector: The main collector, which must be the parent of any collector object returned
:type collector: PyCollector
:param name: The name of the item to potentially be collected
:type name: str
:param obj: The item to potentially be collected
:return: A new collector object, or ``None`` if this plugin does not recognize the item type, in which case the
collector system will call the next available plugin or hook to do the same.
:rtype: PyCollector
"""
if not TEST_PLANS_ENABLED:
return
# Lazy importing ensures that pytest-cov loads up coverage before this plugin loads other classes in PySOA
from pysoa.test.plan import ServicePlanTestCase
try:
if not issubclass(obj, ServicePlanTestCase):
return
if obj == ServicePlanTestCase:
# Don't collect the parent class
return
except TypeError:
return
return ServicePlanTestCaseCollector(name, parent=collector)
class ServicePlanTestCaseCollector(UnitTestCase):
"""
A specialized collector for collecting PySOA test plans and all of their fixtures and test cases. It yields all of
the test cases that its parent collects (normal ``test_`` methods in ``unittest`` fashion), and then yields all of
the test fixture tests defined by the class extending ``ServicePlanTestCase``.
"""
def collect(self):
"""
Responsible for collecting all the items (tests, in this case traditional test methods and fixture test cases)
in this item (a ``ServicePlanTestCase`` class).
:return: A generator that yields the collected test items (regular ``unittest`` test methods followed by fixture test cases).
"""
if not getattr(self.obj, '__test__', True):
# Super performs this check, too, but if we yield that return, things get screwy, so we have to duplicate
# the check first to be sure.
return
for item in super(ServicePlanTestCaseCollector, self).collect():
# We let super collect normal (``unittest``) test methods, and yield each of those as it collects them,
# just as super does. After this, we can collect the fixtures and fixture tests.
yield item
for test_data in self.obj.get_fixture_test_information():
# Now we collect and yield the fixture tests.
yield ServicePlanTestCaseTestFunction(parent=self, fixture_test_case_data=test_data)
PLUGIN_STATISTICS['fixture_tests_collected'] += 1
class ServicePlanTestCaseTestFunction(TestCaseFunction):
"""
A test item that PyTest executes. Largely behaves like a traditional ``unittest`` test method, but overrides some
behavior to ensure the following:
- That the specialized testing code is run, and that the test fixture name and path are included in result output
- That test skips are handled properly
- That unhelpful stacktrace elements from this test plan code are pruned from result output
- That helpful information is displayed with test failures
"""
def __init__(self, parent, fixture_test_case_data):
"""
Construct a test item.
:param parent: The parent collector
:type parent: ServicePlanTestCaseCollector
:param fixture_test_case_data: The test case data
:type fixture_test_case_data: FixtureTestCaseData
"""
test_name = 'test__{fixture}__{test}'.format(
fixture=fixture_test_case_data.fixture_name,
test=fixture_test_case_data.name,
)
# First, we have to give the test plan test case class a method with this name; otherwise, the TestCase class
# cannot be instantiated. However, this should never be called, because the plugin overrides it.
def fake_test(*_, **__):
raise TypeError('The incorrect test method was called')
fake_test.__doc__ = fixture_test_case_data.description
if hasattr(parent.obj, test_name):
# Lazy importing ensures that pytest-cov loads up coverage before this plugin loads other classes in PySOA
from pysoa.test.plan.errors import StatusError
raise StatusError('Duplicate test name "{name}" in fixture "{fixture}"'.format(
name=fixture_test_case_data.name,
fixture=fixture_test_case_data.fixture_file),
)
setattr(parent.obj, test_name, fake_test)
# Next we call super
super(ServicePlanTestCaseTestFunction, self).__init__(name=test_name, parent=parent)
# Finally, we do some magic to trick PyTest into accepting and displaying the actual location of the test (the
# fixture file and the line in that file) instead of the PySOA test plan parsing code.
self._location = (
self.session.fspath.bestrelpath(py.path.local(fixture_test_case_data.fixture_file)),
fixture_test_case_data.line_number,
self.location[2],
)
self.fspath = py.path.local(fixture_test_case_data.fixture_file)
self._nodeid = '::'.join(
self.nodeid.split('::', 2)[:2] + [fixture_test_case_data.fixture_name, fixture_test_case_data.name],
)
self.fixture_test_case_data = fixture_test_case_data
# Copy any class-level PyTest markers from the ServicePlanTestCase class to each fixture test case
# This allows things like pytest.mark.skip[if], pytest.mark.django_db, etc. to work
for mark in _get_unpacked_marks(parent.obj):
mark_copy = getattr(MARK_GEN, mark.name)(*mark.args, **mark.kwargs)
self.add_marker(mark_copy)
if mark.name == 'skip' or (mark.name == 'skipif' and mark.args and mark.args[0]):
PLUGIN_STATISTICS['fixture_tests_skipped'] += 1
# noinspection SpellCheckingInspection
def runtest(self):
"""
PyTest calls this to actually run the test.
"""
if self.config.pluginmanager.get_plugin('pdbinvoke') is None:
self._run(result=self)
else:
self._debug()
def _handle_skip(self):
"""
Implements the skipping machinery (see super). It's a modified version of super and ``unittest:TestCase.run``
that makes the following changes:
- Handles test fixture skip directives
- Properly handles class-level ``pytest.mark.skip[if]`` markers to make them apply to all fixture test cases
- Properly handles the changing signature of ``_addSkip`` (super incorrectly analyzes the Python version)
:return: ``True`` if this test is to be skipped, ``False`` if it is to be run.
:rtype: bool
"""
# implements the skipping machinery (see super); analogous to Python's Lib/unittest/case.py:run
cls = self._testcase.__class__
skipped = False
skipped_why = 'unknown'
if getattr(cls, '__unittest_skip__', False):
skipped = True
skipped_why = getattr(cls, '__unittest_skip_why__', '')
elif self.fixture_test_case_data.skip:
skipped = True
skipped_why = self.fixture_test_case_data.skip
if skipped:
# If the class or fixture or fixture test case was skipped
try:
# noinspection PyProtectedMember
# This is the signature on Python 3.4+ or unittest2 on Python 3.3-
self._testcase._addSkip(self, self._testcase, skipped_why)
except TypeError:
if sys.version_info >= (3, 4):
# If we got this error >= Python 3.4, something is wrong, so re-raise it
raise
# noinspection PyProtectedMember
self._testcase._addSkip(self, skipped_why)
PLUGIN_STATISTICS['fixture_tests_skipped'] += 1
return True
return False
# noinspection PyProtectedMember
def _debug(self):
"""
Runs the test in debug mode, which starts a debugger as soon as an error happens. Does not run ``setUp`` or
``tearDown`` since the code in ``ServicePlanTestCase`` takes care of this. See ``unittest:TestCase.debug``.
"""
# see unittest.TestCase.debug
if self._handle_skip():
return
self.fixture_test_case_data.callable(self._testcase)
while self._testcase._cleanups:
func, args, kwargs = self._testcase._cleanups.pop(-1)
func(*args, **kwargs)
# noinspection PyProtectedMember
def _run(self, result=None):
"""
Runs the test in normal mode, which adds failures and errors to the result. Does not run ``setUp`` or
``tearDown`` since the code in ``ServicePlanTestCase`` takes care of this. See ``unittest:TestCase.run``.
:param result: The test result so far
"""
orig_result = result
if result is None:
result = self._testcase.defaultTestResult()
start_test_run = getattr(result, 'startTestRun', None)
if start_test_run is not None:
start_test_run()
self._testcase._resultForDoCleanups = result
result.startTest(self._testcase)
if self._handle_skip():
result.stopTest(self._testcase)
return
PLUGIN_STATISTICS['fixture_tests_executed'] += 1
try:
success = False
# noinspection PyBroadException
try:
self.fixture_test_case_data.callable(self._testcase)
except KeyboardInterrupt:
raise
except self._testcase.failureException:
result.addFailure(self._testcase, sys.exc_info())
except SkipTest as e:
self._testcase._addSkip(result, str(e))
except: # noqa E722
result.addError(self._testcase, sys.exc_info())
else:
success = True
success = success and self._testcase.doCleanups()
if success:
result.addSuccess(self._testcase)
finally:
result.stopTest(self._testcase)
if orig_result is None:
stop_test_run = getattr(result, 'stopTestRun', None)
if stop_test_run is not None:
stop_test_run()
# noinspection SpellCheckingInspection
def _prunetraceback(self, exception_info):
"""
Prunes unhelpful information from the traceback so that test failure report output isn't overwhelming and
still contains useful information. Also appends the specialized fixture test case traceback entry to the end
of the traceback.
:param exception_info: The PyTest wrapper around the failure exception info object
"""
# Before any pruning, get the frame containing _run_test_case so that we can use its locals
lowest_test_case_frame = next(
(
tb for tb in reversed(exception_info.traceback)
if tb.locals.get('_test_function_frame', False) or tb.locals.get('_run_test_case_frame', False)
),
None,
)
super(ServicePlanTestCaseTestFunction, self)._prunetraceback(exception_info)
if not lowest_test_case_frame:
return
if self.config.getoption('pysoa_disable_tb_prune') is not True:
exception_info.traceback = exception_info.traceback.filter(
lambda x: not x.frame.f_globals.get('__test_plan_prune_traceback')
)
test_case = lowest_test_case_frame.locals['test_case']
locals_to_copy = {'job_response', 'action_results', 'action_case'}
if lowest_test_case_frame.locals.get('_test_function_frame', False):
locals_to_copy = {'test_fixture_results', 'test_case'}
# noinspection PyProtectedMember
extra_entry = ServicePlanFixtureTestTracebackEntry(
name='{cls}::{fixture}::{test}'.format(
cls=lowest_test_case_frame.locals['self'].__class__.__name__,
fixture=test_case['fixture_name'],
test=test_case['name'],
),
line_number=test_case['line_number'],
path=py.path.local(test_case['fixture_file_name']),
local_variables={
k: v for k, v in six.iteritems(lowest_test_case_frame.locals)
if k in locals_to_copy
},
fixture_source=test_case['fixture_source'],
test_source=test_case['source'],
raw_entry=lowest_test_case_frame._rawentry,
)
exception_info.traceback.append(extra_entry)
# noinspection SpellCheckingInspection
class ServicePlanFixtureTestTracebackEntry(TracebackEntry):
"""
A special traceback entry for displaying the relevant test fixture file contents instead of Python code when a
fixture test case fails.
"""
class Faker(object):
pass
def __init__(
self,
name,
line_number,
path,
local_variables,
fixture_source,
test_source,
raw_entry,
):
super(ServicePlanFixtureTestTracebackEntry, self).__init__(raw_entry)
self._name = name
self.lineno = line_number - 1
self._path = path
self._locals = local_variables
self._fixture_source = Source(fixture_source)
self._test_source = test_source
self._frame = self.Faker()
self._frame.statement = self.statement
self._frame.getargs = lambda *_, **__: list(six.iteritems(local_variables))
self._frame.f_locals = local_variables
self._frame.code = self.Faker()
self._frame.code.path = path
self._frame.code.raw = self.Faker()
self._frame.code.raw.co_filename = str(path)
@property
def frame(self):
return self._frame
@property
def relline(self):
return self.lineno - self.getfirstlinesource()
@property
def statement(self):
return self._fixture_source[self.lineno]
@property
def path(self):
return self._path
def getlocals(self):
return self._locals
locals = property(getlocals, None, None, str('locals of underlying frame'))
def getfirstlinesource(self):
return max(self.lineno - 3, 0)
def getsource(self, astcache=None):
start = self.getfirstlinesource()
end = start + len(self._test_source) + 5
return self._fixture_source[start:end]
source = property(getsource, None, None, str('source code of failing test'))
def ishidden(self):
return False
def name(self):
return self._name
name = property(name, None, None, str('name of underlying code'))
def __str__(self):
return ' File {path} line {line_number} (approximate) in {test}\n {source}\n'.format(
path=self.path,
line_number=self.lineno + 1,
test=self.name,
source=self._test_source,
)
def __repr__(self):
return '<TracebackEntry {}:{}>'.format(self.path, self.lineno + 1)
def pytest_collection_modifyitems(config, items):
"""
A hook called by the PyTest main collector immediately after collecting test plans. We use this to "deselect"
test cases that do not match the supplied ``--pysoa-*`` filter command line arguments.
:param config: The PyTest config object
:param items: The list of collected test items, which includes all tests (regular tests collected by PyTest and
other plugins as well as fixture test cases). Any modifications must happen against this argument
directly (a new array can't be created and returned).
"""
if not TEST_PLANS_ENABLED:
return
reporter = None
# noinspection PyBroadException
try:
reporter = config.pluginmanager.get_plugin('terminalreporter')
except Exception:
pass
soa_test_case = config.getoption('pysoa_test_case')
soa_test_case_regex = config.getoption('pysoa_test_case_regex')
soa_fixture = config.getoption('pysoa_fixture')
deselected = []
remaining = []
for test in items:
if soa_test_case or soa_test_case_regex or soa_fixture:
if not isinstance(test, ServicePlanTestCaseTestFunction):
# At least one of the plugin filtering arguments was specified, but this is not a service plan test
deselected.append(test)
else:
test_data = test.fixture_test_case_data
if (
# The fixture argument(s) was specified, but the fixture name does not match the argument(s)
(soa_fixture and test_data.fixture_name not in soa_fixture) or
# The test case argument(s) was specified, but the test name does not match the argument(s)
(
soa_test_case and
test_data.name not in soa_test_case and
test_data.description not in soa_test_case
) or
# The test regex argument(s) was specified, but the test name does not match the argument pattern(s)
(soa_test_case_regex and not any(
p.match(test_data.name) or p.match(test_data.description) for p in soa_test_case_regex
))
):
deselected.append(test)
else:
remaining.append(test)
if deselected:
config.hook.pytest_deselected(items=deselected)
if reporter:
reporter.report_collect()
items[:] = remaining
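# ---------------------------------------------------------------------------
# Editorial illustration (not part of the plugin): a standalone restatement of
# the selection rules applied by pytest_collection_modifyitems above, with
# hypothetical fixture and test names. A fixture test is kept only if it
# satisfies every filter that was actually supplied on the command line.
def _would_select_fixture_test(fixture_name, test_name, description,
                               fixtures=(), names=(), patterns=()):
    if fixtures and fixture_name not in fixtures:
        return False
    if names and test_name not in names and description not in names:
        return False
    if patterns and not any(
        p.match(test_name) or p.match(description) for p in patterns
    ):
        return False
    return True


# For example, with --pysoa-fixture orders --pysoa-test-case-regex create:
#   _would_select_fixture_test('orders', 'create_order', 'Create an order',
#                              fixtures=['orders'],
#                              patterns=[re.compile('^create')])  # -> True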
| 39.277477
| 120
| 0.647736
|
55008bea7669fd69649742455c1030199ef14341
| 4,712
|
py
|
Python
|
mask_word.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 24
|
2019-12-21T15:08:29.000Z
|
2022-01-18T16:41:41.000Z
|
mask_word.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 1
|
2021-09-06T08:57:35.000Z
|
2021-09-06T09:52:52.000Z
|
mask_word.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 7
|
2019-12-17T17:10:59.000Z
|
2022-03-02T20:19:05.000Z
|
import torch
from transformers import *
import pdb
import operator
from collections import OrderedDict
import sys
import traceback
import argparse
import string
import logging
DEFAULT_MODEL_PATH='bert-large-cased'
DEFAULT_TO_LOWER=False
DEFAULT_TOP_K = 20
ACCRUE_THRESHOLD = 1
def init_model(model_path,to_lower):
logging.basicConfig(level=logging.INFO)
tokenizer = BertTokenizer.from_pretrained(model_path,do_lower_case=to_lower)
model = BertForMaskedLM.from_pretrained(model_path)
#tokenizer = RobertaTokenizer.from_pretrained(model_path,do_lower_case=to_lower)
#model = RobertaForMaskedLM.from_pretrained(model_path)
model.eval()
return model,tokenizer
def get_sent():
print("Enter sentence. Type q to quit")
sent = input()
if (sent == 'q'):
return sent
else:
#return '[CLS] ' + sent + '[SEP]'
return sent
def read_descs(file_name):
ret_dict = {}
with open(file_name) as fp:
line = fp.readline().rstrip("\n")
if (len(line) >= 1):
ret_dict[line] = 1
while line:
line = fp.readline().rstrip("\n")
if (len(line) >= 1):
ret_dict[line] = 1
return ret_dict
def get_mask_index(limit):
masked_index = 0
while (True):
try:
print("Enter mask index value in range 0 -",limit-1)
masked_index = int(input())
if (masked_index < limit and masked_index >= 0):
break
except:
print("Enter Numeric value:")
return masked_index
def perform_task(model,tokenizer,top_k,accrue_threshold,text):
text = '[CLS]' + text + '[SEP]'
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Create the segments tensors.
segments_ids = [0] * len(tokenized_text)
masked_index = 0
for i in range(len(tokenized_text)):
if (tokenized_text[i] == "entity"):
masked_index = i
break
if (masked_index == 0):
dstr = ""
for i in range(len(tokenized_text)):
dstr += " " + str(i) + ":"+tokenized_text[i]
print(dstr)
masked_index = get_mask_index(len(tokenized_text))
tokenized_text[masked_index] = "[MASK]"
indexed_tokens[masked_index] = 103  # 103 is the id of the [MASK] token in the standard BERT vocabularies
print(tokenized_text)
print(masked_index)
results_dict = {}
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
with torch.no_grad():
predictions = model(tokens_tensor, segments_tensors)
for i in range(len(predictions[0][0,masked_index])):
if (float(predictions[0][0,masked_index][i].tolist()) > accrue_threshold):
tok = tokenizer.convert_ids_to_tokens([i])[0]
results_dict[tok] = float(predictions[0][0,masked_index][i].tolist())
k = 0
sorted_d = OrderedDict(sorted(results_dict.items(), key=lambda kv: kv[1], reverse=True))
for i in sorted_d:
if (i in string.punctuation or i.startswith('##') or len(i) == 1 or i.startswith('.') or i.startswith('[')):
continue
print(i,sorted_d[i])
k += 1
if (k > top_k):
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Predicting neighbors to a word in a sentence using BERT masked LM. Neighbors are from the BERT vocab (which includes subwords and full words). Type in a sentence and then choose a position to mask, or type in a sentence with the word entity at the position where the mask should be applied.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-model', action="store", dest="model", default=DEFAULT_MODEL_PATH,help='BERT pretrained models, or custom model path')
parser.add_argument('-topk', action="store", dest="topk", default=DEFAULT_TOP_K,type=int,help='Number of neighbors to display')
parser.add_argument('-tolower', action="store", dest="tolower", default=DEFAULT_TO_LOWER,help='Convert tokens to lowercase. Set to True only for uncased models')
parser.add_argument('-threshold', action="store", dest="threshold", default=ACCRUE_THRESHOLD,type=float,help='threshold of results to pick')
results = parser.parse_args()
try:
model,tokenizer = init_model(results.model,results.tolower)
while (True):
text = get_sent()
if (text == "q"):
print("Quitting")
break
perform_task(model,tokenizer,results.topk,results.threshold,text)
except:
print("Unexpected error:", sys.exc_info()[0])
traceback.print_exc(file=sys.stdout)
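# ---------------------------------------------------------------------------
# Example invocation (illustrative; the model name shown is simply the
# DEFAULT_MODEL_PATH defined above):
#   python mask_word.py -model bert-large-cased -topk 20
# At the prompt, a sentence such as "He deposited the money at the entity"
# masks the word "entity" (see perform_task), and the script prints the top-k
# vocabulary predictions for that position. If "entity" is absent from the
# sentence, the script asks you to pick the mask index by hand.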
| 36.246154
| 371
| 0.65556
|
58499abba87c2f1a393c1dfd59e8bac14fe65ef1
| 228
|
py
|
Python
|
nautobot/core/__init__.py
|
jfach/nautobot
|
8965c30ffb0c1ea64f9710eada7fb49a6f2a4b62
|
[
"Apache-2.0"
] | 2
|
2021-11-01T10:17:02.000Z
|
2021-11-08T08:35:44.000Z
|
nautobot/core/__init__.py
|
jfach/nautobot
|
8965c30ffb0c1ea64f9710eada7fb49a6f2a4b62
|
[
"Apache-2.0"
] | null | null | null |
nautobot/core/__init__.py
|
jfach/nautobot
|
8965c30ffb0c1ea64f9710eada7fb49a6f2a4b62
|
[
"Apache-2.0"
] | 1
|
2021-02-24T23:02:28.000Z
|
2021-02-24T23:02:28.000Z
|
from nautobot.core import checks
# This will make sure the celery app is always imported when
# Django starts so that shared_task will use this app.
from nautobot.core.celery import app as celery_app
__all__ = ("celery_app",)
| 28.5
| 60
| 0.785088
|
5ebd0788482ce9fc7b50c59d3938e84fd63d42fc
| 210,341
|
py
|
Python
|
scipy/stats/tests/test_stats.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-26T12:15:51.000Z
|
2020-02-26T12:15:51.000Z
|
scipy/stats/tests/test_stats.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/tests/test_stats.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | null | null | null |
""" Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
https://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import warnings
from collections import namedtuple
import multiprocessing
from numpy.testing import (dec, assert_, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_approx_equal,
assert_allclose, assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy.ma.testutils as mat
from numpy import array, arange, float32, float64, power
import numpy as np
import scipy.stats as stats
import scipy.stats.mstats as mstats
import scipy.stats.mstats_basic as mstats_basic
from .common_tests import check_named_results
from scipy.special import kv
from scipy.sparse.sputils import matrix
from scipy.integrate import quad
""" Numbers in docstrings beginning with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCullough (1999) Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
# Datasets
# These data sets are from the nasty.dat sets used by Wilkinson
# For completeness, I should write the relevant tests and count them as failures
# Somewhat acceptable, since this is still beta software. It would count as a
# good target for 1.0 status
X = array([1,2,3,4,5,6,7,8,9], float)
ZERO = array([0,0,0,0,0,0,0,0,0], float)
BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,
99999998,99999999], float)
LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,
0.99999997,0.99999998,0.99999999], float)
HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)
TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)
ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)
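# Editorial note (illustration only, not an original test): trimmed statistics
# such as stats.tmean first discard values outside `limits`, so for X = 1..9
# with limits=(2, 8) and both bounds inclusive the retained values are 2..8 and
# stats.tmean(X, (2, 8), (True, True)) == 5.0, exactly as
# TestTrimmedStats.test_tmean below asserts.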
class TestTrimmedStats(object):
# TODO: write these tests to handle missing values properly
dprec = np.finfo(np.float64).precision
def test_tmean(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=self.dprec)
y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
y2 = stats.tmean(X, limits=None)
assert_approx_equal(y1, y2, significant=self.dprec)
def test_tvar(self):
y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
y = stats.tvar(X, limits=None)
assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
x_2d = arange(63, dtype=float64).reshape((9, 7))
y = stats.tvar(x_2d, axis=None)
assert_approx_equal(y, x_2d.var(ddof=1), significant=self.dprec)
y = stats.tvar(x_2d, axis=0)
assert_array_almost_equal(y[0], np.full((1, 7), 367.50000000), decimal=8)
y = stats.tvar(x_2d, axis=1)
assert_array_almost_equal(y[0], np.full((1, 9), 4.66666667), decimal=8)
y = stats.tvar(x_2d[3, :])
assert_approx_equal(y, 4.666666666666667, significant=self.dprec)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "Degrees of freedom <= 0 for slice.")
# Limiting some values along one axis
y = stats.tvar(x_2d, limits=(1, 5), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 2.5, significant=self.dprec)
# Limiting all values along one axis
y = stats.tvar(x_2d, limits=(0, 6), axis=1, inclusive=(True, True))
assert_approx_equal(y[0], 4.666666666666667, significant=self.dprec)
assert_equal(y[1], np.nan)
def test_tstd(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
y = stats.tstd(X, limits=None)
assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
def test_tmin(self):
assert_equal(stats.tmin(4), 4)
x = np.arange(10)
assert_equal(stats.tmin(x), 0)
assert_equal(stats.tmin(x, lowerlimit=0), 0)
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
x = x.reshape((5, 2))
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
assert_equal(stats.tmin(x, axis=None), 0)
x = np.arange(10.)
x[9] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmin(x), np.nan)
assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
msg = "'propagate', 'raise', 'omit'"
with assert_raises(ValueError, match=msg):
stats.tmin(x, nan_policy='foo')
def test_tmax(self):
assert_equal(stats.tmax(4), 4)
x = np.arange(10)
assert_equal(stats.tmax(x), 9)
assert_equal(stats.tmax(x, upperlimit=9), 9)
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
x = x.reshape((5, 2))
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
assert_equal(stats.tmax(x, axis=None), 9)
x = np.arange(10.)
x[6] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmax(x), np.nan)
assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
def test_tsem(self):
y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
y_ref = np.array([4, 5, 6, 7, 8])
assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
significant=self.dprec)
assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
stats.tsem(X, limits=None),
significant=self.dprec)
class TestCorrPearsonr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_r_almost_exactly_pos1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, a)
assert_allclose(r, 1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_r_almost_exactly_neg1(self):
a = arange(3.0)
r, prob = stats.pearsonr(a, -a)
assert_allclose(r, -1.0, atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
def test_basic(self):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = array([-1, 0, 1])
b = array([0, 0, 3])
r, prob = stats.pearsonr(a, b)
assert_approx_equal(r, np.sqrt(3)/2)
assert_approx_equal(prob, 1/3)
def test_constant_input(self):
# Zero variance input
# See https://github.com/scipy/scipy/issues/3728
with assert_warns(stats.PearsonRConstantInputWarning):
r, p = stats.pearsonr([0.667, 0.667, 0.667], [0.123, 0.456, 0.789])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_near_constant_input(self):
# Near constant input (but not constant):
x = [2, 2, 2 + np.spacing(2)]
y = [3, 3, 3 + 6*np.spacing(3)]
with assert_warns(stats.PearsonRNearConstantInputWarning):
# r and p are garbage, so don't bother checking them in this case.
# (The exact value of r would be 1.)
r, p = stats.pearsonr(x, y)
def test_very_small_input_values(self):
# Very small values in an input. A naive implementation will
# suffer from underflow.
# See https://github.com/scipy/scipy/issues/9353
x = [0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971]
y = [2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245]
r, p = stats.pearsonr(x,y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.7272930540750450)
assert_allclose(p, 0.1637805429533202)
def test_very_large_input_values(self):
# Very large values in an input. A naive implementation will
# suffer from overflow.
# See https://github.com/scipy/scipy/issues/8980
x = 1e90*np.array([0, 0, 0, 1, 1, 1, 1])
y = 1e90*np.arange(7)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.8660254037844386)
assert_allclose(p, 0.011724811003954638)
def test_extremely_large_input_values(self):
# Extremely large values in x and y. These values would cause the
# product sigma_x * sigma_y to overflow if the two factors were
# computed independently.
x = np.array([2.3e200, 4.5e200, 6.7e200, 8e200])
y = np.array([1.2e199, 5.5e200, 3.3e201, 1.0e200])
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.351312332103289)
assert_allclose(p, 0.648687667896711)
def test_length_two_pos1(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
r, p = stats.pearsonr([1, 2], [3, 5])
assert_equal(r, 1)
assert_equal(p, 1)
def test_length_two_neg2(self):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
r, p = stats.pearsonr([2, 1], [3, 5])
assert_equal(r, -1)
assert_equal(p, 1)
def test_more_basic_examples(self):
x = [1, 2, 3, 4]
y = [0, 1, 0.5, 1]
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
assert_allclose(r, 0.674199862463242)
assert_allclose(p, 0.325800137536758)
x = [1, 2, 3]
y = [5, -4, -13]
r, p = stats.pearsonr(x, y)
# The expected r and p are exact.
assert_allclose(r, -1.0)
assert_allclose(p, 0.0, atol=1e-7)
def test_unequal_lengths(self):
x = [1, 2, 3]
y = [4, 5]
assert_raises(ValueError, stats.pearsonr, x, y)
def test_len1(self):
x = [1]
y = [2]
assert_raises(ValueError, stats.pearsonr, x, y)
class TestFisherExact(object):
"""Some tests to show that fisher_exact() works correctly.
Note that in SciPy 0.9.0 this was not working well for large numbers due to
inaccuracy of the hypergeom distribution (see #1218). Fixed now.
Also note that R and SciPy have different argument formats for their
hypergeometric distribution functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
def test_basic(self):
fisher_exact = stats.fisher_exact
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
def test_precise(self):
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = stats.fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
@pytest.mark.slow
def test_large_numbers(self):
# Test with some large numbers. Regression test for #1401
pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R
for pval, num in zip(pvals, [75, 76, 77]):
res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
assert_approx_equal(res, pval, significant=4)
res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
def test_raises(self):
# test we raise an error for wrong shape of input.
assert_raises(ValueError, stats.fisher_exact,
np.arange(6).reshape(2, 3))
def test_row_or_col_zero(self):
tables = ([[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]])
for table in tables:
oddsratio, pval = stats.fisher_exact(table)
assert_equal(pval, 1.0)
assert_equal(oddsratio, np.nan)
def test_less_greater(self):
tables = (
# Some tables to compare with R:
[[2, 7], [8, 2]],
[[200, 7], [8, 300]],
[[28, 21], [6, 1957]],
[[190, 800], [200, 900]],
# Some tables with simple exact values
# (includes regression test for ticket #1568):
[[0, 2], [3, 0]],
[[1, 1], [2, 1]],
[[2, 0], [1, 2]],
[[0, 1], [2, 3]],
[[1, 0], [1, 4]],
)
pvals = (
# from R:
[0.018521725952066501, 0.9990149169715733],
[1.0, 2.0056578803889148e-122],
[1.0, 5.7284374608319831e-44],
[0.7416227, 0.2959826],
# Exact:
[0.1, 1.0],
[0.7, 0.9],
[1.0, 0.3],
[2./3, 1.0],
[1.0, 1./3],
)
for table, pval in zip(tables, pvals):
res = []
res.append(stats.fisher_exact(table, alternative="less")[1])
res.append(stats.fisher_exact(table, alternative="greater")[1])
assert_allclose(res, pval, atol=0, rtol=1e-7)
def test_gh3014(self):
# check if issue #3014 has been fixed.
# before, this would have raised a ValueError
odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
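# Editorial note (worked example mirroring TestFisherExact.test_basic above):
# for the 2x2 table [[2, 7], [8, 2]] the sample odds ratio returned by
# stats.fisher_exact is (2 * 2) / (7 * 8) = 4 / 56 (about 0.0714), and the
# two-sided p-value is approximately 0.0230141.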
class TestCorrSpearmanr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_scalar(self):
y = stats.spearmanr(4., 2.)
assert_(np.isnan(y).all())
def test_uneven_lengths(self):
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
def test_uneven_2d_shapes(self):
# Different number of columns should work - those just get concatenated.
np.random.seed(232324)
x = np.random.randn(4, 3)
y = np.random.randn(4, 2)
assert stats.spearmanr(x, y).correlation.shape == (5, 5)
assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)
assert_raises(ValueError, stats.spearmanr, x, y, axis=1)
assert_raises(ValueError, stats.spearmanr, x.T, y.T)
def test_ndim_too_high(self):
np.random.seed(232324)
x = np.random.randn(4, 3, 2)
assert_raises(ValueError, stats.spearmanr, x)
assert_raises(ValueError, stats.spearmanr, x, x)
assert_raises(ValueError, stats.spearmanr, x, None, None)
# But should work with axis=None (raveling axes) for two input arrays
assert_allclose(stats.spearmanr(x, x, axis=None),
stats.spearmanr(x.flatten(), x.flatten(), axis=0))
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0.0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_spearmanr_result_attributes(self):
res = stats.spearmanr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
def test_1d_vs_2d(self):
x1 = [1, 2, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, 5]
res1 = stats.spearmanr(x1, x2)
res2 = stats.spearmanr(np.asarray([x1, x2]).T)
assert_allclose(res1, res2)
def test_1d_vs_2d_nans(self):
# Now the same with NaNs present. Regression test for gh-9103.
for nan_policy in ['propagate', 'omit']:
x1 = [1, np.nan, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)
res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)
assert_allclose(res1, res2)
def test_3cols(self):
x1 = np.arange(6)
x2 = -x1
x3 = np.array([0, 1, 2, 3, 5, 4])
x = np.asarray([x1, x2, x3]).T
actual = stats.spearmanr(x)
expected_corr = np.array([[1, -1, 0.94285714],
[-1, 1, -0.94285714],
[0.94285714, -0.94285714, 1]])
expected_pvalue = np.zeros((3, 3), dtype=float)
expected_pvalue[2, 0:2] = 0.00480466472
expected_pvalue[0:2, 2] = 0.00480466472
assert_allclose(actual.correlation, expected_corr)
assert_allclose(actual.pvalue, expected_pvalue)
def test_gh_9103(self):
# Regression test for gh-9103.
x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],
[5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],
[0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T
corr = np.array([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1.]])
assert_allclose(stats.spearmanr(x, nan_policy='propagate').correlation,
corr)
res = stats.spearmanr(x, nan_policy='omit').correlation
assert_allclose((res[0][1], res[0][2], res[1][2]),
(0.2051957, 0.4857143, -0.4707919), rtol=1e-6)
def test_gh_8111(self):
# Regression test for gh-8111 (different result for float/int/bool).
n = 100
np.random.seed(234568)
x = np.random.rand(n)
m = np.random.rand(n) > 0.7
# bool against float, no nans
a = (x > .5)
b = np.array(x)
res1 = stats.spearmanr(a, b, nan_policy='omit').correlation
# bool against float with NaNs
b[m] = np.nan
res2 = stats.spearmanr(a, b, nan_policy='omit').correlation
# int against float with NaNs
a = a.astype(np.int32)
res3 = stats.spearmanr(a, b, nan_policy='omit').correlation
expected = [0.865895477, 0.866100381, 0.866100381]
assert_allclose([res1, res2, res3], expected)
class TestCorrSpearmanr2(object):
"""Some further tests of the spearmanr function."""
def test_spearmanr_vs_r(self):
# Cross-check with R:
# cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearmanr")
x1 = [1, 2, 3, 4, 5]
x2 = [5, 6, 7, 8, 7]
expected = (0.82078268166812329, 0.088587005313543798)
res = stats.spearmanr(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
def test_empty_arrays(self):
assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
def test_normal_draws(self):
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.28659685838743354, 6.579862219051161e-11)
res = stats.spearmanr(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
def test_corr_1(self):
assert_approx_equal(stats.spearmanr([1, 1, 2], [1, 1, 2])[0], 1.0)
def test_nan_policies(self):
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
def test_unequal_lengths(self):
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.spearmanr, x, y)
def test_omit_paired_value(self):
x1 = [1, 2, 3, 4]
x2 = [8, 7, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy='omit')
res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
assert_equal(res1, res2)
def test_gh_issue_6061_windows_overflow(self):
x = list(range(2000))
y = list(range(2000))
y[0], y[9] = y[9], y[0]
y[10], y[434] = y[434], y[10]
y[435], y[1509] = y[1509], y[435]
# rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
# = 1 - (1 / 500)
# = 0.998
x.append(np.nan)
y.append(3.0)
assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
def test_tie0(self):
# with only ties in one or both inputs
with assert_warns(stats.SpearmanRConstantInputWarning):
r, p = stats.spearmanr([2, 2, 2], [2, 2, 2])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
r, p = stats.spearmanr([2, 0, 2], [2, 2, 2])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
r, p = stats.spearmanr([2, 2, 2], [2, 0, 2])
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_tie1(self):
# Data
x = [1.0, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 2.0, 3.0]
# Ranks of the data, with tie-handling.
xr = [1.0, 2.0, 3.0, 4.0]
yr = [1.0, 2.5, 2.5, 4.0]
# Result of spearmanr should be the same as applying
# pearsonr to the ranks.
sr = stats.spearmanr(x, y)
pr = stats.pearsonr(xr, yr)
assert_almost_equal(sr, pr)
def test_tie2(self):
# Test tie-handling if inputs contain nan's
# Data without nan's
x1 = [1, 2, 2.5, 2]
y1 = [1, 3, 2.5, 4]
# Same data with nan's
x2 = [1, 2, 2.5, 2, np.nan]
y2 = [1, 3, 2.5, 4, np.nan]
# Results for two data sets should be the same if nan's are ignored
sr1 = stats.spearmanr(x1, y1)
sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
assert_almost_equal(sr1, sr2)
def test_ties_axis_1(self):
z1 = np.array([[1, 1, 1, 1], [1, 2, 3, 4]])
z2 = np.array([[1, 2, 3, 4], [1, 1, 1, 1]])
z3 = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])
with assert_warns(stats.SpearmanRConstantInputWarning):
r, p = stats.spearmanr(z1, axis=1)
assert_equal(r, np.nan)
assert_equal(p, np.nan)
r, p = stats.spearmanr(z2, axis=1)
assert_equal(r, np.nan)
assert_equal(p, np.nan)
r, p = stats.spearmanr(z3, axis=1)
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_gh_11111(self):
x = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
0.0007535430349118562, 0.0002661781514710257, 0, 0,
0.0007835762419683435])
with assert_warns(stats.SpearmanRConstantInputWarning):
r, p = stats.spearmanr(x, y)
assert_equal(r, np.nan)
assert_equal(p, np.nan)
def test_index_error(self):
x = np.array([1.0, 7.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
0.0007535430349118562, 0.0002661781514710257, 0, 0,
0.0007835762419683435])
assert_raises(ValueError, stats.spearmanr, x, y, axis=2)
# W.II.E. Tabulate X against X, using BIG as a case weight. The values
# should appear on the diagonal and the total should be 899999955.
# If the table cannot hold these values, forget about working with
# census data. You can also tabulate HUGE against TINY. There is no
# reason a tabulation program should not be able to distinguish
# different values regardless of their magnitude.
# I need to figure out how to do this one.
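# A minimal sketch of one way to do the weighted tabulation described above
# (an illustration only, not a collected test; it takes generic values/weights
# arguments rather than assuming the X and BIG fixtures directly):
def _weighted_crosstab_sketch(values, weights):
    """Cross-tabulate `values` against itself using `weights` as case weights.
    When the row and column variables are identical, all of the weight lands
    on the diagonal, so the table total equals the sum of the weights.
    """
    levels = np.unique(values)
    table = np.zeros((levels.size, levels.size))
    idx = np.searchsorted(levels, values)
    # Each observation contributes its case weight to the (value, value) cell.
    np.add.at(table, (idx, idx), weights)
    return levels, table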
def test_kendalltau():
# case without ties, con-dis equal zero
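    # (For untied data tau = (concordant - discordant) / (n*(n-1)/2); with
    #  n = 8 there are 28 pairs, so tau == 0 means they split 14/14, which is
    #  what "con-dis equal zero" refers to.  Note added for clarity.)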
x = [5, 2, 1, 3, 6, 4, 7, 8]
y = [5, 2, 6, 3, 1, 8, 7, 4]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.0, 1.0)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# case without ties, con-dis equal zero
x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.0, 1.0)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# case without ties, con-dis close to zero
x = [5, 2, 1, 3, 6, 4, 7]
y = [5, 2, 6, 3, 1, 7, 4]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-0.14285714286, 0.77261904762)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# case without ties, con-dis close to zero
x = [2, 1, 3, 6, 4, 7, 8]
y = [2, 6, 3, 1, 8, 7, 4]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.047619047619, 1.0)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# simple case without ties
x = np.arange(10)
y = np.arange(10)
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (1.0, 5.511463844797e-07)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.9555555555555556, 5.511463844797e-06)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (0.9111111111111111, 2.976190476190e-05)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# same in opposite direction
x = np.arange(10)
y = np.arange(10)[::-1]
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-1.0, 5.511463844797e-07)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple of values
b = y[1]
y[1] = y[2]
y[2] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-0.9555555555555556, 5.511463844797e-06)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# swap a couple more
b = y[5]
y[5] = y[6]
y[6] = b
# Cross-check with exact result from R:
# cor.test(x,y,method="kendall",exact=1)
expected = (-0.9111111111111111, 2.976190476190e-05)
res = stats.kendalltau(x, y)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# check exception in case of ties
y[2] = y[1]
assert_raises(ValueError, stats.kendalltau, x, y, method='exact')
# check exception in case of invalid method keyword
assert_raises(ValueError, stats.kendalltau, x, y, method='banana')
# with some ties
# Cross-check with R:
# cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE)
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
expected = (-0.47140452079103173, 0.28274545993277478)
res = stats.kendalltau(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.kendalltau(x1, x2)
check_named_results(res, attributes)
# with only ties in one or both inputs
assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
# check with larger arrays
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.19291382765531062, 1.1337095377742629e-10)
res = stats.kendalltau(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# and do we get a tau of 1 for identical inputs?
assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)
# test nan_policy
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))
assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),
(1.0, 5.5114638e-6), rtol=1e-06)
assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'),
(1.0, 0.00017455009626808976), rtol=1e-06)
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.kendalltau, x, y)
# test all ties
tau, p_value = stats.kendalltau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.kendalltau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Regression test for GitHub issue #6061 - Overflow on Windows
x = np.arange(2000, dtype=float)
x = np.ma.masked_greater(x, 1995)
y = np.arange(2000, dtype=float)
y = np.concatenate((y[1000:], y[:1000]))
assert_(np.isfinite(stats.kendalltau(x,y)[1]))
def test_kendalltau_vs_mstats_basic():
np.random.seed(42)
for s in range(2,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
expected = mstats_basic.kendalltau(a, b)
actual = stats.kendalltau(a, b)
assert_approx_equal(actual[0], expected[0])
assert_approx_equal(actual[1], expected[1])
def test_kendalltau_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [1., 2., 3., 4.]
y = [np.nan, 2.4, 3.4, 3.4]
r1 = stats.kendalltau(x, y, nan_policy='omit')
r2 = stats.kendalltau(x[1:], y[1:])
assert_allclose(r1.correlation, r2.correlation, atol=1e-15)
def test_weightedtau():
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, 0]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, additive=False)
assert_approx_equal(tau, -0.62205716951801038)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
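    # (With a constant weigher every pair contributes the same weight, so the
    #  statistic collapses to the ordinary tau-b value that kendalltau reports
    #  for the same data earlier in this file.  Note added for clarity.)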
tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Asymmetric, ranked version
tau, p_value = stats.weightedtau(x, y, rank=None)
assert_approx_equal(tau, -0.4157652301037516)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None)
assert_approx_equal(tau, -0.7181341329699029)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)
assert_approx_equal(tau, -0.40644850966246893)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)
assert_approx_equal(tau, -0.83766582937355172)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=False)
assert_approx_equal(tau, -0.51604397940261848)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Test argument conversion
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64))
assert_approx_equal(tau, -0.56694968153682723)
# All ties
tau, p_value = stats.weightedtau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Size mismatches
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])
# NaNs
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, np.nan]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
x = [12, 2, np.nan, 12, 2]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
def test_kendall_tau_large():
n = 172.
x = np.arange(n)
y = np.arange(n)
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
y[-1], y[-2] = y[-2], y[-1]
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
y[-3], y[-4] = y[-4], y[-3]
_, pval = stats.kendalltau(x, y, method='exact')
assert_equal(pval, 0.0)
def test_weightedtau_vs_quadratic():
# Trivial quadratic implementation, all parameters mandatory
def wkq(x, y, rank, weigher, add):
tot = conc = disc = u = v = 0
for i in range(len(x)):
for j in range(len(x)):
w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j])
tot += w
if x[i] == x[j]:
u += w
if y[i] == y[j]:
v += w
if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]:
conc += w
elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]:
disc += w
return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)
np.random.seed(42)
for s in range(3,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
# First pass: use element indices as ranks
rank = np.arange(len(a), dtype=np.intp)
for _ in range(2):
for add in [True, False]:
expected = wkq(a, b, rank, lambda x: 1./(x+1), add)
actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation
assert_approx_equal(expected, actual)
# Second pass: use a random rank
np.random.shuffle(rank)
class TestFindRepeats(object):
def test_basic(self):
a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
res, nums = stats.find_repeats(a)
assert_array_equal(res, [1, 2, 3, 4])
assert_array_equal(nums, [3, 3, 2, 2])
def test_empty_result(self):
# Check that empty arrays are returned when there are no repeats.
for a in [[10, 20, 50, 30, 40], []]:
repeated, counts = stats.find_repeats(a)
assert_array_equal(repeated, [])
assert_array_equal(counts, [])
class TestRegression(object):
def test_linregressBIGX(self):
# W.II.F. Regress BIG on X.
# The constant should be 99999990 and the regression coefficient should be 1.
y = stats.linregress(X,BIG)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,99999990)
assert_almost_equal(r,1.0)
def test_regressXX(self):
# W.IV.B. Regress X on X.
# The constant should be exactly 0 and the regression coefficient should be 1.
# This is a perfectly valid regression. The program should not complain.
y = stats.linregress(X,X)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,1.0)
# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
# should tell you that this model is "singular" because BIG and
# LITTLE are linear combinations of each other. Cryptic error
# messages are unacceptable here. Singularity is the most
# fundamental regression error.
# Need to figure out how to handle multiple linear regression. Not obvious
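    # A possible sketch (an illustration only, not an actual test), using plain
    # numpy and assuming the module-level BIG, LITTLE and X fixtures:
    #
    #     A = np.column_stack((BIG, LITTLE, np.ones_like(X)))
    #     coef, resid, rank, sv = np.linalg.lstsq(A, X, rcond=None)
    #     # rank < A.shape[1] exposes the singular design matrix described above.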
def test_regressZEROX(self):
# W.IV.D. Regress ZERO on X.
# The program should inform you that ZERO has no variance or it should
# go ahead and compute the regression and report a correlation and
# total sum of squares of exactly 0.
y = stats.linregress(X,ZERO)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,0.0)
def test_regress_simple(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_rows(self):
        # Regress a line with sinusoidal noise, with a single input of shape (2, N).
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
rows = np.vstack((x, y))
res = stats.linregress(rows)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_cols(self):
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
res = stats.linregress(cols)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_shape_error(self):
# Check that a single input argument to linregress with wrong shape
# results in a ValueError.
assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
def test_linregress(self):
# compared with multivariate ols with pinv
x = np.arange(11)
y = np.arange(5,16)
        y[[1, -2]] -= 1
        y[[0, -1]] += 1
res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
def test_regress_simple_negative_cor(self):
        # If the slope of the regression is negative, the correlation factor R
        # tends to -1, not 1.  Rounding errors can sometimes push it below -1,
        # which would make stderr NaN.
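        # (Roughly, stderr is sqrt((1 - r**2)/df) times a scale factor, so an r
        #  that slips below -1 would make the radicand negative and the square
        #  root NaN.  Note added for clarity.)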
a, n = 1e-71, 100000
x = np.linspace(a, 2 * a, n)
y = np.linspace(2 * a, a, n)
stats.linregress(x, y)
res = stats.linregress(x, y)
assert_(res[2] >= -1) # propagated numerical errors were not corrected
assert_almost_equal(res[2], -1) # perfect negative correlation case
assert_(not np.isnan(res[4])) # stderr should stay finite
def test_linregress_result_attributes(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
check_named_results(res, attributes)
def test_regress_two_inputs(self):
# Regress a simple line formed by two points.
x = np.arange(2)
y = np.arange(3, 5)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 0.0) # non-horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_regress_two_inputs_horizontal_line(self):
# Regress a horizontal line formed by two points.
x = np.arange(2)
y = np.ones(2)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 1.0) # horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_nist_norris(self):
x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,
558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,
995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,
11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,
559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,
998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,
10.2, 117.6, 228.9, 668.4, 449.2, 0.2]
# Expected values
exp_slope = 1.00211681802045
exp_intercept = -0.262323073774029
exp_rsquared = 0.999993745883712
actual = stats.linregress(x, y)
assert_almost_equal(actual.slope, exp_slope)
assert_almost_equal(actual.intercept, exp_intercept)
assert_almost_equal(actual.rvalue**2, exp_rsquared)
def test_empty_input(self):
assert_raises(ValueError, stats.linregress, [], [])
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.linregress(x, x),
(np.nan, np.nan, np.nan, np.nan, np.nan))
def test_theilslopes():
# Basic slope test.
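    # (Worked by hand with the default x = [0, 1, 2]: the pairwise slopes are
    #  1, 0.5 and 0, whose median is 0.5, and the intercept is
    #  median(y) - 0.5*median(x) = 1 - 0.5 = 0.5.  Note added for clarity.)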
slope, intercept, lower, upper = stats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test of confidence intervals.
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
def test_cumfreq():
x = [1, 4, 2, 1, 3, 1]
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
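    # (With the default limits the four bins have width 1 and hold the values
    #  1, 2, 3 and 4 respectively, giving counts [3, 1, 1, 1] and hence the
    #  cumulative counts [3, 4, 5, 6] above.  Note added for clarity.)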
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,
defaultreallimits=(1.5, 5))
assert_(extrapoints == 3)
# test for namedtuple attribute results
attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
check_named_results(res, attributes)
def test_relfreq():
a = np.array([1, 4, 2, 1, 3, 1])
relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
assert_array_almost_equal(relfreqs,
array([0.5, 0.16666667, 0.16666667, 0.16666667]))
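    # (The same [3, 1, 1, 1] bin counts as in test_cumfreq above, divided by
    #  n = 6.  Note added for clarity.)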
# test for namedtuple attribute results
attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.relfreq(a, numbins=4)
check_named_results(res, attributes)
# check array_like input is accepted
relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
numbins=4)
assert_array_almost_equal(relfreqs, relfreqs2)
class TestScoreatpercentile(object):
def setup_method(self):
self.a1 = [3, 4, 5, 10, -3, -5, 6]
self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
def test_basic(self):
x = arange(8) * 0.5
assert_equal(stats.scoreatpercentile(x, 0), 0.)
assert_equal(stats.scoreatpercentile(x, 100), 3.5)
assert_equal(stats.scoreatpercentile(x, 50), 1.75)
def test_fraction(self):
scoreatperc = stats.scoreatpercentile
# Test defaults
assert_equal(scoreatperc(list(range(10)), 50), 4.5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)
# explicitly specify interpolation_method 'fraction' (the default)
assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
interpolation_method='fraction'),
55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
interpolation_method='fraction'),
5.5)
def test_lower_higher(self):
scoreatperc = stats.scoreatpercentile
# interpolation_method 'lower'/'higher'
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
interpolation_method='lower'), 10)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
interpolation_method='higher'), 100)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
interpolation_method='lower'), 1)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
interpolation_method='higher'), 10)
def test_sequence_per(self):
x = arange(8) * 0.5
expected = np.array([0, 3.5, 1.75])
res = stats.scoreatpercentile(x, [0, 100, 50])
assert_allclose(res, expected)
assert_(isinstance(res, np.ndarray))
# Test with ndarray. Regression test for gh-2861
assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
expected)
# Also test combination of 2-D array, axis not None and array-like per
res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
np.array([0, 1, 100, 100]), axis=1)
expected2 = array([[0, 4, 8],
[0.03, 4.03, 8.03],
[3, 7, 11],
[3, 7, 11]])
assert_allclose(res2, expected2)
def test_axis(self):
scoreatperc = stats.scoreatpercentile
x = arange(12).reshape(3, 4)
assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)
x = array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
score = stats.scoreatpercentile(x, 50)
assert_equal(score.shape, ())
assert_equal(score, 1.0)
score = stats.scoreatpercentile(x, 50, axis=0)
assert_equal(score.shape, (3,))
assert_equal(score, [1, 1, 1])
def test_exception(self):
assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
interpolation_method='foobar')
assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
assert_raises(ValueError, stats.scoreatpercentile, [1], -1)
def test_empty(self):
assert_equal(stats.scoreatpercentile([], 50), np.nan)
assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
class TestItemfreq(object):
a = [5, 7, 1, 2, 1, 5, 7] * 10
b = [1, 2, 5, 7]
def test_numeric_types(self):
# Check itemfreq works for all dtypes (adapted from np.unique tests)
def _check_itemfreq(dt):
a = np.array(self.a, dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(a)
assert_array_equal(v[:, 0], [1, 2, 5, 7])
assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt))
dtypes = [np.int32, np.int64, np.float32, np.float64,
np.complex64, np.complex128]
for dt in dtypes:
_check_itemfreq(dt)
def test_object_arrays(self):
a, b = self.a, self.b
dt = 'O'
aa = np.empty(len(a), dt)
aa[:] = a
bb = np.empty(len(b), dt)
bb[:] = b
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
assert_array_equal(v[:, 0], bb)
def test_structured_arrays(self):
a, b = self.a, self.b
dt = [('', 'i'), ('', 'i')]
aa = np.array(list(zip(a, a)), dt)
bb = np.array(list(zip(b, b)), dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
# Arrays don't compare equal because v[:,0] is object array
assert_equal(tuple(v[2, 0]), tuple(bb[2]))
class TestMode(object):
def test_empty(self):
vals, counts = stats.mode([])
assert_equal(vals, np.array([]))
assert_equal(counts, np.array([]))
def test_scalar(self):
vals, counts = stats.mode(4.)
assert_equal(vals, np.array([4.]))
assert_equal(counts, np.array([1]))
def test_basic(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
vals = stats.mode(data1)
assert_equal(vals[0][0], 6)
assert_equal(vals[1][0], 3)
def test_axes(self):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = np.array([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None)
assert_equal(vals[0], np.array([30]))
assert_equal(vals[1], np.array([8]))
vals = stats.mode(arr, axis=0)
assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
assert_equal(vals[1], np.array([[2, 3, 3, 2]]))
vals = stats.mode(arr, axis=1)
assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))
def test_strings(self):
data1 = ['rain', 'showers', 'showers']
vals = stats.mode(data1)
assert_equal(vals[0][0], 'showers')
assert_equal(vals[1][0], 2)
def test_mixed_objects(self):
objects = [10, True, np.nan, 'hello', 10]
arr = np.empty((5,), dtype=object)
arr[:] = objects
vals = stats.mode(arr)
assert_equal(vals[0][0], 10)
assert_equal(vals[1][0], 2)
def test_objects(self):
        # Python objects must be sortable (__lt__ + __eq__) and define __ne__
        # for np.unique to work; __hash__ is needed for the set() check below.
class Point(object):
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.x == other.x
def __ne__(self, other):
return self.x != other.x
def __lt__(self, other):
return self.x < other.x
def __hash__(self):
return hash(self.x)
points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]
arr = np.empty((8,), dtype=object)
arr[:] = points
assert_(len(set(points)) == 4)
assert_equal(np.unique(arr).shape, (4,))
vals = stats.mode(arr)
assert_equal(vals[0][0], Point(2))
assert_equal(vals[1][0], 4)
def test_mode_result_attributes(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
data2 = []
actual = stats.mode(data1)
attributes = ('mode', 'count')
check_named_results(actual, attributes)
actual2 = stats.mode(data2)
check_named_results(actual2, attributes)
def test_mode_nan(self):
data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
actual = stats.mode(data1)
assert_equal(actual, (6, 3))
actual = stats.mode(data1, nan_policy='omit')
assert_equal(actual, (6, 3))
assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')
@pytest.mark.parametrize("data", [
[3, 5, 1, 1, 3],
[3, np.nan, 5, 1, 1, 3],
[3, 5, 1],
[3, np.nan, 5, 1],
])
def test_smallest_equal(self, data):
result = stats.mode(data, nan_policy='omit')
assert_equal(result[0][0], 1)
def test_obj_arrays_ndim(self):
# regression test for gh-9645: `mode` fails for object arrays w/ndim > 1
data = [['Oxidation'], ['Oxidation'], ['Polymerization'], ['Reduction']]
ar = np.array(data, dtype=object)
m = stats.mode(ar, axis=0)
assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
assert np.all(m.count == 2) and m.count.shape == (1, 1)
data1 = data + [[np.nan]]
ar1 = np.array(data1, dtype=object)
m = stats.mode(ar1, axis=0)
assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
assert np.all(m.count == 2) and m.count.shape == (1, 1)
class TestVariability(object):
testcase = [1,2,3,4]
scalar_testcase = 4.
def test_sem(self):
# This is not in R, so used:
# sqrt(var(testcase)*3/4)/sqrt(3)
# y = stats.sem(self.shoes[0])
# assert_approx_equal(y,0.775177399)
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
y = stats.sem(self.scalar_testcase)
assert_(np.isnan(y))
y = stats.sem(self.testcase)
assert_approx_equal(y, 0.6454972244)
n = len(self.testcase)
assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
stats.sem(self.testcase, ddof=2))
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.sem(x), np.nan)
assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769)
assert_raises(ValueError, stats.sem, x, nan_policy='raise')
assert_raises(ValueError, stats.sem, x, nan_policy='foobar')
def test_zmap(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
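        # For testcase = [1, 2, 3, 4]: mean = 2.5 and the population standard
        # deviation is sqrt(1.25) ~= 1.1180, which gives the values below.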
y = stats.zmap(self.testcase,self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zmap_axis(self):
# Test use of 'axis' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zmap(x, x, axis=0)
z1 = stats.zmap(x, x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zmap_ddof(self):
# Test use of 'ddof' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zmap(x, x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_zscore(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zscore(self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zscore_axis(self):
# Test use of 'axis' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zscore(x, axis=0)
z1 = stats.zscore(x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zscore_ddof(self):
# Test use of 'ddof' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zscore(x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_zscore_nan_propagate(self):
x = np.array([1, 2, np.nan, 4, 5])
z = stats.zscore(x, nan_policy='propagate')
assert all(np.isnan(z))
def test_zscore_nan_omit(self):
x = np.array([1, 2, np.nan, 4, 5])
z = stats.zscore(x, nan_policy='omit')
expected = np.array([-1.2649110640673518,
-0.6324555320336759,
np.nan,
0.6324555320336759,
1.2649110640673518
])
assert_array_almost_equal(z, expected)
def test_zscore_nan_raise(self):
x = np.array([1, 2, np.nan, 4, 5])
assert_raises(ValueError, stats.zscore, x, nan_policy='raise')
def test_mad(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, 28.95])
assert_almost_equal(stats.median_absolute_deviation(dat, axis=None), 0.526323)
dat = dat.reshape(6, 4)
mad = stats.median_absolute_deviation(dat, axis=0)
mad_expected = np.asarray([0.644931, 0.7413, 0.66717, 0.59304])
assert_array_almost_equal(mad, mad_expected)
def test_mad_empty(self):
dat = []
mad = stats.median_absolute_deviation(dat)
assert_equal(mad, np.nan)
def test_mad_nan_propagate(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
mad = stats.median_absolute_deviation(dat, nan_policy='propagate')
assert_equal(mad, np.nan)
def test_mad_nan_raise(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
with assert_raises(ValueError):
stats.median_absolute_deviation(dat, nan_policy='raise')
def test_mad_nan_omit(self):
dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7,
3.7, 3.7,3.77, 5.28, np.nan])
mad = stats.median_absolute_deviation(dat, nan_policy='omit')
assert_almost_equal(mad, 0.504084)
def _check_warnings(warn_list, expected_type, expected_len):
"""
Checks that all of the warnings from a list returned by
`warnings.catch_all(record=True)` are of the required type and that the list
contains expected number of warnings.
"""
assert_equal(len(warn_list), expected_len, "number of warnings")
for warn_ in warn_list:
assert_(warn_.category is expected_type)
class TestIQR(object):
def test_basic(self):
x = np.arange(8) * 0.5
np.random.shuffle(x)
assert_equal(stats.iqr(x), 1.75)
def test_api(self):
d = np.ones((5, 5))
stats.iqr(d)
stats.iqr(d, None)
stats.iqr(d, 1)
stats.iqr(d, (0, 1))
stats.iqr(d, None, (10, 90))
stats.iqr(d, None, (30, 20), 'raw')
stats.iqr(d, None, (25, 75), 1.5, 'propagate')
stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
def test_empty(self):
assert_equal(stats.iqr([]), np.nan)
assert_equal(stats.iqr(np.arange(0)), np.nan)
def test_constant(self):
# Constant array always gives 0
x = np.ones((7, 4))
assert_equal(stats.iqr(x), 0.0)
assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
# 0 only along constant dimensions
# This also tests much of `axis`
y = np.ones((4, 5, 6)) * np.arange(6)
assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
assert_array_equal(stats.iqr(y, axis=2), np.full((4, 5), 2.5))
assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
assert_array_equal(stats.iqr(y, axis=(0, 2)), np.full(5, 3.))
assert_array_equal(stats.iqr(y, axis=(1, 2)), np.full(4, 3.))
def test_scalarlike(self):
x = np.arange(1) + 7.0
assert_equal(stats.iqr(x[0]), 0.0)
assert_equal(stats.iqr(x), 0.0)
assert_array_equal(stats.iqr(x, keepdims=True), [0.0])
def test_2D(self):
x = np.arange(15).reshape((3, 5))
assert_equal(stats.iqr(x), 7.0)
assert_array_equal(stats.iqr(x, axis=0), np.full(5, 5.))
assert_array_equal(stats.iqr(x, axis=1), np.full(3, 2.))
assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)
def test_axis(self):
# The `axis` keyword is also put through its paces in `test_keepdims`.
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10) # x.shape = (71, 23, 10)
q = stats.iqr(o)
assert_equal(stats.iqr(x, axis=(0, 1)), q)
x = np.rollaxis(x, -1, 0) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(2, 1)), q)
x = x.swapaxes(0, 1) # x.shape = (71, 10, 23)
assert_equal(stats.iqr(x, axis=(0, 2)), q)
x = x.swapaxes(0, 1) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(0, 1, 2)),
stats.iqr(x, axis=None))
assert_equal(stats.iqr(x, axis=(0,)),
stats.iqr(x, axis=0))
d = np.arange(3 * 5 * 7 * 11)
        # Older versions of numpy only shuffle along axis=0; whether newer
        # versions shuffle more does not matter for this test.
np.random.shuffle(d)
d = d.reshape((3, 5, 7, 11))
assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
stats.iqr(d[:,:,:, 0].ravel()))
assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
stats.iqr(d[:,:, 1,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
stats.iqr(d[:,:, 2,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
stats.iqr(d[2,:,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
stats.iqr(d[2, 1,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
stats.iqr(d[2, :, :, 1].ravel()))
assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
stats.iqr(d[2, :, 2,:].ravel()))
assert_raises(IndexError, stats.iqr, d, axis=4)
assert_raises(ValueError, stats.iqr, d, axis=(0, 0))
def test_rng(self):
x = np.arange(5)
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2.0 - 0.4
assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))
def test_interpolation(self):
x = np.arange(5)
y = np.arange(4)
# Default
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(y), 1.5)
# Linear
assert_equal(stats.iqr(x, interpolation='linear'), 2)
assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
# Higher
assert_equal(stats.iqr(x, interpolation='higher'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
assert_equal(stats.iqr(y, interpolation='higher'), 2)
        # Lower (will generally, but not always, be the same as higher)
assert_equal(stats.iqr(x, interpolation='lower'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
assert_equal(stats.iqr(y, interpolation='lower'), 2)
# Nearest
assert_equal(stats.iqr(x, interpolation='nearest'), 2)
assert_equal(stats.iqr(y, interpolation='nearest'), 1)
# Midpoint
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
assert_raises(ValueError, stats.iqr, x, interpolation='foobar')
def test_keepdims(self):
# Also tests most of `axis`
x = np.ones((3, 5, 7, 11))
assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
def test_nanpolicy(self):
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
assert_equal(stats.iqr(x, nan_policy='omit'), 7)
assert_equal(stats.iqr(x, nan_policy='raise'), 7)
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), np.full(5, 5))
assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
# Bad policy
assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')
def test_scale(self):
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, scale='raw'), 7)
assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
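        # (1.3489795 ~= 2*norm.ppf(0.75), the factor that makes the IQR a
        #  consistent estimator of the standard deviation for normal data.
        #  Note added for clarity.)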
assert_equal(stats.iqr(x, scale=2.0), 3.5)
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
# axis=1 chosen to show behavior with both nans and without
assert_equal(stats.iqr(x, axis=1, scale='raw',
nan_policy='propagate'), [2, np.nan, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, np.nan, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
[1, np.nan, 1])
# Since NumPy 1.17.0.dev, warnings are no longer emitted by
# np.percentile with nans, so we don't check the number of
# warnings here. See https://github.com/numpy/numpy/pull/12679.
assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 7.5)
assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
7.5 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
# Bad scale
assert_raises(ValueError, stats.iqr, x, scale='foobar')
class TestMoments(object):
"""
    Comparison numbers are found using R v.1.5.1
    (note that length(testcase) = 4).
    testmathworks comes from the documentation for the
    Statistics Toolbox for Matlab and can be found at both
    https://www.mathworks.com/help/stats/kurtosis.html
    https://www.mathworks.com/help/stats/skewness.html
    Both test cases come from that documentation.
"""
testcase = [1,2,3,4]
scalar_testcase = 4.
np.random.seed(1234)
testcase_moment_accuracy = np.random.rand(42)
testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
def test_moment(self):
        # mean((testcase - mean(testcase, axis=0))**power, axis=0)
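        # For testcase = [1, 2, 3, 4] the deviations are [-1.5, -0.5, 0.5, 1.5],
        # so the second central moment is 1.25 and the fourth is 2.5625, as
        # asserted below.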
y = stats.moment(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 0)
assert_approx_equal(y, 1.0)
y = stats.moment(self.testcase, 1)
assert_approx_equal(y, 0.0, 10)
y = stats.moment(self.testcase, 2)
assert_approx_equal(y, 1.25)
y = stats.moment(self.testcase, 3)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 4)
assert_approx_equal(y, 2.5625)
# check array_like input for moment
y = stats.moment(self.testcase, [1, 2, 3, 4])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# check moment input consists only of integers
y = stats.moment(self.testcase, 0.0)
assert_approx_equal(y, 1.0)
assert_raises(ValueError, stats.moment, self.testcase, 1.2)
y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# test empty input
y = stats.moment([])
assert_equal(y, np.nan)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.moment(x, 2), np.nan)
assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)
assert_raises(ValueError, stats.moment, x, nan_policy='raise')
assert_raises(ValueError, stats.moment, x, nan_policy='foobar')
def test_moment_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
mm = stats.moment(a, 2, axis=1, nan_policy="propagate")
np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)
def test_variation(self):
# variation = samplestd / mean
y = stats.variation(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.variation(self.testcase)
assert_approx_equal(y, 0.44721359549996, 10)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.variation(x), np.nan)
assert_almost_equal(stats.variation(x, nan_policy='omit'),
0.6454972243679028)
assert_raises(ValueError, stats.variation, x, nan_policy='raise')
assert_raises(ValueError, stats.variation, x, nan_policy='foobar')
def test_variation_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
vv = stats.variation(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15)
def test_skewness(self):
# Scalar test case
y = stats.skew(self.scalar_testcase)
assert_approx_equal(y, 0.0)
# sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
# ((sqrt(var(testmathworks)*4/5))**3)/5
y = stats.skew(self.testmathworks)
assert_approx_equal(y, -0.29322304336607, 10)
y = stats.skew(self.testmathworks, bias=0)
assert_approx_equal(y, -0.437111105023940, 10)
y = stats.skew(self.testcase)
assert_approx_equal(y, 0.0, 10)
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid='ignore'):
assert_equal(stats.skew(x), np.nan)
assert_equal(stats.skew(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.skew, x, nan_policy='raise')
assert_raises(ValueError, stats.skew, x, nan_policy='foobar')
def test_skewness_scalar(self):
# `skew` must return a scalar for 1-dim input
assert_equal(stats.skew(arange(10)), 0.0)
def test_skew_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
with np.errstate(invalid='ignore'):
s = stats.skew(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)
def test_kurtosis(self):
# Scalar test case
y = stats.kurtosis(self.scalar_testcase)
assert_approx_equal(y, -3.0)
# sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
# sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
# Set flags for axis = 0 and
# fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
assert_approx_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case:
        # kurtosis(x, 0) gives an unbiased estimate of Pearson's kurtosis
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson minus 3)
        # The MATLAB docs imply that both should give Fisher's
y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
assert_approx_equal(y, 3.663542721189047, 10)
y = stats.kurtosis(self.testcase, 0, 0)
assert_approx_equal(y, 1.64)
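        # (For testcase = [1, 2, 3, 4]: m4 = 2.5625 and m2 = 1.25, so Pearson's
        #  kurtosis is m4/m2**2 = 2.5625/1.5625 = 1.64.  Note added for clarity.)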
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kurtosis(x), np.nan)
assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')
def test_kurtosis_array_scalar(self):
assert_equal(type(stats.kurtosis([1,2,3])), float)
def test_kurtosis_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
k = stats.kurtosis(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)
def test_moment_accuracy(self):
# 'moment' must have a small enough error compared to the slower
# but very accurate numpy.power() implementation.
tc_no_mean = self.testcase_moment_accuracy - \
np.mean(self.testcase_moment_accuracy)
assert_allclose(np.power(tc_no_mean, 42).mean(),
stats.moment(self.testcase_moment_accuracy, 42))
class TestStudentTest(object):
X1 = np.array([-1, 0, 1])
X2 = np.array([0, 1, 2])
T1_0 = 0
P1_0 = 1
T1_1 = -1.732051
P1_1 = 0.2254033
T1_2 = -3.464102
P1_2 = 0.0741799
T2_0 = 1.732051
P2_0 = 0.2254033
def test_onesample(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_1samp(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
t, p = stats.ttest_1samp(self.X1, 0)
assert_array_almost_equal(t, self.T1_0)
assert_array_almost_equal(p, self.P1_0)
res = stats.ttest_1samp(self.X1, 0)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
t, p = stats.ttest_1samp(self.X2, 0)
assert_array_almost_equal(t, self.T2_0)
assert_array_almost_equal(p, self.P2_0)
t, p = stats.ttest_1samp(self.X1, 1)
assert_array_almost_equal(t, self.T1_1)
assert_array_almost_equal(p, self.P1_1)
t, p = stats.ttest_1samp(self.X1, 2)
assert_array_almost_equal(t, self.T1_2)
assert_array_almost_equal(p, self.P1_2)
# check nan policy
np.random.seed(7654567)
x = stats.norm.rvs(loc=5, scale=10, size=51)
x[50] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),
(-1.6412624074367159, 0.107147027334048005))
assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')
assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
nan_policy='foobar')
def test_percentileofscore():
pcos = stats.percentileofscore
assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result)
# multiple - 2
for (kind, result) in [('rank', 45.0),
('strict', 30.0),
('weak', 50.0),
('mean', 40.0)]:
assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result)
# multiple - 3
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)
for (kind, result) in [('rank', 50.0),
('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result)
# missing
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30)
# larger numbers
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(
pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,
kind=kind), result)
for (kind, result) in [('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(
pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],
40, kind=kind), result)
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
40, kind=kind), 30.0)
# boundaries
for (kind, result) in [('rank', 10.0),
('mean', 5.0),
('strict', 0.0),
('weak', 10.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
10, kind=kind), result)
for (kind, result) in [('rank', 100.0),
('mean', 95.0),
('strict', 90.0),
('weak', 100.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
110, kind=kind), result)
# out of bounds
for (kind, score, result) in [('rank', 200, 100.0),
('mean', 200, 100.0),
('mean', 0, 0.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
score, kind=kind), result)
assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized')
PowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis',
'chi2', # Pearson's
'log', # G-test (log-likelihood)
'mod_log', # Modified log-likelihood
'cr', # Cressie-Read (lambda=2/3)
])
# The details of the first two elements in power_div_1d_cases are used
# in a test in TestPowerDivergence. Check that code before making
# any changes here.
power_div_1d_cases = [
# Use the default f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# Give a non-uniform f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
chi2=24,
log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
8*((8/2)**(2/3) - 1))/(5/9)),
# f_exp is a scalar.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# f_exp equal to f_obs.
PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
]
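# A naive reference for the statistics tabulated above (a sketch kept only for
# readability; the tests below do not call it).  For lambda_ not in {0, -1} the
# Cressie-Read statistic is
#     2/(lambda_*(lambda_ + 1)) * sum(f_obs*((f_obs/f_exp)**lambda_ - 1)),
# with the lambda_ = 0 (log-likelihood/G-test) and lambda_ = -1
# (modified log-likelihood) limits handled separately.
def _power_divergence_reference(f_obs, f_exp, lambda_):
    f_obs = np.asarray(f_obs, dtype=float)
    f_exp = np.asarray(f_exp, dtype=float)
    if lambda_ == 0:
        # G-test limit
        return 2*np.sum(f_obs*np.log(f_obs/f_exp))
    if lambda_ == -1:
        # modified G-test limit
        return 2*np.sum(f_exp*np.log(f_exp/f_obs))
    return 2/(lambda_*(lambda_ + 1))*np.sum(f_obs*((f_obs/f_exp)**lambda_ - 1))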
power_div_empty_cases = [
# Shape is (0,)--a data set with length 0. The computed
# test statistic should be 0.
PowerDivCase(f_obs=[],
f_exp=None, ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
# Shape is (0, 3). This is 3 data sets, but each data set has
# length 0, so the computed test statistic should be [0, 0, 0].
PowerDivCase(f_obs=np.array([[],[],[]]).T,
f_exp=None, ddof=0, axis=0,
chi2=[0, 0, 0],
log=[0, 0, 0],
mod_log=[0, 0, 0],
cr=[0, 0, 0]),
# Shape is (3, 0). This represents an empty collection of
# data sets in which each data set has length 3. The test
# statistic should be an empty array.
PowerDivCase(f_obs=np.array([[],[],[]]),
f_exp=None, ddof=0, axis=0,
chi2=[],
log=[],
mod_log=[],
cr=[]),
]
class TestPowerDivergence(object):
def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
expected_stat):
f_obs = np.asarray(f_obs)
if axis is None:
num_obs = f_obs.size
else:
b = np.broadcast(f_obs, f_exp)
num_obs = b.shape[axis]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
stat, p = stats.power_divergence(
f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_=lambda_)
assert_allclose(stat, expected_stat)
if lambda_ == 1 or lambda_ == "pearson":
# Also test stats.chisquare.
stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis)
assert_allclose(stat, expected_stat)
ddof = np.asarray(ddof)
expected_p = stats.distributions.chi2.sf(expected_stat,
num_obs - 1 - ddof)
assert_allclose(p, expected_p)
def test_basic(self):
for case in power_div_1d_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_basic_masked(self):
for case in power_div_1d_cases:
mobs = np.ma.array(case.f_obs)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_axis(self):
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
f_obs = np.vstack((case0.f_obs, case1.f_obs))
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp))
# Check the four computational code paths in power_divergence
# using a 2D array with axis=1.
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"pearson", [case0.chi2, case1.chi2])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"log-likelihood", [case0.log, case1.log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"mod-log-likelihood", [case0.mod_log, case1.mod_log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"cressie-read", [case0.cr, case1.cr])
# Reshape case0.f_obs to shape (2,2), and use axis=None.
# The result should be the same.
self.check_power_divergence(
np.array(case0.f_obs).reshape(2, 2), None, 0, None,
"pearson", case0.chi2)
def test_ddof_broadcasting(self):
# Test that ddof broadcasts correctly.
# ddof does not affect the test statistic. It is broadcast
# with the computed test statistic for the computation of
# the p value.
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
# Create 4x2 arrays of observed and expected frequencies.
f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp)).T
expected_chi2 = [case0.chi2, case1.chi2]
# ddof has shape (2, 1). This is broadcast with the computed
# statistic, so p will have shape (2,2).
ddof = np.array([[0], [1]])
stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
assert_allclose(stat, expected_chi2)
# Compute the p values separately, passing in scalars for ddof.
stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
assert_array_equal(p, np.vstack((p0, p1)))
def test_empty_cases(self):
with warnings.catch_warnings():
for case in power_div_empty_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
def test_power_divergence_result_attributes(self):
f_obs = power_div_1d_cases[0].f_obs
f_exp = power_div_1d_cases[0].f_exp
ddof = power_div_1d_cases[0].ddof
axis = power_div_1d_cases[0].axis
res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_="pearson")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
@pytest.mark.parametrize("n, dtype", [(200, np.uint8), (1000000, np.int32)])
def test_chisquare_data_types(n, dtype):
# Regression test for gh-10159.
obs = np.array([n, 0], dtype=dtype)
exp = np.array([n // 2, n // 2], dtype=dtype)
stat, p = stats.chisquare(obs, exp)
assert_allclose(stat, n, rtol=1e-13)
def test_chisquare_masked_arrays():
# Test masked arrays.
obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
mobs = np.ma.masked_array(obs, mask)
expected_chisq = np.array([24.0, 0.5])
expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
2*(3*np.log(0.75) + 5*np.log(1.25))])
chi2 = stats.distributions.chi2
chisq, p = stats.chisquare(mobs)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.count(axis=0) - 1))
g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
chisq, p = stats.chisquare(mobs.T, axis=1)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.T.count(axis=1) - 1))
g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
chi2, p = stats.chisquare(obs1, f_exp=exp1)
# Because of the mask at index 3 of obs1 and at index 4 of exp1,
# only the first three elements are included in the calculation
# of the statistic.
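    # By hand: (3-2)**2/2 + (5-4)**2/4 + (6-8)**2/8 = 1/2 + 1/4 + 4/8 = 1.25.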
mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
# When axis=None, the two values should have type np.float64.
chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
assert_(isinstance(chisq, np.float64))
assert_(isinstance(p, np.float64))
assert_equal(chisq, 1.0)
assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
# Empty arrays:
# A data set with length 0 returns a masked scalar.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(np.ma.array([]))
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, ())
assert_(chisq.mask)
empty3 = np.ma.array([[],[],[]])
# empty3 is a collection of 0 data sets (whose lengths would be 3, if
# there were any), so the return value is an array with length 0.
chisq, p = stats.chisquare(empty3)
assert_(isinstance(chisq, np.ma.MaskedArray))
mat.assert_array_equal(chisq, [])
# empty3.T is an array containing 3 data sets, each with length 0,
# so an array of size (3,) is returned, with all values masked.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(empty3.T)
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, (3,))
assert_(np.all(chisq.mask))
def test_power_divergence_against_cressie_read_data():
# Test stats.power_divergence against tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
# J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
# This tests the calculation for several values of lambda.
# `table4` holds just the second and third columns from Table 4.
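    # The quantity being checked is the Cressie-Read power divergence
    #     2 / (lambda * (lambda + 1)) * sum(obs * ((obs / exp)**lambda - 1))
    # (with the usual limiting forms at lambda = 0 and lambda = -1).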
table4 = np.array([
# observed, expected,
15, 15.171,
11, 13.952,
14, 12.831,
17, 11.800,
5, 10.852,
11, 9.9796,
10, 9.1777,
4, 8.4402,
8, 7.7620,
10, 7.1383,
7, 6.5647,
9, 6.0371,
11, 5.5520,
3, 5.1059,
6, 4.6956,
1, 4.3183,
1, 3.9713,
4, 3.6522,
]).reshape(-1, 2)
table5 = np.array([
# lambda, statistic
-10.0, 72.2e3,
-5.0, 28.9e1,
-3.0, 65.6,
-2.0, 40.6,
-1.5, 34.0,
-1.0, 29.5,
-0.5, 26.5,
0.0, 24.6,
0.5, 23.4,
0.67, 23.1,
1.0, 22.7,
1.5, 22.6,
2.0, 22.9,
3.0, 24.8,
5.0, 35.5,
10.0, 21.4e1,
]).reshape(-1, 2)
for lambda_, expected_stat in table5:
stat, p = stats.power_divergence(table4[:,0], table4[:,1],
lambda_=lambda_)
assert_allclose(stat, expected_stat, rtol=5e-3)
def test_friedmanchisquare():
# see ticket:113
# verified with matlab and R
# From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
# 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
# From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
array([2,2,1,2,3,1,2,3,2,1,1,3]),
array([2,4,3,3,4,3,3,4,4,1,2,1]),
array([3,5,4,3,4,4,3,3,3,4,4,4])]
    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6), Xf=10.68, 0.005 < p < 0.01:
    # The probability for this example is inexact because the chi-square
    # approximation to the Friedman statistic is used.
x3 = [array([7.0,9.9,8.5,5.1,10.3]),
array([5.3,5.7,4.7,3.5,7.7]),
array([4.9,7.6,5.5,2.8,8.4]),
array([8.8,8.9,8.1,3.3,9.1])]
assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
(10.2283464566929, 0.0167215803284414))
assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.friedmanchisquare(*x1)
check_named_results(res, attributes)
# test using mstats
assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
x1[2], x1[3]),
(10.2283464566929, 0.0167215803284414))
# the following fails
# assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
# (18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
x3[2], x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
def test_kstest():
# comparing with values from R
x = np.linspace(-1,1,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.15865525393145705, 12)
assert_almost_equal(p, 0.95164069201518386, 1)
x = np.linspace(-15,15,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.44435602715924361, 15)
assert_almost_equal(p, 0.038850140086788665, 8)
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.kstest(x, 'norm')
check_named_results(res, attributes)
    # the following tests rely on deterministically replicated rvs
np.random.seed(987654321)
x = stats.norm.rvs(loc=0.2, size=100)
D,p = stats.kstest(x, 'norm', mode='asymp')
assert_almost_equal(D, 0.12464329735846891, 15)
assert_almost_equal(p, 0.089444888711820769, 15)
assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),
np.array((0.12464329735846891, 0.089444888711820769)), 15)
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),
np.array((0.12464329735846891, 0.040989164077641749)), 15)
# this 'greater' test fails with precision of decimal=14
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),
np.array((0.0072115233216310994, 0.98531158590396228)), 12)
# missing: no test that uses *args
class TestKSTwoSamples(object):
def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob, mode='auto'):
result = stats.ks_2samp(x1, x2, alternative, mode=mode)
expected = np.array([expected_statistic, expected_prob])
assert_array_almost_equal(np.array(result), expected)
def testSmall(self):
self._testOne([0], [1], 'two-sided', 1.0/1, 1.0)
self._testOne([0], [1], 'greater', 1.0/1, 0.5)
self._testOne([0], [1], 'less', 0.0/1, 1.0)
self._testOne([1], [0], 'two-sided', 1.0/1, 1.0)
self._testOne([1], [0], 'greater', 0.0/1, 1.0)
self._testOne([1], [0], 'less', 1.0/1, 0.5)
def testTwoVsThree(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0)
self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7)
self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7)
self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6)
self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3)
self._testOne(data1m, data2, 'less', 0, 1.0)
def testTwoVsFour(self):
data1 = np.array([1.0, 2.0])
data1p = data1 + 0.01
data1m = data1 - 0.01
data2 = np.array([1.0, 2.0, 3.0, 4.0])
self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15)
self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15)
self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15)
self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15)
self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15)
self._testOne(data1m, data2, 'less', 0, 1.0)
def test100_100(self):
x100 = np.linspace(1, 100, 100)
x100_2_p1 = x100 + 2 + 0.1
x100_2_m1 = x100 + 2 - 0.1
self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055)
self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248)
self._testOne(x100, x100_2_p1, 'less', 0, 1.0)
self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0)
self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184)
self._testOne(x100, x100_2_m1, 'less', 0, 1.0)
def test100_110(self):
x100 = np.linspace(1, 100, 100)
x110 = np.linspace(1, 100, 110)
x110_20_p1 = x110 + 20 + 0.1
x110_20_m1 = x110 + 20 - 0.1
# 100, 110
self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353)
self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203)
self._testOne(x100, x110_20_p1, 'less', 0, 1)
self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313)
self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056)
self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0)
def testRepeatedValues(self):
x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int)
x3344 = x2233 + 1
x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int)
x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int)
self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952)
self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786)
self._testOne(x2233, x3344, 'less', 0.0/16, 1.0)
self._testOne(x2356, x3467, 'two-sided', 190.0/21/26, 0.0919245790168125)
self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544)
self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775)
def testEqualSizes(self):
data2 = np.array([1.0, 2.0, 3.0])
self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+1, 'less', 0.0/3, 1.)
self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75)
self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.)
self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0)
self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0)
self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75)
def testMiddlingBoth(self):
# 500, 600
n1, n2 = 500, 600
delta = 1.0/n1/n2/2/2
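        # delta is a tiny offset, much smaller than the spacing of either
        # grid, presumably used so that no x value ties exactly with a y
        # value and the expected D values below are unambiguous.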
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='auto')
self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='asymp')
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='asymp')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='asymp')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='exact')
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
_check_warnings(w, RuntimeWarning, 1)
def testMediumBoth(self):
# 1000, 1100
n1, n2 = 1000, 1100
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='asymp')
self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='auto')
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='asymp')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='asymp')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='exact')
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
_check_warnings(w, RuntimeWarning, 1)
def testLarge(self):
# 10000, 110
n1, n2 = 10000, 110
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 100, n2)
self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15)
self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591)
self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26)
def test_gh11184(self):
# 3000, 3001, exact two-sided
np.random.seed(123456)
x = np.random.normal(size=3000)
y = np.random.normal(size=3001) * 1.5
print(x[0], x[-1], y[0], y[-1])
self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='asymp')
self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='exact')
def test_gh11184_bigger(self):
# 10000, 10001, exact two-sided
np.random.seed(123456)
x = np.random.normal(size=10000)
y = np.random.normal(size=10001) * 1.5
print(x[0], x[-1], y[0], y[-1])
self._testOne(x, y, 'two-sided', 0.10597913208679133, 3.3149311398483503e-49, mode='asymp')
self._testOne(x, y, 'two-sided', 0.10597913208679133, 2.7755575615628914e-15, mode='exact')
self._testOne(x, y, 'greater', 0.10597913208679133, 2.7947433906389253e-41, mode='asymp')
self._testOne(x, y, 'less', 0.09658002199780022, 2.7947433906389253e-41, mode='asymp')
@pytest.mark.slow
def testLargeBoth(self):
# 10000, 11000
n1, n2 = 10000, 11000
lcm = n1*11.0
delta = 1.0/n1/n2/2/2
x = np.linspace(1, 200, n1) - delta
y = np.linspace(2, 200, n2)
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.99915729949018561, mode='asymp')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990456491488628, mode='exact')
self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.99915729949018561, mode='auto')
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673)
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "ks_2samp: Exact calculation overflowed. Switching to mode=asymp")
self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673, mode='exact')
self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724, mode='exact')
def testNamedAttributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ks_2samp([1, 2], [3])
check_named_results(res, attributes)
def test_some_code_paths(self):
# Check that some code paths are executed
from scipy.stats.stats import _count_paths_outside_method, _compute_prob_inside_method
_compute_prob_inside_method(1, 1, 1, 1)
_count_paths_outside_method(1000, 1, 1, 1001)
assert_raises(FloatingPointError, _count_paths_outside_method, 1100, 1099, 1, 1)
assert_raises(FloatingPointError, _count_paths_outside_method, 2000, 1000, 1, 1)
def test_argument_checking(self):
# Check that an empty array causes a ValueError
assert_raises(ValueError, stats.ks_2samp, [], [1])
assert_raises(ValueError, stats.ks_2samp, [1], [])
assert_raises(ValueError, stats.ks_2samp, [], [])
def test_ttest_rel():
# regression test
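    # ttest_rel computes the paired statistic t = mean(d) / (std(d, ddof=1) / sqrt(n))
    # with d = rvs1 - rvs2; tr and pr are previously recorded reference values.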
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_rel(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_rel(rvs1, rvs2, axis=0)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = (stats.norm.rvs(loc=5, scale=10, size=501) +
stats.norm.rvs(scale=0.2, size=501))
y[500] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
(0.25299925303978066, 0.8003729814201519))
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that NaNs in the input array result in NaN output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
    # test that an incorrect input shape raises an error
x = np.arange(24)
assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
x.reshape((2, 3, 4)))
def test_ttest_rel_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_rel(x, y, nan_policy='omit')
r2 = stats.ttest_rel(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are paired when NaNs are dropped
r3 = stats.ttest_rel(y[1:], x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, paired=TRUE)
assert_allclose(r2, (-2, 0.1835), atol=1e-4)
def _desc_stats(x1, x2, axis=0):
def _stats(x, axis=0):
x = np.asarray(x)
mu = np.mean(x, axis=axis)
std = np.std(x, axis=axis, ddof=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
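# _desc_stats returns (mean1, std1, nobs1, mean2, std2, nobs2), i.e. exactly
# the positional arguments expected by stats.ttest_ind_from_stats (std is the
# sample standard deviation, ddof=1).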
def test_ttest_ind():
# regression test
tr = 1.0912746897927283
pr = 0.27647818616351882
tpr = ([tr,-tr],[pr,pr])
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
# test from_stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2)),
[t, p])
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_ind(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = stats.norm.rvs(loc=5, scale=10, size=500)
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
(0.24779670949091914, 0.80434267337517906))
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that NaNs in the input array result in NaN output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_with_uneq_var():
# check vs. R
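    # equal_var=False selects Welch's t-test, which uses per-sample variances
    # and the Welch-Satterthwaite approximation for the degrees of freedom.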
a = (1, 2, 3)
b = (1.1, 2.9, 4.2)
pr = 0.53619490753126731
tr = -0.68649512735572582
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
# test from desc stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
a = (1, 2, 3, 4)
pr = 0.84354139131608286
tr = -0.2108663315950719
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
# regression test
tr = 1.0912746897927283
tr_uneq_n = 0.66745638708050492
pr = 0.27647831993021388
pr_uneq_n = 0.50873585065616544
tpr = ([tr,-tr],[pr,pr])
rvs3 = np.linspace(1,100, 25)
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
assert_array_almost_equal([t,p],(tr,pr))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs3),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2),
axis=2, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
args = _desc_stats(np.rollaxis(rvs1_3D, 2),
np.rollaxis(rvs2_3D, 2), axis=2)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),
(np.nan, np.nan))
    # check that NaNs in the input array result in NaN output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_ind(x, y, nan_policy='omit')
r2 = stats.ttest_ind(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are not paired when NaNs are dropped
r3 = stats.ttest_ind(y, x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, var.equal=TRUE)
assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15)
def test_gh5686():
mean1, mean2 = np.array([1, 2]), np.array([3, 4])
std1, std2 = np.array([5, 3]), np.array([4, 5])
nobs1, nobs2 = np.array([130, 140]), np.array([100, 150])
# This will raise a TypeError unless gh-5686 is fixed.
stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
def test_ttest_1samp_new():
n1, n2, n3 = (10,15,20)
rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
# check multidimensional array and correct axis handling
# deterministic rvn1 and rvn2 would be better as in test_ttest_rel
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n2,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n2))
# test zero division problem
t, p = stats.ttest_1samp([0, 0, 0], 1)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))
    # check that NaNs in the input array result in NaN output
anan = np.array([[1, np.nan],[-1, 1]])
assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))
class TestDescribe(object):
def test_describe_scalar(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
n, mm, m, v, sk, kurt = stats.describe(4.)
assert_equal(n, 1)
assert_equal(mm, (4.0, 4.0))
assert_equal(m, 4.0)
assert_(np.isnan(v))
assert_array_almost_equal(sk, 0.0, decimal=13)
assert_array_almost_equal(kurt, -3.0, decimal=13)
def test_describe_numbers(self):
x = np.vstack((np.ones((3,4)), np.full((2, 4), 2)))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.3, 0.3, 0.3, 0.3])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
x = np.arange(10.)
x[9] = np.nan
nc, mmc = (9, (0.0, 8.0))
mc = 4.0
vc = 7.5
skc = 0.0
kurtc = -1.2300000000000002
n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc)
assert_array_almost_equal(kurt, kurtc, decimal=13)
assert_raises(ValueError, stats.describe, x, nan_policy='raise')
assert_raises(ValueError, stats.describe, x, nan_policy='foobar')
def test_describe_result_attributes(self):
actual = stats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes)
def test_describe_ddof(self):
x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.24, 0.24, 0.24, 0.24])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
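        # Each column is [1, 1, 1, 2, 2] with mean 1.4 and a sum of squared
        # deviations of 1.2, so the variance is 1.2/5 = 0.24 with ddof=0
        # (versus 1.2/4 = 0.3 with the default ddof=1 used above).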
n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
assert_equal(n, nc)
assert_allclose(mm, mmc, rtol=1e-15)
assert_allclose(m, mc, rtol=1e-15)
assert_allclose(v, vc, rtol=1e-15)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_describe_axis_none(self):
x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
# expected values
e_nobs, e_minmax = (20, (1.0, 2.0))
e_mean = 1.3999999999999999
e_var = 0.25263157894736848
e_skew = 0.4082482904638634
e_kurt = -1.8333333333333333
# actual values
a = stats.describe(x, axis=None)
assert_equal(a.nobs, e_nobs)
assert_almost_equal(a.minmax, e_minmax)
assert_almost_equal(a.mean, e_mean)
assert_almost_equal(a.variance, e_var)
assert_array_almost_equal(a.skewness, e_skew, decimal=13)
assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)
def test_describe_empty(self):
assert_raises(ValueError, stats.describe, [])
def test_normalitytests():
assert_raises(ValueError, stats.skewtest, 4.)
assert_raises(ValueError, stats.kurtosistest, 4.)
assert_raises(ValueError, stats.normaltest, 4.)
# numbers verified with R: dagoTest in package fBasics
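    # normaltest is the D'Agostino-Pearson K^2 test: it combines the z-scores
    # of skewtest and kurtosistest as K^2 = z_skew**2 + z_kurt**2, which is
    # compared against a chi-squared distribution with 2 degrees of freedom.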
st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
attributes = ('statistic', 'pvalue')
assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
check_named_results(stats.normaltest(x), attributes)
assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
check_named_results(stats.skewtest(x), attributes)
assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
check_named_results(stats.kurtosistest(x), attributes)
# Test axis=None (equal to axis=0 for 1-D input)
assert_array_almost_equal(stats.normaltest(x, axis=None),
(st_normal, pv_normal))
assert_array_almost_equal(stats.skewtest(x, axis=None),
(st_skew, pv_skew))
assert_array_almost_equal(stats.kurtosistest(x, axis=None),
(st_kurt, pv_kurt))
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.skewtest(x), (np.nan, np.nan))
expected = (1.0184643553962129, 0.30845733195153502)
assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)
with np.errstate(all='ignore'):
assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
x = np.arange(30.)
x[29] = np.nan
with np.errstate(all='ignore'):
assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))
expected = (-2.2683547379505273, 0.023307594135872967)
assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
expected)
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
with np.errstate(all='ignore'):
assert_array_equal(stats.normaltest(x), (np.nan, np.nan))
expected = (6.2260409514287449, 0.04446644248650191)
assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)
assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')
    # regression test for issue gh-9033: x clearly non-normal but power of
    # negative denom needs to be handled correctly to reject normality
counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
assert_equal(stats.kurtosistest(x)[1] < 0.01, True)
class TestRankSums(object):
def test_ranksums_result_attributes(self):
res = stats.ranksums(np.arange(5), np.arange(25))
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestJarqueBera(object):
def test_jarque_bera_stats(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
y = np.random.chisquare(10000, 100000)
z = np.random.rayleigh(1, 100000)
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
def test_jarque_bera_array_like(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
JB1, p1 = stats.jarque_bera(list(x))
JB2, p2 = stats.jarque_bera(tuple(x))
JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
assert_(JB1 == JB2 == JB3)
assert_(p1 == p2 == p3)
def test_jarque_bera_size(self):
assert_raises(ValueError, stats.jarque_bera, [])
def test_skewtest_too_few_samples():
# Regression test for ticket #1492.
# skewtest requires at least 8 samples; 7 should raise a ValueError.
x = np.arange(7.0)
assert_raises(ValueError, stats.skewtest, x)
def test_kurtosistest_too_few_samples():
# Regression test for ticket #1425.
# kurtosistest requires at least 5 samples; 4 should raise a ValueError.
x = np.arange(4.0)
assert_raises(ValueError, stats.kurtosistest, x)
class TestMannWhitneyU(object):
X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
20.4970638083542, 19.5567594734914]
Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]
significant = 14
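    # len(X) == 30 and len(Y) == 20, so the two one-sided U statistics must
    # satisfy U1 + U2 == len(X) * len(Y) == 600; the 498/102 pairs asserted
    # below are consistent with that identity.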
def test_mannwhitneyu_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)
def test_mannwhitneyu_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 9.188326533255e-05,
significant=self.significant)
def test_mannwhitneyu_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y)
u2, p2 = stats.mannwhitneyu(self.Y, self.X)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.5941632666275e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)
def test_mannwhitneyu_no_correct_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 8.81880199916178e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False)
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.40940099958089e-05,
significant=self.significant)
def test_mannwhitneyu_ones(self):
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
# p-value verified with matlab and R to 5 significant digits
assert_array_almost_equal(stats.stats.mannwhitneyu(x, y,
alternative='less'),
(16980.5, 2.8214327656317373e-005),
decimal=12)
def test_mannwhitneyu_result_attributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
check_named_results(res, attributes)
def test_pointbiserial():
# same as mstats test except for the nan
# Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1]
assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.pointbiserialr(x, y)
check_named_results(res, attributes)
def test_obrientransform():
    # A couple of tests calculated by hand.
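    # O'Brien's transform of a sample of size n with mean m and sample
    # variance s2 (ddof=1) maps each value x to
    #     ((n - 1.5) * n * (x - m)**2 - 0.5 * s2 * (n - 1)) / ((n - 1) * (n - 2))
    # e.g. for x1 = [0, 2, 4]: n=3, m=2, s2=4, giving [7, -2, 7] as checked below.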
x1 = np.array([0, 2, 4])
t1 = stats.obrientransform(x1)
expected = [7, -2, 7]
assert_allclose(t1[0], expected)
x2 = np.array([0, 3, 6, 9])
t2 = stats.obrientransform(x2)
expected = np.array([30, 0, 0, 30])
assert_allclose(t2[0], expected)
# Test two arguments.
a, b = stats.obrientransform(x1, x2)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
# Test three arguments.
a, b, c = stats.obrientransform(x1, x2, x1)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
assert_equal(c, t1[0])
# This is a regression test to check np.var replacement.
# The author of this test didn't separately verify the numbers.
x1 = np.arange(5)
result = np.array(
[[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
[21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
# Example from "O'Brien Test for Homogeneity of Variance"
# by Herve Abdi.
values = range(5, 11)
reps = np.array([5, 11, 9, 3, 2, 2])
data = np.repeat(values, reps)
transformed_values = np.array([3.1828, 0.5591, 0.0344,
1.6086, 5.2817, 11.0538])
expected = np.repeat(transformed_values, reps)
result = stats.obrientransform(data)
assert_array_almost_equal(result[0], expected, decimal=4)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
# Note this doesn't test when axis is not specified
x = stats.gmean(array_like, axis=axis, dtype=dtype)
assert_allclose(x, desired, rtol=rtol)
assert_equal(x.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
x = stats.hmean(array_like, axis=axis, dtype=dtype)
assert_allclose(x, desired, rtol=rtol)
assert_equal(x.dtype, dtype)
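# For reference: gmean(a) = exp(mean(log(a))) and hmean(a) = n / sum(1 / a),
# which is why a single zero entry drives both means to 0 in the tests below.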
class TestHarMean(object):
def test_1d_list(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
desired = 34.1417152147
check_equal_hmean(a, desired)
a = [1, 2, 3, 4]
desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. / 4)
check_equal_hmean(a, desired)
def test_1d_array(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
desired = 34.1417152147
check_equal_hmean(a, desired)
def test_1d_array_with_zero(self):
a = np.array([1, 0])
desired = 0.0
assert_equal(stats.hmean(a), desired)
def test_1d_array_with_negative_value(self):
a = np.array([1, 0, -1])
assert_raises(ValueError, stats.hmean, a)
# Note the next tests use axis=None as default, not axis=0
def test_2d_list(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 38.6696271841
check_equal_hmean(a, desired)
def test_2d_array(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 38.6696271841
check_equal_hmean(np.array(a), desired)
def test_2d_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
check_equal_hmean(a, desired, axis=0)
def test_2d_axis0_with_zero(self):
a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.88135593, 0.0, 52.90076336, 65.45454545])
assert_allclose(stats.hmean(a, axis=0), desired)
def test_2d_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([19.2, 63.03939962, 103.80078637])
check_equal_hmean(a, desired, axis=1)
def test_2d_axis1_with_zero(self):
a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([0.0, 63.03939962, 103.80078637])
assert_allclose(stats.hmean(a, axis=1), desired)
def test_2d_matrix_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]])
check_equal_hmean(matrix(a), desired, axis=0)
def test_2d_matrix_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[19.2, 63.03939962, 103.80078637]]).T
check_equal_hmean(matrix(a), desired, axis=1)
class TestGeoMean(object):
def test_1d_list(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
desired = 45.2872868812
check_equal_gmean(a, desired)
a = [1, 2, 3, 4]
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, rtol=1e-14)
def test_1d_array(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
desired = 45.2872868812
check_equal_gmean(a, desired)
a = array([1, 2, 3, 4], float32)
desired = power(1 * 2 * 3 * 4, 1. / 4.)
check_equal_gmean(a, desired, dtype=float32)
# Note the next tests use axis=None as default, not axis=0
def test_2d_list(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 52.8885199
check_equal_gmean(a, desired)
def test_2d_array(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = 52.8885199
check_equal_gmean(array(a), desired)
def test_2d_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
check_equal_gmean(a, desired, axis=0)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = array([1, 2, 3, 4])
check_equal_gmean(a, desired, axis=0, rtol=1e-14)
def test_2d_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = np.array([22.13363839, 64.02171746, 104.40086817])
check_equal_gmean(a, desired, axis=1)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
v = power(1 * 2 * 3 * 4, 1. / 4.)
desired = array([v, v, v])
check_equal_gmean(a, desired, axis=1, rtol=1e-14)
def test_2d_matrix_axis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]])
check_equal_gmean(matrix(a), desired, axis=0)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = matrix([1, 2, 3, 4])
check_equal_gmean(matrix(a), desired, axis=0, rtol=1e-14)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
desired = matrix(stats.gmean(a, axis=0))
check_equal_gmean(matrix(a), desired, axis=0, rtol=1e-14)
def test_2d_matrix_axis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
desired = matrix([[22.13363839, 64.02171746, 104.40086817]]).T
check_equal_gmean(matrix(a), desired, axis=1)
a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
v = power(1 * 2 * 3 * 4, 1. / 4.)
desired = matrix([[v], [v], [v]])
check_equal_gmean(matrix(a), desired, axis=1, rtol=1e-14)
def test_large_values(self):
a = array([1e100, 1e200, 1e300])
desired = 1e200
check_equal_gmean(a, desired, rtol=1e-13)
def test_1d_list0(self):
# Test a 1d list with zero element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
desired = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
check_equal_gmean(a, desired)
finally:
np.seterr(**olderr)
def test_1d_array0(self):
# Test a 1d array with zero element
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
desired = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
check_equal_gmean(a, desired)
finally:
np.seterr(**olderr)
class TestGeometricStandardDeviation(object):
# must add 1 as `gstd` is only defined for positive values
array_1d = np.arange(2 * 3 * 4) + 1
gstd_array_1d = 2.294407613602
array_3d = array_1d.reshape(2, 3, 4)
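    # gstd(a) is exp(std(log(a), ddof=ddof)) with ddof defaulting to 1, which
    # is why only strictly positive, finite entries are accepted below.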
def test_1d_array(self):
gstd_actual = stats.gstd(self.array_1d)
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_1d_numeric_array_like_input(self):
gstd_actual = stats.gstd(tuple(self.array_1d))
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_raises_value_error_non_array_like_input(self):
with pytest.raises(ValueError, match='Invalid array input'):
stats.gstd('This should fail as it can not be cast to an array.')
def test_raises_value_error_zero_entry(self):
with pytest.raises(ValueError, match='Non positive value'):
stats.gstd(np.append(self.array_1d, [0]))
def test_raises_value_error_negative_entry(self):
with pytest.raises(ValueError, match='Non positive value'):
stats.gstd(np.append(self.array_1d, [-1]))
def test_raises_value_error_inf_entry(self):
with pytest.raises(ValueError, match='Infinite value'):
stats.gstd(np.append(self.array_1d, [np.inf]))
def test_propagates_nan_values(self):
a = array([[1, 1, 1, 16], [np.nan, 1, 2, 3]])
gstd_actual = stats.gstd(a, axis=1)
assert_allclose(gstd_actual, np.array([4, np.nan]))
def test_ddof_equal_to_number_of_observations(self):
with pytest.raises(ValueError, match='Degrees of freedom <= 0'):
stats.gstd(self.array_1d, ddof=self.array_1d.size)
def test_3d_array(self):
gstd_actual = stats.gstd(self.array_3d, axis=None)
assert_allclose(gstd_actual, self.gstd_array_1d)
def test_3d_array_axis_type_tuple(self):
gstd_actual = stats.gstd(self.array_3d, axis=(1,2))
assert_allclose(gstd_actual, [2.12939215, 1.22120169])
def test_3d_array_axis_0(self):
gstd_actual = stats.gstd(self.array_3d, axis=0)
gstd_desired = np.array([
[6.1330555493918, 3.958900210120, 3.1206598248344, 2.6651441426902],
[2.3758135028411, 2.174581428192, 2.0260062829505, 1.9115518327308],
[1.8205343606803, 1.746342404566, 1.6846557065742, 1.6325269194382]
])
assert_allclose(gstd_actual, gstd_desired)
def test_3d_array_axis_1(self):
gstd_actual = stats.gstd(self.array_3d, axis=1)
gstd_desired = np.array([
[3.118993630946, 2.275985934063, 1.933995977619, 1.742896469724],
[1.271693593916, 1.254158641801, 1.238774141609, 1.225164057869]
])
assert_allclose(gstd_actual, gstd_desired)
def test_3d_array_axis_2(self):
gstd_actual = stats.gstd(self.array_3d, axis=2)
gstd_desired = np.array([
[1.8242475707664, 1.2243686572447, 1.1318311657788],
[1.0934830582351, 1.0724479791887, 1.0591498540749]
])
assert_allclose(gstd_actual, gstd_desired)
def test_masked_3d_array(self):
ma = np.ma.masked_where(self.array_3d > 16, self.array_3d)
gstd_actual = stats.gstd(ma, axis=2)
gstd_desired = stats.gstd(self.array_3d, axis=2)
mask = [[0, 0, 0], [0, 1, 1]]
assert_allclose(gstd_actual, gstd_desired)
assert_equal(gstd_actual.mask, mask)
def test_binomtest():
# precision tests compared to R for ticket:986
pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),
np.linspace(0.85,0.95,5)))
n = 501
x = 450
results = [0.0, 0.0, 1.0159969301994141e-304,
2.9752418572150531e-275, 7.7668382922535275e-250,
2.3381250925167094e-099, 7.8284591587323951e-081,
9.9155947819961383e-065, 2.8729390725176308e-050,
1.7175066298388421e-037, 0.0021070691951093692,
0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
2.6102587134694721e-006]
for p, res in zip(pp,results):
assert_approx_equal(stats.binom_test(x, n, p), res,
                            significant=12, err_msg='fail for p=%f' % p)
assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024,
                        significant=12, err_msg='fail for p=0.1')
def test_binomtest2():
# test added for issue #2384
res2 = [
[1.0, 1.0],
[0.5,1.0,0.5],
[0.25,1.00,1.00,0.25],
[0.125,0.625,1.000,0.625,0.125],
[0.0625,0.3750,1.0000,1.0000,0.3750,0.0625],
[0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125],
[0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625],
[0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625,
0.0703125,0.0078125],
[0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000,
0.50781250,0.17968750,0.03906250,0.00390625],
[0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000,
0.753906250,0.343750000,0.109375000,0.021484375,0.001953125]
]
for k in range(1, 11):
res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)]
assert_almost_equal(res1, res2[k-1], decimal=10)
def test_binomtest3():
# test added for issue #2384
# test when x == n*p and neighbors
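    # When x equals n*p exactly, the observed count is the most probable
    # outcome, so the two-sided p-value is 1 for every (v, k) pair here.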
res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_equal(res3, np.ones(len(res3), int))
#> bt=c()
#> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testm1 = np.array([
0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
0.5981224279835393, 0.603430543396034, 0.607304096221924,
0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
0.68853759765625, 0.6980101120000006, 0.703906431368616,
0.70793209416498, 0.7108561134173507, 0.713076544331419,
0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
0.74986110468096, 0.7548015520398076, 0.7581671424768577,
0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
0.761553963657302, 0.774800934828818, 0.7818005980538996,
0.78613491480358, 0.789084353140195, 0.7912217659828884,
0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
0.7976688481430754, 0.8039848974727624, 0.807891868948366,
0.8105487660137676, 0.812473307174702, 0.8139318233591120,
0.815075399104785, 0.7744140625, 0.8037322594985427,
0.814742863657656, 0.8205425178645808, 0.8241275984172285,
0.8265645374416, 0.8283292196088257, 0.829666291102775,
0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
0.828116983756619, 0.833508948940494, 0.8368403871552892,
0.839104213210105, 0.840743186196171, 0.84198481438049,
0.8429580531563676, 0.803619384765625, 0.829338573944648,
0.8389591907548646, 0.84401876783902, 0.84714369697889,
0.8492667010581667, 0.850803474598719, 0.851967542858308,
0.8528799045949524, 0.8145294189453126, 0.838881732845347,
0.847979024541911, 0.852760894015685, 0.8557134656773457,
0.8577190131799202, 0.85917058278431, 0.860270010472127,
0.861131648404582, 0.823802947998047, 0.846984756807511,
0.855635653643743, 0.860180994825685, 0.86298688573253,
0.864892525675245, 0.866271647085603, 0.867316125625004,
0.8681346531755114
])
# > bt=c()
# > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testp1 = np.array([
0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
0.4295746560000003, 0.43473045988554, 0.4383309503172684,
0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
0.4927602499618962, 0.5096031427383425, 0.5189636628480,
0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
0.5669248746708034, 0.576436455045805, 0.5824538812831795,
0.5866053321547824, 0.589642781414643, 0.5919618019300193,
0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
0.617303847446822, 0.623172512167948, 0.627208862156123,
0.6301556891501057, 0.632401894928977, 0.6341708982290303,
0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
0.65392850011132, 0.657816519817211, 0.660650782947676,
0.662808780346311, 0.6645068560246006, 0.7905273437499996,
0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
0.6782129857784873, 0.681950188903695, 0.684671508668418,
0.686741824999918, 0.688369886732168, 0.803619384765625,
0.668716055304315, 0.684360013879534, 0.6927642396829181,
0.6980155964704895, 0.701609591890657, 0.7042244320992127,
0.7062125081341817, 0.707775152962577, 0.8145294189453126,
0.686243374488305, 0.7013873696358975, 0.709501223328243,
0.714563595144314, 0.718024953392931, 0.7205416252126137,
0.722454130389843, 0.723956813292035, 0.823802947998047,
0.701255953767043, 0.715928221686075, 0.723772209289768,
0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
0.736270323773157, 0.737718376096348
])
res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_almost_equal(res4_p1, binom_testp1, decimal=13)
assert_almost_equal(res4_m1, binom_testm1, decimal=13)
class TestTrim(object):
# test trim functions
def test_trim1(self):
a = np.arange(11)
assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))
assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))
assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),
np.arange(2, 11))
assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),
np.arange(3, 11))
assert_equal(stats.trim1(a, 1.0), [])
assert_equal(stats.trim1(a, 1.0, tail='left'), [])
# empty input
assert_equal(stats.trim1([], 0.1), [])
assert_equal(stats.trim1([], 3/11., tail='left'), [])
assert_equal(stats.trim1([], 4/6.), [])
def test_trimboth(self):
a = np.arange(11)
assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))
assert_equal(np.sort(stats.trimboth(a, 0.2)),
np.array([2, 3, 4, 5, 6, 7, 8]))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),
np.arange(4, 20).reshape(4, 4))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,
2/6.)),
np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))
assert_raises(ValueError, stats.trimboth,
np.arange(24).reshape(4, 6).T, 4/6.)
# empty input
assert_equal(stats.trimboth([], 0.1), [])
assert_equal(stats.trimboth([], 3/11.), [])
assert_equal(stats.trimboth([], 4/6.), [])
def test_trim_mean(self):
# don't use pre-sorted arrays
a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])
idx = np.array([3, 5, 0, 1, 2, 4])
a2 = np.arange(24).reshape(6, 4)[idx, :]
a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
assert_equal(stats.trim_mean(a3, 2/6.),
np.array([2.5, 8.5, 14.5, 20.5]))
assert_equal(stats.trim_mean(a2, 2/6.),
np.array([10., 11., 12., 13.]))
idx4 = np.array([1, 0, 3, 2])
a4 = np.arange(24).reshape(4, 6)[idx4, :]
assert_equal(stats.trim_mean(a4, 2/6.),
np.array([9., 10., 11., 12., 13., 14.]))
# shuffled arange(24) as array_like
a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
20, 2, 14, 4, 13, 8, 3]
assert_equal(stats.trim_mean(a, 2/6.), 11.5)
assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)
# check axis argument
np.random.seed(1234)
a = np.random.randint(20, size=(5, 6, 4, 7))
for axis in [0, 1, 2, 3, -1]:
res1 = stats.trim_mean(a, 2/6., axis=axis)
res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.)
assert_equal(res1, res2)
res1 = stats.trim_mean(a, 2/6., axis=None)
res2 = stats.trim_mean(a.ravel(), 2/6.)
assert_equal(res1, res2)
assert_raises(ValueError, stats.trim_mean, a, 0.6)
# empty input
assert_equal(stats.trim_mean([], 0.0), np.nan)
assert_equal(stats.trim_mean([], 0.6), np.nan)
class TestSigmaClip(object):
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 4 # default
c, low, upp = stats.sigmaclip(a)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) # check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5, 10.5, 11))
def test_sigmaclip_result_attributes(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
res = stats.sigmaclip(a, fact, fact)
attributes = ('clipped', 'lower', 'upper')
check_named_results(res, attributes)
def test_std_zero(self):
# regression test #8632
x = np.ones(10)
assert_equal(stats.sigmaclip(x)[0], x)
class TestFOneWay(object):
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0,2], [0,2])
assert_equal(F, 0.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0,2], [2,4])
assert_equal(F, 2.0)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
assert_almost_equal(F, 0.77450216931805538)
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = stats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nist(self):
# These are the nist ANOVA files. They can be found at:
# https://www.itl.nist.gov/div898/strd/anova/anova.html
filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
for test_case in filenames:
rtol = 1e-7
fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/nist_anova', test_case))
with open(fname, 'r') as f:
content = f.read().split('\n')
certified = [line.split() for line in content[40:48]
if line.strip()]
dataf = np.loadtxt(fname, skiprows=60)
y, x = dataf.T
y = y.astype(int)
caty = np.unique(y)
f = float(certified[0][-1])
xlist = [x[y == i] for i in caty]
res = stats.f_oneway(*xlist)
# With the hard test cases we relax the tolerance a bit.
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
if test_case in hard_tc:
rtol = 1e-4
assert_allclose(res[0], f, rtol=rtol,
err_msg='Failing testcase: %s' % test_case)
class TestKruskal(object):
def test_simple(self):
x = [1]
y = [2]
h, p = stats.kruskal(x, y)
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
def test_basic(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
h, p = stats.kruskal(x, y)
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
def test_simple_tie(self):
x = [1]
y = [1, 2]
h_uncorr = 1.5**2 + 2*2.25**2 - 12
corr = 0.75
expected = h_uncorr / corr # 0.5
h, p = stats.kruskal(x, y)
# Since the expression is simple and the exact answer is 0.5, it
# should be safe to use assert_equal().
assert_equal(h, expected)
def test_another_tie(self):
x = [1, 1, 1, 2]
y = [2, 2, 2, 2]
h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr
h, p = stats.kruskal(x, y)
assert_approx_equal(h, expected)
def test_three_groups(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = [2, 2]
h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr # 7.0
h, p = stats.kruskal(x, y, z)
assert_approx_equal(h, expected)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))
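    # The hand-computed expectations in the tie tests above use the standard
    # Kruskal-Wallis tie correction: with tie groups of sizes t_j among N
    # observations,
    #
    #     H_corrected = H_uncorrected / (1 - sum(t_j**3 - t_j) / (N**3 - N))
    #
    # For example, in test_another_tie there are N = 8 observations with tie
    # groups of size 3 (the ones) and 5 (the twos), so the correction factor is
    # 1 - (3**3 - 3 + 5**3 - 5) / (8**3 - 8) = 1 - 144/504 = 5/7, which is the
    # `corr` expression used there.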
def test_empty(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = []
assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))
def test_kruskal_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = stats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kruskal(x, x), (np.nan, np.nan))
assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')
def test_large_no_samples(self):
# Test to see if large samples are handled correctly.
n = 50000
x = np.random.randn(n)
y = np.random.randn(n) + 50
h, p = stats.kruskal(x, y)
expected = 0
assert_approx_equal(p, expected)
class TestCombinePvalues(object):
def test_fisher(self):
# Example taken from https://en.wikipedia.org/wiki/Fisher%27s_exact_test#Example
xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')
assert_approx_equal(p, 0.02156, significant=4)
def test_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')
assert_approx_equal(p, 0.01651, significant=4)
def test_stouffer2(self):
Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')
assert_approx_equal(p, 0.5, significant=4)
def test_weighted_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.ones(3))
assert_approx_equal(p, 0.01651, significant=4)
def test_weighted_stouffer2(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.array((1, 4, 9)))
assert_approx_equal(p, 0.1464, significant=4)
def test_pearson(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='pearson')
assert_approx_equal(p, 0.97787, significant=4)
def test_tippett(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='tippett')
assert_approx_equal(p, 0.970299, significant=4)
def test_mudholkar_george(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george')
assert_approx_equal(p, 3.7191571041915e-07, significant=4)
def test_mudholkar_george_equal_fisher_minus_pearson(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george')
Z_f, p_f = stats.combine_pvalues([.01, .2, .3], method='fisher')
Z_p, p_p = stats.combine_pvalues([.01, .2, .3], method='pearson')
# 0.5 here is because logistic = log(u) - log(1-u), i.e. no 2 factors
assert_approx_equal(0.5 * (Z_f-Z_p), Z, significant=4)
class TestCdfDistanceValidation(object):
"""
Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors
for bad inputs.
"""
def test_distinct_value_and_weight_lengths(self):
# When the number of weights does not match the number of values,
# a ValueError should be raised.
assert_raises(ValueError, stats.wasserstein_distance,
[1], [2], [4], [3, 1])
assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0])
def test_zero_weight(self):
# When a distribution is given zero weight, a ValueError should be
# raised.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [0, 0])
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [3, 1], [0])
def test_negative_weights(self):
# A ValueError should be raised if there are any negative weights.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2, 2], [1, 1], [3, -1])
def test_empty_distribution(self):
# A ValueError should be raised when trying to measure the distance
# between something and nothing.
assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2])
assert_raises(ValueError, stats.wasserstein_distance, [1], [])
def test_inf_weight(self):
# An inf weight is not valid.
assert_raises(ValueError, stats.wasserstein_distance,
[1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1])
class TestWassersteinDistance(object):
""" Tests for wasserstein_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the Wasserstein distance is
# straightforward.
assert_almost_equal(
stats.wasserstein_distance([0, 1], [0], [1, 1], [1]),
.5)
assert_almost_equal(stats.wasserstein_distance(
[0, 1], [0], [3, 1], [1]),
.25)
assert_almost_equal(stats.wasserstein_distance(
[0, 2], [0], [1, 1], [1]),
1)
assert_almost_equal(stats.wasserstein_distance(
[0, 1, 2], [1, 2, 3]),
1)
def test_same_distribution(self):
# Any distribution moved to itself should have a Wasserstein distance of
# zero.
assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.wasserstein_distance([1, 1, 1, 4], [4, 1],
[1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If the whole distribution is shifted by x, then the Wasserstein
# distance should be x.
assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)
assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)
assert_almost_equal(
stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),
10)
assert_almost_equal(
stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],
[3, 1, 1], [1, 3, 1]),
2.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.wasserstein_distance(
[0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.wasserstein_distance([5, 0, 1], [0, 4, 3],
[1, 2, 4], [1, 2, 4]))
def test_collapse(self):
# Collapsing a distribution to a point distribution at zero is
# equivalent to taking the average of the absolute values of the values.
u = np.arange(-10, 30, 0.3)
v = np.zeros_like(u)
assert_almost_equal(
stats.wasserstein_distance(u, v),
np.mean(np.abs(u)))
u_weights = np.arange(len(u))
v_weights = u_weights[::-1]
assert_almost_equal(
stats.wasserstein_distance(u, v, u_weights, v_weights),
np.average(np.abs(u), weights=u_weights))
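    # The collapse identity above follows from the CDF form of the 1-D
    # Wasserstein distance, W_1(u, v) = integral |F_u(x) - F_v(x)| dx: moving a
    # unit of mass from x to the point mass at 0 costs |x|, so the optimal
    # total cost is just the (weighted) mean of |x| under u.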
def test_zero_weight(self):
# Values with zero weight have no impact on the Wasserstein distance.
assert_almost_equal(
stats.wasserstein_distance([1, 2, 100000], [1, 1],
[1, 1, 0], [1, 1]),
stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [1, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
class TestEnergyDistance(object):
""" Tests for energy_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the energy distance is
# straightforward.
assert_almost_equal(
stats.energy_distance([0, 1], [0], [1, 1], [1]),
np.sqrt(2) * .5)
assert_almost_equal(stats.energy_distance(
[0, 1], [0], [3, 1], [1]),
np.sqrt(2) * .25)
assert_almost_equal(stats.energy_distance(
[0, 2], [0], [1, 1], [1]),
2 * .5)
assert_almost_equal(
stats.energy_distance([0, 1, 2], [1, 2, 3]),
np.sqrt(2) * (3*(1./3**2))**.5)
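    # The expected values above come from the CDF form of the energy distance,
    # D(u, v) = sqrt(2) * (integral (F_u(x) - F_v(x))**2 dx)**0.5.  For
    # [0, 1, 2] vs [1, 2, 3] the empirical CDFs differ by 1/3 on three unit
    # intervals, giving D = sqrt(2) * (3 * (1/3)**2)**0.5 as asserted; the
    # sqrt(2) * sqrt(x) shift result tested below follows the same way.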
def test_same_distribution(self):
        # Any distribution moved to itself should have an energy distance of
# zero.
assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If a single-point distribution is shifted by x, then the energy
# distance should be sqrt(2) * sqrt(x).
assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))
assert_almost_equal(
stats.energy_distance([-5], [5]),
np.sqrt(2) * 10**.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))
def test_zero_weight(self):
# Values with zero weight have no impact on the energy distance.
assert_almost_equal(
stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),
stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)
assert_equal(
stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.energy_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.energy_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
class TestBrunnerMunzel(object):
# Data from (Lumley, 1996)
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
def test_brunnermunzel_one_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater')
u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater')
u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(p3, p4, significant=self.significant)
assert_(p1 != p3)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(u3, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u4, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0028931043330757342,
significant=self.significant)
assert_approx_equal(p3, 0.99710689566692423,
significant=self.significant)
def test_brunnermunzel_two_sided(self):
# Results are compared with R's lawstat package.
u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided')
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_default(self):
# The default value for alternative is two-sided
u1, p1 = stats.brunnermunzel(self.X, self.Y)
u2, p2 = stats.brunnermunzel(self.Y, self.X)
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
def test_brunnermunzel_alternative_error(self):
alternative = "error"
distribution = "t"
nan_policy = "propagate"
assert_(alternative not in ["two-sided", "greater", "less"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_distribution_norm(self):
u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal")
u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0017041417600383024,
significant=self.significant)
def test_brunnermunzel_distribution_error(self):
alternative = "two-sided"
distribution = "error"
nan_policy = "propagate"
        assert_(distribution not in ["t", "normal"])
assert_raises(ValueError,
stats.brunnermunzel,
self.X,
self.Y,
alternative,
distribution,
nan_policy)
    def test_brunnermunzel_empty_input(self):
u1, p1 = stats.brunnermunzel(self.X, [])
u2, p2 = stats.brunnermunzel([], self.Y)
u3, p3 = stats.brunnermunzel([], [])
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
assert_equal(u3, np.nan)
assert_equal(p3, np.nan)
def test_brunnermunzel_nan_input_propagate(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate")
assert_equal(u1, np.nan)
assert_equal(p1, np.nan)
assert_equal(u2, np.nan)
assert_equal(p2, np.nan)
def test_brunnermunzel_nan_input_raise(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
alternative = "two-sided"
distribution = "t"
nan_policy = "raise"
assert_raises(ValueError,
stats.brunnermunzel,
X,
Y,
alternative,
distribution,
nan_policy)
assert_raises(ValueError,
stats.brunnermunzel,
Y,
X,
alternative,
distribution,
nan_policy)
def test_brunnermunzel_nan_input_omit(self):
X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit")
u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit")
assert_approx_equal(p1, p2, significant=self.significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=self.significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=self.significant)
assert_approx_equal(p1, 0.0057862086661515377,
significant=self.significant)
class TestRatioUniforms(object):
""" Tests for rvs_ratio_uniforms.
"""
def test_rv_generation(self):
# use KS test to check distribution of rvs
# normal distribution
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
random_state=12345)
assert_equal(stats.kstest(rvs, 'norm')[1] > 0.25, True)
# exponential distribution
rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
vmin=0, vmax=2*np.exp(-1),
size=1000, random_state=12345)
assert_equal(stats.kstest(rvs, 'expon')[1] > 0.25, True)
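    # The bounds above follow the ratio-of-uniforms construction: a point (u, v)
    # drawn uniformly from [0, umax] x [vmin, vmax] is accepted when
    # u <= sqrt(pdf(v/u)), so umax = sup sqrt(pdf(x)) and vmin/vmax are the
    # extrema of x * sqrt(pdf(x)).  For the standard normal density,
    # x * sqrt(pdf(x)) is maximised at x = +/- sqrt(2), which is why the tests
    # use v_bound = sqrt(2) * sqrt(f(sqrt(2))).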
def test_shape(self):
# test shape of return value depending on size parameter
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=3,
random_state=1234)
r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3,),
random_state=1234)
r3 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 1),
random_state=1234)
assert_equal(r1, r2)
assert_equal(r2, r3.flatten())
assert_equal(r1.shape, (3,))
assert_equal(r3.shape, (3, 1))
r4 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 3, 3),
random_state=12)
r5 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=27,
random_state=12)
assert_equal(r4.flatten(), r5)
assert_equal(r4.shape, (3, 3, 3))
r6 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, random_state=1234)
r7 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1,
random_state=1234)
r8 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(1, ),
random_state=1234)
assert_equal(r6, r7)
assert_equal(r7, r8)
def test_random_state(self):
f = stats.norm.pdf
v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
np.random.seed(1234)
r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4))
r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4),
random_state=1234)
assert_equal(r1, r2)
def test_exceptions(self):
f = stats.norm.pdf
# need vmin < vmax
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=3, vmax=1)
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=1, vmax=1)
# need umax > 0
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=-1, vmin=1, vmax=1)
assert_raises(ValueError,
stats.rvs_ratio_uniforms, pdf=f, umax=0, vmin=1, vmax=1)
class TestEppsSingleton(object):
def test_statistic_1(self):
# first example in Goerg & Kaiser, also in original paper of
# Epps & Singleton. Note: values do not match exactly, the
# value of the interquartile range varies depending on how
# quantiles are computed
x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35, 2.69, 0.46, -0.94, -0.37, 12.07])
y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71, 4.29, 5.00, 7.74, 8.38, 8.60])
w, p = stats.epps_singleton_2samp(x, y)
assert_almost_equal(w, 15.14, decimal=1)
assert_almost_equal(p, 0.00442, decimal=3)
def test_statistic_2(self):
# second example in Goerg & Kaiser, again not a perfect match
x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
10, 10, 10))
y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
5, 8, 10))
w, p = stats.epps_singleton_2samp(x, y)
assert_allclose(w, 8.900, atol=0.001)
assert_almost_equal(p, 0.06364, decimal=3)
def test_epps_singleton_array_like(self):
np.random.seed(1234)
x, y = np.arange(30), np.arange(28)
w1, p1 = stats.epps_singleton_2samp(list(x), list(y))
w2, p2 = stats.epps_singleton_2samp(tuple(x), tuple(y))
w3, p3 = stats.epps_singleton_2samp(x, y)
assert_(w1 == w2 == w3)
assert_(p1 == p2 == p3)
def test_epps_singleton_size(self):
# raise error if less than 5 elements
x, y = (1, 2, 3, 4), np.arange(10)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
def test_epps_singleton_nonfinite(self):
# raise error if there are non-finite values
x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
x, y = np.arange(10), (1, 2, 3, 4, 5, np.nan)
assert_raises(ValueError, stats.epps_singleton_2samp, x, y)
def test_epps_singleton_1d_input(self):
x = np.arange(100).reshape(-1, 1)
assert_raises(ValueError, stats.epps_singleton_2samp, x, x)
def test_names(self):
x, y = np.arange(20), np.arange(30)
res = stats.epps_singleton_2samp(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestMGCErrorWarnings(object):
""" Tests errors and warnings derived from MGC.
"""
def test_error_notndarray(self):
# raises error if x or y is not a ndarray
x = np.arange(20)
y = [5] * 20
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
assert_raises(ValueError, stats.multiscale_graphcorr, y, x)
def test_error_shape(self):
# raises error if number of samples different (n)
x = np.arange(100).reshape(25, 4)
y = x.reshape(10, 10)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_lowsamples(self):
# raises error if samples are low (< 3)
x = np.arange(3)
y = np.arange(3)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_nans(self):
# raises error if inputs contain NaNs
x = np.arange(20, dtype=float)
x[0] = np.nan
assert_raises(ValueError, stats.multiscale_graphcorr, x, x)
y = np.arange(20)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_wrongdisttype(self):
# raises error if compute_distance is not a function
x = np.arange(20)
compute_distance = 0
assert_raises(ValueError, stats.multiscale_graphcorr, x, x,
compute_distance=compute_distance)
@pytest.mark.parametrize("reps", [
-1, # reps is negative
'1', # reps is not integer
])
def test_error_reps(self, reps):
# raises error if reps is negative
x = np.arange(20)
assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps)
def test_warns_reps(self):
# raises warning when reps is less than 1000
x = np.arange(20)
reps = 100
assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps)
def test_error_infty(self):
# raises error if input contains infinities
x = np.arange(20)
y = np.ones(20) * np.inf
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
class TestMGCStat(object):
""" Test validity of MGC test statistic
"""
def _simulations(self, samps=100, dims=1, sim_type=""):
# linear simulation
if sim_type == "linear":
x = np.random.uniform(-1, 1, size=(samps, 1))
y = x + 0.3 * np.random.random_sample(size=(x.size, 1))
# spiral simulation
elif sim_type == "nonlinear":
unif = np.array(np.random.uniform(0, 5, size=(samps, 1)))
x = unif * np.cos(np.pi * unif)
y = unif * np.sin(np.pi * unif) + (0.4
* np.random.random_sample(size=(x.size, 1)))
# independence (tests type I simulation)
elif sim_type == "independence":
u = np.random.normal(0, 1, size=(samps, 1))
v = np.random.normal(0, 1, size=(samps, 1))
u_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
v_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
x = u/3 + 2*u_2 - 1
y = v/3 + 2*v_2 - 1
# raises error if not approved sim_type
else:
raise ValueError("sim_type must be linear, nonlinear, or "
"independence")
# add dimensions of noise for higher dimensions
if dims > 1:
dims_noise = np.random.normal(0, 1, size=(samps, dims-1))
x = np.concatenate((x, dims_noise), axis=1)
return x, y
@dec.slow
@pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
("linear", 0.97, 1/1000), # test linear simulation
("nonlinear", 0.163, 1/1000), # test spiral simulation
("independence", -0.0094, 0.78) # test independence simulation
])
def test_oned(self, sim_type, obs_stat, obs_pvalue):
np.random.seed(12345678)
# generate x and y
x, y = self._simulations(samps=100, dims=1, sim_type=sim_type)
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
assert_approx_equal(stat, obs_stat, significant=1)
assert_approx_equal(pvalue, obs_pvalue, significant=1)
@dec.slow
@pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
("linear", 0.184, 1/1000), # test linear simulation
("nonlinear", 0.0190, 0.117), # test spiral simulation
])
def test_fived(self, sim_type, obs_stat, obs_pvalue):
np.random.seed(12345678)
# generate x and y
x, y = self._simulations(samps=100, dims=5, sim_type=sim_type)
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
assert_approx_equal(stat, obs_stat, significant=1)
assert_approx_equal(pvalue, obs_pvalue, significant=1)
@dec.slow
def test_twosamp(self):
np.random.seed(12345678)
# generate x and y
x = np.random.binomial(100, 0.5, size=(100, 5))
y = np.random.normal(0, 1, size=(80, 5))
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
assert_approx_equal(stat, 1.0, significant=1)
assert_approx_equal(pvalue, 0.001, significant=1)
# generate x and y
y = np.random.normal(0, 1, size=(100, 5))
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y, is_twosamp=True)
assert_approx_equal(stat, 1.0, significant=1)
assert_approx_equal(pvalue, 0.001, significant=1)
@pytest.mark.skipif(multiprocessing.get_start_method() != 'fork',
reason=('multiprocessing with spawn method is not'
' compatible with pytest.'))
def test_workers(self):
np.random.seed(12345678)
# generate x and y
x, y = self._simulations(samps=100, dims=1, sim_type="linear")
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y, workers=2)
assert_approx_equal(stat, 0.97, significant=1)
assert_approx_equal(pvalue, 0.001, significant=1)
def test_random_state(self):
# generate x and y
x, y = self._simulations(samps=100, dims=1, sim_type="linear")
# test stat and pvalue
stat, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
assert_approx_equal(stat, 0.97, significant=1)
assert_approx_equal(pvalue, 0.001, significant=1)
| 40.110793 (avg_line_length) | 134 (max_line_length) | 0.575689 (alphanum_fraction) |
hexsha: e392825d4e8f145ba60f68c71fa81c45971c3b80 | size: 13,702 | ext: py | lang: Python
max_stars_repo_path: tensorflow_federated/python/learning/templates/client_works.py | max_stars_repo_name: zhihansh/federated-oss | max_stars_repo_head_hexsha: 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min/max_datetime: null | null
max_issues_repo_path: tensorflow_federated/python/learning/templates/client_works.py | max_issues_repo_name: zhihansh/federated-oss | max_issues_repo_head_hexsha: 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max_datetime: null | null
max_forks_repo_path: tensorflow_federated/python/learning/templates/client_works.py | max_forks_repo_name: zhihansh/federated-oss | max_forks_repo_head_hexsha: 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min/max_datetime: null | null
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Abstractions for client work in learning algorithms."""
from typing import Callable
import attr
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import errors
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.optimizers import optimizer as optimizer_base
@attr.s(frozen=True)
class ClientResult():
"""A structure containing the result of `ClientWorkProcess.next` computation.
Attributes:
update: The local update to model weights produced by clients.
update_weight: A weight for weighted aggregation of the `update`.
"""
update = attr.ib()
update_weight = attr.ib()
class ModelWeightsTypeError(TypeError):
"""`TypeError` for incorrect container of model weights."""
class ClientDataTypeError(TypeError):
"""`TypeError` for incorrect type of client data."""
class ClientResultTypeError(TypeError):
"""`TypeError` for incorrect structure of result of client work."""
class ClientWorkProcess(measured_process.MeasuredProcess):
"""A stateful process capturing work at clients during learning.
  Client work encapsulates the main work performed by clients as part of a
  federated learning algorithm, such as several steps of gradient descent based
  on the client data, and returning an update to the initial model weights.
A `ClientWorkProcess` is a `tff.templates.MeasuredProcess` that formalizes the
type signature of `initialize_fn` and `next_fn` for the core work performed by
clients in a learning process.
The `initialize_fn` and `next_fn` must have the following type signatures:
```
- initialize_fn: ( -> S@SERVER)
- next_fn: (<S@SERVER,
ModelWeights(TRAINABLE, NON_TRAINABLE)@CLIENTS,
DATA@CLIENTS>
->
<state=S@SERVER,
result=ClientResult(TRAINABLE, W)@CLIENTS,
measurements=M@SERVER>)
```
with `W` and `M` being arbitrary types not dependent on other types here.
`ClientWorkProcess` requires `next_fn` with a second and a third input
argument, which are both values placed at `CLIENTS`. The second argument is
initial model weights to be used for the work to be performed by clients. It
must be of a type matching `tff.learning.ModelWeights`, for these to be
assignable to the weights of a `tff.learning.Model`. The third argument must
be a `tff.SequenceType` representing the data available at clients.
The `result` field of the returned `tff.templates.MeasuredProcessOutput` must
be placed at `CLIENTS`, and be of type matching `ClientResult`, of which the
`update` field represents the update to the trainable model weights, and
`update_weight` represents the weight to be used for weighted aggregation of
the updates.
The `measurements` field of the returned `tff.templates.MeasuredProcessOutput`
must be placed at `SERVER`. Thus, implementation of this process must include
aggregation of any metrics computed during training. TODO(b/190334722):
Confirm this aspect, or change it.
"""
def __init__(self, initialize_fn, next_fn):
super().__init__(initialize_fn, next_fn, next_is_multi_arg=True)
if not initialize_fn.type_signature.result.is_federated():
raise errors.TemplateNotFederatedError(
f'Provided `initialize_fn` must return a federated type, but found '
f'return type:\n{initialize_fn.type_signature.result}\nTip: If you '
f'see a collection of federated types, try wrapping the returned '
f'value in `tff.federated_zip` before returning.')
next_types = (
structure.flatten(next_fn.type_signature.parameter) +
structure.flatten(next_fn.type_signature.result))
if not all([t.is_federated() for t in next_types]):
offending_types = '\n- '.join(
          [str(t) for t in next_types if not t.is_federated()])
raise errors.TemplateNotFederatedError(
f'Provided `next_fn` must be a *federated* computation, that is, '
f'operate on `tff.FederatedType`s, but found\n'
f'next_fn with type signature:\n{next_fn.type_signature}\n'
f'The non-federated types are:\n {offending_types}.')
if initialize_fn.type_signature.result.placement != placements.SERVER:
raise errors.TemplatePlacementError(
f'The state controlled by a `ClientWorkProcess` must be placed at '
f'the SERVER, but found type: {initialize_fn.type_signature.result}.')
# Note that state of next_fn being placed at SERVER is now ensured by the
# assertions in base class which would otherwise raise
# TemplateStateNotAssignableError.
next_fn_param = next_fn.type_signature.parameter
if not next_fn_param.is_struct():
raise errors.TemplateNextFnNumArgsError(
f'The `next_fn` must have exactly three input arguments, but found '
f'the following input type which is not a Struct: {next_fn_param}.')
if len(next_fn_param) != 3:
next_param_str = '\n- '.join([str(t) for t in next_fn_param])
raise errors.TemplateNextFnNumArgsError(
f'The `next_fn` must have exactly three input arguments, but found '
f'{len(next_fn_param)} input arguments:\n{next_param_str}')
model_weights_param = next_fn_param[1]
client_data_param = next_fn_param[2]
if model_weights_param.placement != placements.CLIENTS:
raise errors.TemplatePlacementError(
f'The second input argument of `next_fn` must be placed at CLIENTS '
f'but found {model_weights_param}.')
if (not model_weights_param.member.is_struct_with_python() or
model_weights_param.member.python_container
is not model_utils.ModelWeights):
raise ModelWeightsTypeError(
f'The second input argument of `next_fn` must have the '
f'`tff.learning.ModelWeights` container but found '
f'{model_weights_param}')
if client_data_param.placement != placements.CLIENTS:
raise errors.TemplatePlacementError(
f'The third input argument of `next_fn` must be placed at CLIENTS '
f'but found {client_data_param}.')
if not client_data_param.member.is_sequence():
raise ClientDataTypeError(
f'The third input argument of `next_fn` must be a sequence but found '
f'{client_data_param}.')
next_fn_result = next_fn.type_signature.result
if (not next_fn_result.result.is_federated() or
next_fn_result.result.placement != placements.CLIENTS):
raise errors.TemplatePlacementError(
f'The "result" attribute of the return type of `next_fn` must be '
f'placed at CLIENTS, but found {next_fn_result.result}.')
if (not next_fn_result.result.member.is_struct_with_python() or
next_fn_result.result.member.python_container is not ClientResult):
raise ClientResultTypeError(
f'The "result" attribute of the return type of `next_fn` must have '
f'the `ClientResult` container, but found {next_fn_result.result}.')
if not model_weights_param.member.trainable.is_assignable_from(
next_fn_result.result.member.update):
raise ClientResultTypeError(
f'The "update" attribute of returned `ClientResult` must match '
f'the "trainable" attribute of the `tff.learning.ModelWeights` '
f'expected as second input argument of the `next_fn`. Found:\n'
f'Second input argument: {model_weights_param.member.trainable}\n'
f'Update attribute of result: {next_fn_result.result.member.update}.')
if next_fn_result.measurements.placement != placements.SERVER:
raise errors.TemplatePlacementError(
f'The "measurements" attribute of return type of `next_fn` must be '
f'placed at SERVER, but found {next_fn_result.measurements}.')
# TODO(b/190334722): Add model metric handling and aggregation and report it in
# the measurement field of the output.
def build_model_delta_client_work(model_fn: Callable[[], model_lib.Model],
optimizer: optimizer_base.Optimizer):
"""Builds `ClientWorkProcess` returning change to the trained model weights.
The created `ClientWorkProcess` expects model weights that can be assigned to
the model created by `model_fn`, and will apply `optimizer` to optimize the
model using the client data. The returned `ClientResult` will contain the
  difference between the trained and initial trainable model weights (aka
  "model delta") as the update, and the update_weight will be the number of examples
used in training. The type signature for client data is derived from the input
spec of the model.
This method is the recommended starting point for forking a custom
implementation of the `ClientWorkProcess`.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
optimizer: A `tff.learning.optimizers.Optimizer`.
Returns:
A `ClientWorkProcess`.
"""
py_typecheck.check_callable(model_fn)
# TODO(b/190334722): Include support for Keras optimizers via
# tff.learning.optimizers.KerasOptimizer when ready.
py_typecheck.check_type(optimizer, optimizer_base.Optimizer)
weights_type, data_type = _weights_and_data_type_from_model_fn(model_fn)
# TODO(b/161529310): We flatten and convert the trainable specs to tuple, as
# "for batch in data:" pattern would try to stack the tensors in a list.
optimizer_tensor_specs = _flat_tuple(
type_conversions.type_to_tf_tensor_specs(weights_type.trainable))
@computations.tf_computation(weights_type, data_type)
@tf.function
def local_update(initial_weights, data):
# TODO(b/190334722): Restructure so that model_fn only needs to be invoked
# once.
with tf.init_scope():
model = model_fn()
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda weight, value: weight.assign(value),
model_weights, initial_weights)
num_examples = tf.constant(0, tf.int32)
optimizer_state = optimizer.initialize(optimizer_tensor_specs)
# TODO(b/161529310): Different from creating an iterator using iter(data).
for batch in data:
with tf.GradientTape() as tape:
outputs = model.forward_pass(batch)
gradients = tape.gradient(outputs.loss, model_weights.trainable)
num_examples += tf.shape(outputs.predictions)[0]
optimizer_state, updated_weights = optimizer.next(
optimizer_state, _flat_tuple(model_weights.trainable),
_flat_tuple(gradients))
updated_weights = tf.nest.pack_sequence_as(model_weights.trainable,
updated_weights)
tf.nest.map_structure(lambda weight, value: weight.assign(value),
model_weights.trainable, updated_weights)
model_delta = tf.nest.map_structure(lambda x, y: x - y,
initial_weights.trainable,
model_weights.trainable)
return ClientResult(
update=model_delta, update_weight=tf.cast(num_examples, tf.float32))
@computations.federated_computation
def init_fn():
return intrinsics.federated_value((), placements.SERVER)
@computations.federated_computation(
init_fn.type_signature.result, computation_types.at_clients(weights_type),
computation_types.at_clients(data_type))
def next_fn(state, weights, client_data):
client_result = intrinsics.federated_map(local_update,
(weights, client_data))
empty_measurements = intrinsics.federated_value((), placements.SERVER)
return measured_process.MeasuredProcessOutput(state, client_result,
empty_measurements)
return ClientWorkProcess(init_fn, next_fn)
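# A minimal usage sketch of the process built above (everything not defined in
# this module -- `my_model_fn` and `my_optimizer` -- is assumed to exist; the
# optimizer must be a `tff.learning.optimizers.Optimizer`):
#
#   client_work = build_model_delta_client_work(my_model_fn, my_optimizer)
#   state = client_work.initialize()
#   output = client_work.next(state, weights_at_clients, data_at_clients)
#
# `output.result` is a `ClientResult(update=model_delta, update_weight=num_examples)`
# per client and `output.measurements` is the empty tuple placed at SERVER,
# exactly as checked by `ClientWorkProcess.__init__`.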
def _weights_and_data_type_from_model_fn(model_fn):
with tf.Graph().as_default():
# Wrap model construction in a graph to avoid polluting the global context
# with variables created for this model.
model = model_fn()
data_type = computation_types.SequenceType(model.input_spec)
model_weights_type = model_utils.weights_type_from_model(model)
return model_weights_type, data_type
def _flat_tuple(struct):
return tuple(tf.nest.flatten(struct))
| 47.248276 (avg_line_length) | 87 (max_line_length) | 0.728945 (alphanum_fraction) |
hexsha: 2bcf1ca1dd80c97589d82f7f98dbe0f54e5fed39 | size: 1,758 | ext: py | lang: Python
max_stars_repo_path: package/awesome_panel/application/models/theme.py | max_stars_repo_name: Jhsmit/awesome-panel | max_stars_repo_head_hexsha: 53f7754f7c505a2666f6724df26c851ae942ec40 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min/max_datetime: null | null
max_issues_repo_path: package/awesome_panel/application/models/theme.py | max_issues_repo_name: Jhsmit/awesome-panel | max_issues_repo_head_hexsha: 53f7754f7c505a2666f6724df26c851ae942ec40 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max_datetime: null | null
max_forks_repo_path: package/awesome_panel/application/models/theme.py | max_forks_repo_name: Jhsmit/awesome-panel | max_forks_repo_head_hexsha: 53f7754f7c505a2666f6724df26c851ae942ec40 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min/max_datetime: null | null
"""In this module we implement the Theme Model
Use the Theme model to
- Provide theming to your Template and Application
- implement a custom Theme subclass
"""
import holoviews as hv
import param
from bokeh.themes.theme import Theme as BokehTheme
from holoviews import Cycle
from awesome_panel.application import assets
hv.extension("bokeh")
_COLOR_CYCLE = tuple(Cycle.default_cycles["Category20"])
class Theme(param.Parameterized):
"""The Theme model provides parameters and functionality like links to spinner images and css.
- Provide theming to your Template and Application
    - implement a custom Theme subclass
"""
spinner_static_url = param.String(assets.SPINNER_PANEL_STATIC_LIGHT_400_340)
spinner_url = param.String(assets.SPINNER_PANEL_BREATH_LIGHT_400_340)
css = param.String()
color_cycle = param.Tuple(_COLOR_CYCLE)
bokeh_disable_logo = param.Boolean(True)
bokeh_theme_json = param.Dict()
@property
def holoviews_color_cycle(self) -> Cycle:
"""Returns the HoloViews color Cycle to be used when plotting with the Theme as the active
Theme.
Returns:
            Cycle: A HoloViews color Cycle.
"""
if self.color_cycle:
color_cycle = self.color_cycle
else:
color_cycle = _COLOR_CYCLE
return Cycle(list(color_cycle))
@property
def bokeh_theme(self) -> BokehTheme:
"""Returns the Bokeh Theme to be used when plotting with the Theme as the active Theme.
Returns:
BokehTheme: A Bokeh Theme
"""
if self.bokeh_theme_json:
return BokehTheme(json=self.bokeh_theme_json)
return BokehTheme(json={})
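# A minimal sketch of how a custom theme could be declared and used (the colour
# values below are illustrative assumptions):
#
#   class DarkTheme(Theme):
#       css = param.String("body { background-color: #181818; color: #eeeeee; }")
#
#   theme = DarkTheme()
#   color_cycle = theme.holoviews_color_cycle  # pass to HoloViews plot options
#   bokeh_theme = theme.bokeh_theme            # attach to a Bokeh document/template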
| 29.79661 (avg_line_length) | 99 (max_line_length) | 0.680887 (alphanum_fraction) |
hexsha: 7a768f762c57490b01c8ae32d7ce418b5cb8bb7b | size: 8,174 | ext: py | lang: Python
max_stars_repo_path: python/GafferUI/ScriptWindow.py | max_stars_repo_name: Tuftux/gaffer | max_stars_repo_head_hexsha: 5acaf7cbfadbae841dc06854121ca85dcc5c338c | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 31 | stars_event_min/max_datetime: 2017-07-10T10:02:07.000Z | 2022-02-08T13:54:14.000Z
max_issues_repo_path: python/GafferUI/ScriptWindow.py | max_issues_repo_name: Tuftux/gaffer | max_issues_repo_head_hexsha: 5acaf7cbfadbae841dc06854121ca85dcc5c338c | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | issues_event_min/max_datetime: null | null
max_forks_repo_path: python/GafferUI/ScriptWindow.py | max_forks_repo_name: Tuftux/gaffer | max_forks_repo_head_hexsha: 5acaf7cbfadbae841dc06854121ca85dcc5c338c | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 3 | forks_event_min/max_datetime: 2017-11-04T15:30:11.000Z | 2018-09-25T18:36:11.000Z
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import IECore
import Gaffer
import GafferUI
class ScriptWindow( GafferUI.Window ) :
def __init__( self, script, **kw ) :
self.__titleChangedSignal = GafferUI.WidgetEventSignal()
GafferUI.Window.__init__( self, **kw )
self.__script = script
self.__titleBehaviour = _WindowTitleBehaviour( self, script )
self.__listContainer = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 0 )
menuDefinition = self.menuDefinition( script.applicationRoot() ) if script.applicationRoot() else IECore.MenuDefinition()
self.__listContainer.append( GafferUI.MenuBar( menuDefinition ) )
applicationRoot = self.__script.ancestor( Gaffer.ApplicationRoot )
layouts = GafferUI.Layouts.acquire( applicationRoot ) if applicationRoot is not None else None
if layouts is not None :
self.setLayout( layouts.createDefault( script ) )
else :
self.setLayout( GafferUI.CompoundEditor( script ) )
self.setChild( self.__listContainer )
self.closedSignal().connect( Gaffer.WeakMethod( self.__closed ), scoped = False )
ScriptWindow.__instances.append( weakref.ref( self ) )
def menuBar( self ) :
return self.__listContainer[0]
def scriptNode( self ) :
return self.__script
def setLayout( self, compoundEditor ) :
if len( self.__listContainer ) > 1 :
del self.__listContainer[1]
assert( compoundEditor.scriptNode().isSame( self.scriptNode() ) )
self.__listContainer.append( compoundEditor, expand=True )
def getLayout( self ) :
return self.__listContainer[1]
	# Calling this will disable automatic title updates when the script's
	# name or dirty state changes.
def setTitle( self, title ) :
self.__titleBehaviour = None
self._setTitle( title )
def _setTitle( self, title ) :
GafferUI.Window.setTitle( self, title )
self.__titleChangedSignal( self, title )
def titleChangedSignal( self ) :
return self.__titleChangedSignal
def _acceptsClose( self ) :
if not self.__script["unsavedChanges"].getValue() :
return True
f = self.__script["fileName"].getValue()
f = f.rpartition( "/" )[2] if f else "untitled"
dialogue = GafferUI.ConfirmationDialogue(
"Discard Unsaved Changes?",
"The file %s has unsaved changes. Do you want to discard them?" % f,
confirmLabel = "Discard"
)
return dialogue.waitForConfirmation( parentWindow=self )
def __closed( self, widget ) :
scriptParent = self.__script.parent()
if scriptParent is not None :
scriptParent.removeChild( self.__script )
__instances = [] # weak references to all instances - used by acquire()
## Returns the ScriptWindow for the specified script, creating one
# if necessary.
@staticmethod
def acquire( script, createIfNecessary=True ) :
for w in ScriptWindow.__instances :
scriptWindow = w()
if scriptWindow is not None and scriptWindow.scriptNode().isSame( script ) :
return scriptWindow
return ScriptWindow( script ) if createIfNecessary else None
## Returns an IECore.MenuDefinition which is used to define the menu bars for all ScriptWindows
# created as part of the specified application. This can be edited at any time to modify subsequently
# created ScriptWindows - typically editing would be done as part of gaffer startup.
@staticmethod
def menuDefinition( applicationOrApplicationRoot ) :
if isinstance( applicationOrApplicationRoot, Gaffer.Application ) :
applicationRoot = applicationOrApplicationRoot.root()
else :
assert( isinstance( applicationOrApplicationRoot, Gaffer.ApplicationRoot ) )
applicationRoot = applicationOrApplicationRoot
menuDefinition = getattr( applicationRoot, "_scriptWindowMenuDefinition", None )
if menuDefinition :
return menuDefinition
menuDefinition = IECore.MenuDefinition()
applicationRoot._scriptWindowMenuDefinition = menuDefinition
return menuDefinition
## This function provides the top level functionality for instantiating
# the UI. Once called, new ScriptWindows will be instantiated for each
# script added to the application, and EventLoop.mainEventLoop().stop() will
# be called when the last script is removed.
@classmethod
def connect( cls, applicationRoot ) :
applicationRoot["scripts"].childAddedSignal().connect( 0, ScriptWindow.__scriptAdded, scoped = False )
applicationRoot["scripts"].childRemovedSignal().connect( ScriptWindow.__staticScriptRemoved, scoped = False )
__automaticallyCreatedInstances = [] # strong references to instances made by __scriptAdded()
@staticmethod
def __scriptAdded( scriptContainer, script ) :
w = ScriptWindow( script )
w.setVisible( True )
w.getLayout().restoreWindowState()
ScriptWindow.__automaticallyCreatedInstances.append( w )
@staticmethod
def __staticScriptRemoved( scriptContainer, script ) :
for w in ScriptWindow.__automaticallyCreatedInstances :
if w.scriptNode().isSame( script ) :
ScriptWindow.__automaticallyCreatedInstances.remove( w )
if not len( scriptContainer.children() ) and GafferUI.EventLoop.mainEventLoop().running() :
GafferUI.EventLoop.mainEventLoop().stop()
class _WindowTitleBehaviour :
def __init__( self, window, script ) :
self.__window = weakref.ref( window )
self.__script = weakref.ref( script )
self.__scriptPlugSetConnection = script.plugSetSignal().connect( Gaffer.WeakMethod( self.__scriptPlugChanged ) )
self.__metadataChangedConnection = Gaffer.Metadata.nodeValueChangedSignal().connect( Gaffer.WeakMethod( self.__metadataChanged ) )
self.__updateTitle()
def __updateTitle( self ) :
w = self.__window()
if not w :
return
f = self.__script()["fileName"].getValue()
if not f :
f = "untitled"
d = ""
else :
d, n, f = f.rpartition( "/" )
d = " - " + d
u = " *" if self.__script()["unsavedChanges"].getValue() else ""
ro = " (read only) " if Gaffer.MetadataAlgo.readOnly( self.__script() ) else ""
w._setTitle( "Gaffer %s : %s%s%s%s" % ( Gaffer.About.versionString(), f, ro, u, d ) )
def __scriptPlugChanged( self, plug ) :
if plug.isSame( self.__script()["fileName"] ) or plug.isSame( self.__script()["unsavedChanges"] ) :
self.__updateTitle()
def __metadataChanged( self, nodeTypeId, key, node ) :
if Gaffer.MetadataAlgo.readOnlyAffectedByChange( self.__script(), nodeTypeId, key, node ) :
self.__updateTitle()
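# A minimal sketch of the typical wiring (only `connect`, `menuDefinition` and
# `acquire` come from this module; `application`, `Gaffer.ScriptNode` and the
# `about` callable are assumptions used for illustration):
#
#   ScriptWindow.connect( application.root() )
#   definition = ScriptWindow.menuDefinition( application )
#   definition.append( "/Help/About", { "command" : about } )
#
#   script = Gaffer.ScriptNode()
#   application.root()["scripts"].addChild( script )  # triggers __scriptAdded()
#   window = ScriptWindow.acquire( script )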
| 34.489451 (avg_line_length) | 132 (max_line_length) | 0.729019 (alphanum_fraction) |
hexsha: 7cd8a19e5931c2c6ec4697f9e35b8b07916322f9 | size: 9,362 | ext: py | lang: Python
max_stars_repo_path: python-examples/gui/line-balancing/backend/engine/interface/custom_class_view.py | max_stars_repo_name: ggsdc/cornflow-examples | max_stars_repo_head_hexsha: 05b4a87eb4dc2389f00f8a9d7f766aef942b9da8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null | null
max_issues_repo_path: python-examples/gui/line-balancing/backend/engine/interface/custom_class_view.py | max_issues_repo_name: ggsdc/cornflow-examples | max_issues_repo_head_hexsha: 05b4a87eb4dc2389f00f8a9d7f766aef942b9da8 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | issues_event_min/max_datetime: 2020-10-05T15:08:09.000Z | 2021-03-19T23:03:36.000Z
max_forks_repo_path: python-examples/gui/line-balancing/backend/engine/interface/custom_class_view.py | max_forks_repo_name: ggsdc/cornflow-examples | max_forks_repo_head_hexsha: 05b4a87eb4dc2389f00f8a9d7f766aef942b9da8 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event_min/max_datetime: 2020-10-03T20:08:14.000Z | 2020-10-03T20:08:14.000Z
from typing import List
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from rest_framework import status
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.views import APIView
class CustomApiView(APIView):
"""
Class to handle the API endpoints
"""
def get_list(self, request, queryset, model_serializer):
"""
Function that gives back all the results that match the queryset
:param request: the request done to the API, needed to check the auth.
:param queryset: the queryset object that performs the query.
:param model_serializer: the serializer to serialize the json result file.
:return: a response to the request with the results.
"""
data = queryset
if data.count() > 0:
serializer = model_serializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
def get_detail(self, request, model, model_serializer, key, value):
"""
Function that gives back the result of a single row on the database identified by the primary key value.
:param request: the request done to the API, needed to check the auth.
:param model: the database model that is going to be searched.
:param model_serializer: the serializer to serialize the json result file.
:param key: the primary key column in the database table.
:param value: the value for the primary key that has to be searched.
:return: a response to the request with the results.
"""
try:
            instance = model.objects.get(**{key: value})
except model.DoesNotExist:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
serializer = model_serializer(instance)
return Response(serializer.data, status=status.HTTP_200_OK)
def post_list(self, request, queryset, model_serializer, date_fields: List[str] = None):
"""
        Function that allows POSTing new data to the database.
:param request: the request done to the API, needed to check the auth.
:param queryset: the queryset object that performs the query and gives back the data again.
:param model_serializer: the serializer to serialize the json result file.
:param date_fields: list with the names of the fields that are dates that come from the frontend.
:return: a response to the request with the results.
"""
data = JSONParser().parse(request)
serializer = model_serializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put_list(self, request, model, queryset, model_serializer, key: str = 'id', date_fields: List[str] = None):
"""
        Function that allows performing a PUT on an endpoint without passing the primary key value to the API.
:param request: the request done to the API, needed to check the auth.
:param model: the database model that is going to be searched.
:param queryset: the queryset object that performs the query and gives back the data again.
:param model_serializer: the serializer to serialize the json result file.
:param key: the column name for the primary key.
:param date_fields: list with the names of the fields that are dates that come from the frontend.
:return: a response to the request with the results.
"""
data = JSONParser().parse(request)
try:
            instance = model.objects.get(**{key: data[key]})
except model.DoesNotExist:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
serializer = model_serializer(instance, data)
if serializer.is_valid():
serializer.update(instance, serializer.validated_data)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put_detail(self, request, model, queryset, model_serializer, key, value, date_fields: List[str] = None):
"""
        Function that allows performing a PUT given a primary key value.
:param request: the request done to the API, needed to check the auth.
:param model: the database model that is going to be searched.
:param queryset: the queryset object that performs the query and gives back the data again.
:param model_serializer: the serializer to serialize the json result file.
:param key: the column name for the primary key.
:param value: the value for the primary key that has to be searched.
:param date_fields: list with the names of the fields that are dates that come from the frontend.
:return: a response to the request with the results.
"""
data = JSONParser().parse(request)
try:
            instance = model.objects.get(**{key: value})
except model.DoesNotExist:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
serializer = model_serializer(instance, data=data)
if serializer.is_valid():
serializer.update(instance, serializer.validated_data)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete_detail(self, request, model, key, value):
"""
Function to perform a DELETE given a primary key value
:param request: the request done to the API, needed to check the auth.
:param model: the database model that is going to be searched.
:param key: the column name for the primary key.
:param value: the value for the primary key that has to be searched.
:return:
"""
try:
            instance = model.objects.get(**{key: value})
except model.DoesNotExist:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
if instance is None:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
instance.delete()
return Response({'message': 'Data has been deleted'}, status=status.HTTP_200_OK)
@staticmethod
def get_page(queryset, page, num):
"""
Function to get the objects in each page given a query, a page number and a number of elements per page
:param queryset: the queryset object that performs the query and gives back the data again.
:param page: the page number that has to be given back.
:param num: the number of elements per page.
:return: the objects read and an error code
"""
error = None
try:
n_rows = queryset.count()
except Exception as e:
print("It is a raw query: ", e)
n_rows = len(queryset)
if num > n_rows or num <= 0:
num = n_rows
try:
paginator = Paginator(queryset, num, allow_empty_first_page=True)
except Exception as e:
print("It is a raw query: ", e)
paginator = Paginator(queryset, 10, allow_empty_first_page=True)
try:
queryset_page = paginator.page(page)
except PageNotAnInteger:
queryset_page = paginator.page(1)
except EmptyPage:
queryset_page = paginator.page(paginator.num_pages)
except ZeroDivisionError:
queryset_page = queryset
error = -1
return queryset_page, error
def get_list_page(self, request, queryset, model_serializer, page, num):
"""
Function to return the results in pages given a page number and a number of elements per page
:param request: the request done to the API, needed to check the auth.
:param queryset: the queryset object that performs the query and gives back the data again.
:param model_serializer: the serializer to serialize the json result file.
:param page: the number of the page that needs to be given back.
:param num: the number of elements in each page.
:return: a response to the request with the results.
"""
queryset_page, error = self.get_page(queryset, page, num)
if error == -1:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
data = queryset_page.object_list
try:
n_rows = data.count()
except Exception as e:
print("It is a raw query: ", e)
n_rows = len(data)
if n_rows > 0:
serializer = model_serializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response({'message': 'No available data'}, status=status.HTTP_204_NO_CONTENT)
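# Usage sketch (hypothetical names, not from the original project): a concrete
# endpoint would subclass CustomApiView and delegate its HTTP verbs to the
# generic helpers above, e.g. for a "Task" model with a "TaskSerializer":
#
#     class TaskView(CustomApiView):
#
#         def get(self, request, page=1, num=10):
#             return self.get_list_page(request, Task.objects.all(),
#                                       TaskSerializer, page, num)
#
#         def post(self, request):
#             return self.post_list(request, Task.objects.all(), TaskSerializer)
#
#         def delete(self, request, pk):
#             return self.delete_detail(request, Task, 'id', pk)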
| 44.794258
| 116
| 0.657872
|
dadafacd275a9258feac5beee573c0ce70c0c278
| 2,066
|
py
|
Python
|
vaccine.py
|
erik06/CVS-covid-vaccine-checker
|
d13deabbf47bfc690d530e6d7d14026ca473c6b0
|
[
"Apache-2.0"
] | null | null | null |
vaccine.py
|
erik06/CVS-covid-vaccine-checker
|
d13deabbf47bfc690d530e6d7d14026ca473c6b0
|
[
"Apache-2.0"
] | null | null | null |
vaccine.py
|
erik06/CVS-covid-vaccine-checker
|
d13deabbf47bfc690d530e6d7d14026ca473c6b0
|
[
"Apache-2.0"
] | null | null | null |
"""
This is a Python script that requires you to have Python installed, or to run it in a cloud environment.
This script scrapes the CVS website looking for vaccine appointments in the cities you list.
To update for your area, update the locations marked with ### below.
If you receive an error that says something is not installed, type
pip install beepy
in your terminal.
Source:
https://python.plainenglish.io/how-i-built-a-cvs-vaccine-appointment-availability-checker-in-python-6beb379549e4
"""
import requests
import time
import beepy
import subprocess
def findAVaccine():
hours_to_run = 24
# Update this to set the number of hours you want the script to run.
max_time = time.time() + hours_to_run * 60 * 60
while time.time() < max_time:
state = "IL" ###Update with your state abbreviation. Be sure to use all CAPS, e.g. RI
url = "https://www.cvs.com/immunizations/covid-19-vaccine"
response = requests.get(
"https://www.cvs.com/immunizations/covid-19-vaccine.vaccine-status.{}.json?vaccineinfo".format(
state.lower()
),
headers={"Referer": url},
)
payload = response.json()
mappings = {}
for item in payload["responsePayloadData"]["data"][state]:
mappings[item.get("city")] = item.get("status")
print(time.ctime())
cities = ["CHICAGO"] ###Update with your cities nearby
for city in cities:
print(city, mappings[city])
for key in mappings.keys():
if (key in cities) and (mappings[key] != "Fully Booked"):
subprocess.Popen(["open", url])
for _ in range(20): # repeat sound 20 times
beepy.beep(sound="coin")
break
else:
pass
time.sleep(60)
# This runs every 60 seconds. Update here if you'd like it to go every 5min (300sec)
print("\n")
findAVaccine() ###this final line runs the function. Your terminal will output the cities every 60 seconds
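# A one-shot variant (a sketch, not part of the original script; names are
# illustrative): the same JSON endpoint can be queried a single time to get the
# status for the listed cities, which is handy for testing before starting the
# 24-hour loop above:
#
#     def check_once(state="IL", cities=("CHICAGO",)):
#         url = "https://www.cvs.com/immunizations/covid-19-vaccine"
#         payload = requests.get(
#             "{}.vaccine-status.{}.json?vaccineinfo".format(url, state.lower()),
#             headers={"Referer": url},
#         ).json()
#         data = payload["responsePayloadData"]["data"][state]
#         return {d["city"]: d["status"] for d in data if d["city"] in cities}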
| 30.835821
| 112
| 0.626331
|
4dd923657434f5419bf8f2722f0dc5f5fb100c42
| 21,885
|
py
|
Python
|
demo.py
|
kevincao91/kevin.ai.vehicle_detection
|
fccf0ebb778ff408bc5990ab29b90ee7cb9d97ad
|
[
"MIT"
] | 2
|
2019-04-24T18:47:45.000Z
|
2020-07-16T08:08:57.000Z
|
demo.py
|
kevincao91/kevin.ai.vehicle_detection
|
fccf0ebb778ff408bc5990ab29b90ee7cb9d97ad
|
[
"MIT"
] | null | null | null |
demo.py
|
kevincao91/kevin.ai.vehicle_detection
|
fccf0ebb778ff408bc5990ab29b90ee7cb9d97ad
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# PyTorch Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Kevin Cao, based on code from Jianwei Yang
# --------------------------------------------------------
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from scipy.misc import imread
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import pdb
import pynvml
from custom_operations.custom_check import CustomChecker
from custom_operations.custom_show import vis_text_beautiful, vis_detections_beautiful
import glob
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='voc_car_2010', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/res18.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res18', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models',
default="models")
parser.add_argument('--image_dir', dest='image_dir',
help='directory to load images for demo',
default="images")
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=6, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=257, type=int)
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--webcam_num', dest='webcam_num',
help='webcam ID number',
default=-1, type=int)
parser.add_argument('--gpu_id', dest='gpu_id',
help='which gpu is used',
default=0, type=int)
# refine
parser.add_argument('--refine', dest='refine',
help='whether use refine anchor',
action='store_true')
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def _get_avg(num_list, long=48):
"""average number input.
Arguments:
num_list (list): a list of number input
Returns:
num_avg (float): a float average number of input
"""
if not num_list:
return 0
if len(num_list) >= long:
num_avg = sum(num_list[-10:]) / 10
else:
num_avg = sum(num_list) / len(num_list)
return num_avg
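# For example (illustrative values): _get_avg([1.0, 2.0, 3.0]) returns 2.0;
# once the list reaches the `long` threshold (48 by default) only the mean of
# the last 10 entries is returned.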
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
# changed to use pytorch models
im_orig /= 255. # Convert range to [0,1]
pixel_means = [0.485, 0.456, 0.406]
im_orig -= pixel_means # Minus mean
pixel_stdens = [0.229, 0.224, 0.225]
im_orig /= pixel_stdens # divide by stddev
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
# for target_size in cfg.TEST.SCALES:
# im_scale = float(target_size) / float(im_size_min)
# # Prevent the biggest axis from being more than MAX_SIZE
# if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
# im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
# im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
# interpolation=cv2.INTER_LINEAR)
# im_scale_factors.append(im_scale)
# processed_ims.append(im)
# no need to change size
im_scale_factors.append(1.0)
processed_ims.append(im_orig)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
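# Shape sketch (assuming the standard im_list_to_blob helper): for a single
# 480x640x3 frame the returned blob has shape (1, 480, 640, 3) and the scale
# factor array is [1.0], since the resizing code above is commented out.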
if __name__ == '__main__':
    # Parse the command-line arguments
args = parse_args()
    # Query GPU information
    # the 0 here is the GPU id being used
gpu_id = args.gpu_id
pynvml.nvmlInit()
print("============= Driver Information =============")
driver_version = pynvml.nvmlSystemGetDriverVersion()
driver_version = str(driver_version, encoding='utf-8')
print("Driver Version:", driver_version) # 显卡驱动版本
device_Count = pynvml.nvmlDeviceGetCount() # 几块显卡
print("GPU Count:", device_Count)
gpu_info_list = []
for i in range(device_Count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
gpu_name = pynvml.nvmlDeviceGetName(handle) # name
gpu_name = str(gpu_name, encoding='utf-8')
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
mem_total = meminfo.total / (1024 * 1024 * 1024) # bit --> G
mem_used = meminfo.used / (1024 * 1024 * 1024) # bit --> G
gpu_info_list.append([i, gpu_name, mem_used, mem_total])
print("Device %d : %s %.6f G / %.6f G" % (i, gpu_name, mem_used, mem_total)) # 具体是什么显卡
print("==============================================")
print('Called with args:')
print(args)
print(torch.cuda.is_available())
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
pascal_classes = np.asarray(['__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor'])
else:
pascal_classes = np.asarray(['__background__',
'car'])
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.dataset:
if args.refine:
print('refine')
cfg_file_name = 'cfgs/{}_refine.yml'.format(args.dataset)
cfg_from_file(cfg_file_name)
else:
cfg_file_name = 'cfgs/{}.yml'.format(args.dataset)
cfg_from_file(cfg_file_name)
cfg.USE_GPU_NMS = args.cuda
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
    # initialize the network here.
if args.net == 'res101':
fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res18':
fasterRCNN = resnet(pascal_classes, 18, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res34':
fasterRCNN = resnet(pascal_classes, 34, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % load_name)
if args.cuda > 0:
checkpoint = torch.load(load_name)
else:
checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
    # Show GPU memory usage
    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
    meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
    print('GPU mem used: %.10f G' % (meminfo.used / (1024 * 1024 * 1024)), 'after load the weight')
# pdb.set_trace()
print('load image')
    # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda > 0:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
with torch.no_grad():
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda > 0:
cfg.CUDA = True
if args.cuda > 0:
fasterRCNN.cuda()
fasterRCNN.eval()
start = time.time()
max_per_image = 100
thresh = 0.05
vis = False
webcam_num = args.webcam_num
# Set up webcam or get image directories
if webcam_num >= 0:
cap = cv2.VideoCapture(webcam_num)
num_frame = 100000
num_images = num_frame
else:
imglist = os.listdir(args.image_dir)
# print(imglist)
load_file_path_list = []
for file_name in imglist:
load_file_path_list.append([int(file_name.split('.')[0]), file_name])
# sort
load_file_path_list.sort()
imglist_sorted = [file_name for idx, file_name in load_file_path_list]
print(imglist_sorted)
imglist = imglist_sorted
num_frame = len(imglist)
num_images = num_frame
print('Loaded Photo: {} images.'.format(num_images))
    # Clear the GPU cache
    torch.cuda.empty_cache()
    # Show GPU memory usage
    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
    meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
    print('GPU mem used: %.10f G' % (meminfo.used / (1024 * 1024 * 1024)), 'before img itr')
# pynvml.nvmlShutdown()
    # Region checker
custom_checker = CustomChecker('./cfgs/img_defult.txt')
    total_time_list = []  # preset empty list
    detect_time_list = []  # preset empty list
    nms_time_list = []  # preset empty list
while num_images > 0:
total_tic = time.time()
if webcam_num == -1:
num_images -= 1
# Get image from the webcam
if webcam_num >= 0:
if not cap.isOpened():
raise RuntimeError("Webcam could not open. Please check connection.")
ret, frame_bgr = cap.read()
im_bgr = np.array(frame_bgr)
# bgr -> rgb
im_rgb = im_bgr[:, :, ::-1]
# Load the demo image
else:
            image_idx = num_frame - num_images - 1
# print('image load: ', imglist[image_idx])
im_file = os.path.join(args.image_dir, imglist[image_idx])
# im = cv2.imread(im_file)
im_rgb = np.array(imread(im_file))
# rgb -> bgr
im_bgr = im_rgb[:, :, ::-1]
if len(im_rgb.shape) == 2:
im_rgb = im_rgb[:, :, np.newaxis]
im_rgb = np.concatenate((im_rgb, im_rgb, im_rgb), axis=2)
# in image is rgb
im_in = im_rgb
blobs, im_scales = _get_image_blob(im_in)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs
im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
im_data_pt = torch.from_numpy(im_blob)
im_data_pt = im_data_pt.permute(0, 3, 1, 2)
im_info_pt = torch.from_numpy(im_info_np)
im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
gt_boxes.resize_(1, 1, 5).zero_()
num_boxes.resize_(1).zero_()
        # Show GPU memory usage
# handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
# meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
# print('GPU meme used: %.10f G' % (meminfo.used / (1024 * 1024 * 1024)), 'befor go in net', num_images+1)
# pdb.set_trace()
        # Start inference
det_tic = time.time()
with torch.no_grad():
rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox, rois_label = \
fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
        # Show GPU memory usage
# handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
# meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
# print('GPU meme used: %.10f G' % (meminfo.used / (1024 * 1024 * 1024)), 'after go in net', num_images+1)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4)
else:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= im_scales[0]
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
detect_time_list.append(detect_time)
        # Start NMS
nms_tic = time.time()
# test
# im2show = constraint_check(im_bgr, pascal_classes, scores, pred_boxes, thresh,
# class_agnostic=args.class_agnostic)
all_cls_dets = []
for j in range(1, len(pascal_classes)):
inds = torch.nonzero(scores[:, j] > thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:, j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
# keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=not cfg.USE_GPU_NMS)
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
#
all_cls_dets.append(cls_dets.cpu().numpy())
else:
all_cls_dets.append([])
nms_toc = time.time()
nms_time = nms_toc - nms_tic
nms_time_list.append(nms_time)
        # Draw boxes and text
# plot box and label
im2show = np.copy(im_bgr)
# thresh check // regional check // identify check // path predict
all_cls_dets = custom_checker.thresh_check(all_cls_dets, thresh=0.95)
im2show, all_cls_dets = custom_checker.regional_check(im2show, all_cls_dets)
im2show, all_cls_dets, all_cls_labels, all_cls_speeds = custom_checker.identify_check(im2show, all_cls_dets)
im2show = custom_checker.path_predict(im2show)
im2show = custom_checker.count_check(im2show)
# plot box and label
for j in range(1, len(pascal_classes)):
if len(all_cls_dets[j-1]): # no value check
cls_dets = all_cls_dets[j-1]
cls_labels = all_cls_labels[j-1]
cls_speeds = all_cls_speeds[j-1]
im2show = vis_detections_beautiful(im2show, pascal_classes[j], cls_dets, cls_labels=cls_labels, cls_speeds=cls_speeds)
# plot string
# model info
model_name = args.net
file_name = load_name
# gpu info
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
mem_used = meminfo.used / (1024 * 1024 * 1024) # bit --> G
# average calculate
detect_time_avg = _get_avg(detect_time_list)
nms_time_avg = _get_avg(nms_time_list)
if total_time_list:
            total_time = total_time_list[-1]  # use the previous frame's total time for this frame's display; the first frame uses the preset value
total_time_avg = _get_avg(total_time_list)
else:
total_time = 0.888 # any no-zero value
total_time_avg = 0.888
# fps calculate
frame_rate = 1 / total_time
frame_rate_avg = 1 / total_time_avg
# need time calculate
need_time = num_images / frame_rate
need_time_avg = num_images / frame_rate_avg
im2show = vis_text_beautiful(im2show, [gpu_name, mem_used, mem_total, model_name, file_name, detect_time_avg,
nms_time_avg, total_time_avg, frame_rate_avg])
if webcam_num >= 0:
cv2.imshow('frame', cv2.resize(im2show, None, fx=1.0, fy=1.0))
if cv2.waitKey(10) & 0xFF == ord('q'):
break
else:
if vis:
cv2.imshow('frame', cv2.resize(im2show, None, fx=1.0, fy=1.0))
if cv2.waitKey(10) & 0xFF == ord('q'):
break
result_path = os.path.join(args.image_dir, imglist[image_idx][:-4] + "_det.jpg")
cv2.imwrite(result_path, im2show)
# print sys
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s {:.3f}s fps: {:.3f} Hz need_time: {:.3f}s \r'
.format(num_images + 1, num_frame, detect_time_avg, nms_time_avg, total_time_avg, frame_rate_avg,
need_time_avg))
sys.stdout.flush()
total_toc = time.time()
total_time = total_toc - total_tic
total_time_list.append(total_time)
        # Clear the GPU cache
        torch.cuda.empty_cache()
        # Show GPU memory usage
# handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
# meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
# print('GPU meme used: %.10f G' % (meminfo.used / (1024 * 1024 * 1024)), 'after empty cache')
    # clean workspace
pynvml.nvmlShutdown()
if webcam_num >= 0:
cap.release()
cv2.destroyAllWindows()
    print('All Done!')
| 39.150268
| 134
| 0.588714
|
93282111364942e2989191edf942adc0379d49a4
| 5,200
|
py
|
Python
|
libp2p/crypto/pb/crypto_pb2.py
|
g-r-a-n-t/py-libp2p
|
36a4a9150dcc53b42315b5c6868fccde5083963b
|
[
"Apache-2.0",
"MIT"
] | 315
|
2019-02-13T01:29:09.000Z
|
2022-03-28T13:44:07.000Z
|
libp2p/crypto/pb/crypto_pb2.py
|
pipermerriam/py-libp2p
|
379a157d6b67e86a616b2458af519bbe5fb26a51
|
[
"Apache-2.0",
"MIT"
] | 249
|
2019-02-22T05:00:07.000Z
|
2022-03-29T16:30:46.000Z
|
libp2p/crypto/pb/crypto_pb2.py
|
ralexstokes/py-libp2p
|
5144ab82894623969cb17baf0d4c64bd0a274068
|
[
"Apache-2.0",
"MIT"
] | 77
|
2019-02-24T19:45:17.000Z
|
2022-03-30T03:20:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libp2p/crypto/pb/crypto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='libp2p/crypto/pb/crypto.proto',
package='crypto.pb',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x1dlibp2p/crypto/pb/crypto.proto\x12\tcrypto.pb\"?\n\tPublicKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c\"@\n\nPrivateKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c*9\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x0b\n\x07\x45\x64\x32\x35\x35\x31\x39\x10\x01\x12\r\n\tSecp256k1\x10\x02\x12\t\n\x05\x45\x43\x44SA\x10\x03')
)
_KEYTYPE = _descriptor.EnumDescriptor(
name='KeyType',
full_name='crypto.pb.KeyType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RSA', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ed25519', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Secp256k1', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ECDSA', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=175,
serialized_end=232,
)
_sym_db.RegisterEnumDescriptor(_KEYTYPE)
KeyType = enum_type_wrapper.EnumTypeWrapper(_KEYTYPE)
RSA = 0
Ed25519 = 1
Secp256k1 = 2
ECDSA = 3
_PUBLICKEY = _descriptor.Descriptor(
name='PublicKey',
full_name='crypto.pb.PublicKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_type', full_name='crypto.pb.PublicKey.key_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='crypto.pb.PublicKey.data', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=107,
)
_PRIVATEKEY = _descriptor.Descriptor(
name='PrivateKey',
full_name='crypto.pb.PrivateKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_type', full_name='crypto.pb.PrivateKey.key_type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='crypto.pb.PrivateKey.data', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=173,
)
_PUBLICKEY.fields_by_name['key_type'].enum_type = _KEYTYPE
_PRIVATEKEY.fields_by_name['key_type'].enum_type = _KEYTYPE
DESCRIPTOR.message_types_by_name['PublicKey'] = _PUBLICKEY
DESCRIPTOR.message_types_by_name['PrivateKey'] = _PRIVATEKEY
DESCRIPTOR.enum_types_by_name['KeyType'] = _KEYTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PublicKey = _reflection.GeneratedProtocolMessageType('PublicKey', (_message.Message,), {
'DESCRIPTOR' : _PUBLICKEY,
'__module__' : 'libp2p.crypto.pb.crypto_pb2'
# @@protoc_insertion_point(class_scope:crypto.pb.PublicKey)
})
_sym_db.RegisterMessage(PublicKey)
PrivateKey = _reflection.GeneratedProtocolMessageType('PrivateKey', (_message.Message,), {
'DESCRIPTOR' : _PRIVATEKEY,
'__module__' : 'libp2p.crypto.pb.crypto_pb2'
# @@protoc_insertion_point(class_scope:crypto.pb.PrivateKey)
})
_sym_db.RegisterMessage(PrivateKey)
# @@protoc_insertion_point(module_scope)
| 31.90184
| 466
| 0.736538
|
a20721822c56f15892aa3418261f0c2a69b1e0f5
| 7,880
|
py
|
Python
|
formal_solver.py
|
Goobley/Lightspinner
|
6214e9a3027f8a5aea01038a1524ebff13563258
|
[
"MIT"
] | 6
|
2020-02-13T11:20:38.000Z
|
2022-03-01T08:23:44.000Z
|
formal_solver.py
|
Goobley/Lightspinner
|
6214e9a3027f8a5aea01038a1524ebff13563258
|
[
"MIT"
] | null | null | null |
formal_solver.py
|
Goobley/Lightspinner
|
6214e9a3027f8a5aea01038a1524ebff13563258
|
[
"MIT"
] | 2
|
2020-04-10T02:09:59.000Z
|
2021-11-23T06:52:33.000Z
|
import numpy as np
from numba import njit
from dataclasses import dataclass
from utils import planck
@dataclass
class IPsi:
"""Holder for the I and PsiStar arrays in a single object.
"""
I: np.ndarray
PsiStar: np.ndarray
@njit
def w2(dtau):
"""Compute the analytic integration factors w0, w1 for the piecewise
linear short characteristics method.
Parameters
----------
dtau : float
Delta tau over the region for which the integration factors are
needed.
Returns
-------
w : np.ndarray
A 2 element array containing w0 and w1 -- these are calculated using
a Taylor series for small dtau, and set to constant values for large
dtau.
"""
w = np.empty(2)
if dtau < 5e-4:
w[0] = dtau * (1.0 - 0.5*dtau)
w[1] = dtau**2 * (0.5 - dtau / 3.0)
elif dtau > 50.0:
w[0] = 1.0
w[1] = 1.0
else:
expdt = np.exp(-dtau)
w[0] = 1.0 - expdt
w[1] = w[0] - dtau * expdt
return w
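# Worked example (values rounded): for dtau = 1.0 the exact branch gives
# w[0] = 1 - exp(-1) ~ 0.6321 and w[1] = w[0] - exp(-1) ~ 0.2642, while for
# dtau = 1e-4 the Taylor branch gives w[0] ~ 9.9995e-5 and w[1] ~ 5.0e-9,
# matching the exact expressions to within rounding error.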
@njit
def piecewise_1d_impl(muz, toFrom, Istart, z, chi, S):
"""Formal solver core integration function.
Compute the one-dimensional (plane parallel) formal solution and
approximate Psi operator (diagonal) for the specified parameters up or
down one ray for the source function and opacity at a given frequency.
This function uses the piecewise linear short characteristics method --
see [HM pp. 404-408], Auer & Paletou (1994) A&A, 285, 675-686, and Kunasz
& Auer (1988) JQSRT, 39, 67-79. For the approximate operator see [HM pp.
436-440] and [RH91/92]. Also see the notes attached to this code. The
piecewise linear method is somewhat crude, and requires a densely grid in
tau. Due to the nature of assuming piecewise linear variations of the
source function between the points this method can overestimate
substantially in regions where the source function has upwards curvature
and underestimate similarly in regions where the curvature is downwards.
This is however the simplest formal solver to implement, and performs
well enough for many problems.
Parameters
----------
muz : float
The cosine of the ray to the normal of the atmospheric slabs.
toFrom : bool
Whether the ray is upgoing (towards the observer), or downgoing
(True/False respectively).
Istart : float
The incoming intensity at the boundary where the integration starts.
z : np.ndarray
The height grid (1D array).
chi : np.ndarray
The total opacity grid (1D array).
S : np.ndarray
The total source function grid (1D array).
Returns
-------
I : np.ndarray
The intensity at each point on the grid (1D array).
PsiStar : np.ndarray
The approximate Psi operator (diagonal of the Lambda operator) at
each point on the grid (1D array).
"""
Nspace = chi.shape[0]
    # NOTE(cmo): Since a smaller mu "increases" the perceived thickness of the slab, the factor we need to use is 1/mu
zmu = 1.0 / muz
    # NOTE(cmo): It is simplest to set up the looping criteria separately, with dk being the loop step
if toFrom:
# NOTE(cmo): Upgoing ray / to observer
dk = -1
kStart = Nspace - 1
kEnd = 0
else:
# NOTE(cmo): Downgoing ray / away from observer
dk = 1
kStart = 0
kEnd = Nspace - 1
# dtau_uw = average opacity * slab thickness
dtau_uw = 0.5 * (chi[kStart] + chi[kStart + dk]) * zmu * np.abs(z[kStart] - z[kStart + dk])
# NOTE(cmo): dS_uw = dS / dtau i.e. c_1 on slides. Direction is opposite to
# forward derivative in z as "increases" away from the point at which we're
# solving -- in all directions.
dS_uw = (S[kStart] - S[kStart + dk]) / dtau_uw
Iupw = Istart
I = np.zeros(Nspace)
LambdaStar = np.zeros(Nspace)
    # NOTE(cmo): Initial point is equal to the boundary condition
I[kStart] = Iupw
LambdaStar[kStart] = 0.0
for k in range(kStart + dk, kEnd, dk):
# NOTE(cmo): Get analytic integration terms
w = w2(dtau_uw)
# NOTE(cmo): Compute I and LambdaStar
        # (1.0 - w[0]) = exp(-dtau) since w[0] = 1 - exp(-dtau), and this saves us recomputing the exp
I[k] = Iupw * (1.0 - w[0]) + w[0] * S[k] + w[1] * dS_uw
LambdaStar[k] = w[0] - w[1] / dtau_uw
# NOTE(cmo): dtau_dw and dS_dw like uw for next iteration
dtau_dw = 0.5 * (chi[k] + chi[k+dk]) * zmu * np.abs(z[k] - z[k+dk])
dS_dw = (S[k] - S[k+dk]) / dtau_dw
# NOTE(cmo): Set values (Iupw, dS_uw, dtau_uw) for next iteration
Iupw = I[k]
dS_uw = dS_dw
dtau_uw = dtau_dw
# NOTE(cmo): Do final point (exactly the same in this linear scheme)
I[kEnd] = (1.0 - w[0]) * Iupw + w[0] * S[k] + w[1] * dS_uw
LambdaStar[kEnd] = w[0] - w[1] / dtau_uw
# NOTE(cmo): Correctly make PsiStar by dividing LambdaStar by chi
return I, LambdaStar / chi
def piecewise_linear_1d(atmos, mu, toFrom, wav, chi, S):
"""One-dimensional Piecewise linear formal solver
Compute the one-dimensional (plane parallel) formal solution and
approximate Psi operator (diagonal) for the given atmosphere up or
down one ray for the source function and opacity at a given frequency.
The radiation boundary conditions assume that the lower boundary is
thermalised and the upper has no incident radiation.
This function uses the piecewise linear short characteristics method --
see [HM pp. 404-408], Auer & Paletou (1994) A&A, 285, 675-686, and Kunasz
& Auer (1988) JQSRT, 39, 67-79. For the approximate operator see [HM pp.
436-440] and [RH91/92]. Also see the notes attached to this code. The
    piecewise linear method is somewhat crude, and requires a dense grid in
tau. Due to the nature of assuming piecewise linear variations of the
source function between the points this method can overestimate
substantially in regions where the source function has upwards curvature
and underestimate similarly in regions where the curvature is downwards.
This is however the simplest formal solver to implement, and performs
well enough for many problems.
Parameters
----------
atmos : Atmosphere
The atmosphere object containing the stratification to compute the
formal solution through.
mu : int
The index of the ray to compute the formal solution for in atmos.muz.
toFrom : bool
Whether the ray is upgoing (towards the observer), or downgoing
(True/False respectively).
wav : float
The wavelength at which the formal solution is being computed (needed for
boundary conditions).
chi : np.ndarray
The total opacity for each depth point in the stratified
atmosphere(1D array).
S : np.ndarray
The total source function grid for each depth point in the stratified
atmosphere (1D array).
Returns
-------
IPsi
        A dataclass containing the intensity and approximate Psi operator at each
point in the atmosphere.
"""
zmu = 1.0 / atmos.muz[mu]
z = atmos.height
if toFrom:
dk = -1
kStart = atmos.Nspace - 1
kEnd = 0
else:
dk = 1
kStart = 0
kEnd = atmos.Nspace - 1
    # NOTE(cmo): Set up super simple boundary conditions -- thermalised for upgoing and zero for downgoing
if toFrom:
dtau_uw = zmu * (chi[kStart] + chi[kStart + dk]) * 0.5 * np.abs(z[kStart] - z[kStart + dk])
Bnu = planck(atmos.temperature[-2:], wav)
Iupw = Bnu[1] - (Bnu[0] - Bnu[1]) / dtau_uw
else:
Iupw = 0.0
I, PsiStar = piecewise_1d_impl(atmos.muz[mu], toFrom, Iupw, z, chi, S)
return IPsi(I, PsiStar)
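# A minimal usage sketch (not part of the original module): drive the core
# integrator directly on a constant-property slab. The grid sizes and values
# below are arbitrary illustrative choices; for an optically thick slab with
# constant source function the emergent intensity should approach S.
if __name__ == "__main__":
    Npts = 128
    z_demo = np.linspace(1.0e6, 0.0, Npts)    # height grid [m], top to bottom
    chi_demo = np.full(Npts, 1.0e-5)          # constant opacity [1/m]
    S_demo = np.ones(Npts)                    # constant source function
    # Upgoing ray at mu = 1 with no incident intensity at the lower boundary.
    I_demo, PsiStar_demo = piecewise_1d_impl(1.0, True, 0.0, z_demo, chi_demo, S_demo)
    print("Emergent intensity at the top of the slab:", I_demo[0])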
| 37.169811
| 118
| 0.637056
|
5b0eb627151373b21a3cf00c4a576ddc7f7f7ea0
| 702
|
py
|
Python
|
parkings/tests/test_models.py
|
mingfeng/parkkihubi
|
c817f8e852c1caee969c4ad59f686bf7c1e94526
|
[
"MIT"
] | 1
|
2021-05-20T13:07:29.000Z
|
2021-05-20T13:07:29.000Z
|
parkings/tests/test_models.py
|
mingfeng/parkkihubi
|
c817f8e852c1caee969c4ad59f686bf7c1e94526
|
[
"MIT"
] | null | null | null |
parkings/tests/test_models.py
|
mingfeng/parkkihubi
|
c817f8e852c1caee969c4ad59f686bf7c1e94526
|
[
"MIT"
] | null | null | null |
import datetime
from django.contrib.gis.geos import Point
from django.utils.timezone import now
from parkings.models import Address, Operator, Parking
def test_address_instance_creation():
Address(city="city", postal_code="12345", street="street")
def test_operator_instance_creation():
Operator(name="name", user_id=1)
def test_parking_instance_creation():
Parking(
device_identifier="device_identifier",
location=Point(60.193609, 24.951394),
operator_id=1,
registration_number="ABC-123",
resident_code="A",
special_code="SPECIAL",
time_end=now() + datetime.timedelta(days=1),
time_start=now(),
zone=3,
)
| 24.206897
| 62
| 0.690883
|
f35e7d39d5995d100fb80b1aea5e5ce012a854a6
| 799
|
py
|
Python
|
account/migrations/0009_auto_20211023_1816.py
|
codingwithahmad/tw-clone
|
9cf4197f40855216a59fbe9d974d5ee9a4863f7f
|
[
"MIT"
] | null | null | null |
account/migrations/0009_auto_20211023_1816.py
|
codingwithahmad/tw-clone
|
9cf4197f40855216a59fbe9d974d5ee9a4863f7f
|
[
"MIT"
] | null | null | null |
account/migrations/0009_auto_20211023_1816.py
|
codingwithahmad/tw-clone
|
9cf4197f40855216a59fbe9d974d5ee9a4863f7f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-10-23 14:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0008_alter_user_bio'),
]
operations = [
migrations.AlterField(
model_name='userfollowing',
name='following_user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='userfollowing',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followers', to=settings.AUTH_USER_MODEL),
),
]
| 30.730769
| 136
| 0.667084
|
50260a5de0aed1764a9e13c9f514a08e0e8e5193
| 9,193
|
py
|
Python
|
medexbot/spiders/med_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | 1
|
2022-03-17T03:02:49.000Z
|
2022-03-17T03:02:49.000Z
|
medexbot/spiders/med_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | null | null | null |
medexbot/spiders/med_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
import time
import scrapy
from django.db import IntegrityError
from django.utils.text import slugify
from crawler.models import Generic, Manufacturer
from medexbot.items import MedItem, GenericItem
class MedSpider(scrapy.Spider):
name = "med"
allowed_domains = ['medex.com.bd']
start_urls = ['https://medex.com.bd/brands?page=1', 'https://medex.com.bd/brands?herbal=1&page=1']
def clean_text(self, raw_html):
"""
:param raw_html: this will take raw html code
:return: text without html tags
"""
cleaner = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
return re.sub(cleaner, '', raw_html)
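    # For example (illustrative input): clean_text('<b>Paracetamol</b> 500 mg')
    # returns 'Paracetamol 500 mg'; HTML entities such as '&amp;' are removed
    # in the same pass.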
def parse(self, response, **kwargs):
for med_info in response.css('a.hoverable-block'):
med_page_links = med_info.css('a.hoverable-block ::attr("href") ')
yield from response.follow_all(med_page_links, self.parse_med)
pagination_links = response.css('a.page-link[rel="next"] ::attr("href") ')
yield from response.follow_all(pagination_links, self.parse)
def parse_generic(self, response):
item = GenericItem()
        item['generic_id'] = re.findall(r"generics/(\S*)/", response.url)[0]
        item['generic_name'] = response.css('h1.page-heading-1-l ::text').get().strip()
item['monograph_link'] = response.css('span.hidden-sm a ::attr(href)').get()
""" medicine description """
# indications
# generic_details['indications'] = response.css('div#indications h4 ::text').get().strip()
item['indication_description'] = response.xpath(
'//div[@id="indications"]/following-sibling::node()[2]').get().strip()
# ###Therapeutic Class
# therapeutic_class = extract_with_css('div#drug_classes h4 ::text')
item['therapeutic_class_description'] = response.xpath(
'//div[@id="drug_classes"]/following-sibling::node()[2]').get()
# ###Pharmacology
# pharmacology = extract_with_css('div#mode_of_action h4 ::text')
item['pharmacology_description'] = response.xpath(
'//div[@id="mode_of_action"]/following-sibling::node()[2]').get()
# ##Dosage
# dosage = extract_with_css('div#dosage h4 ::text')
item['dosage_description'] = response.xpath('//div[@id="dosage"]/following-sibling::node()[2]').get()
# ##Administration
# administration = extract_with_css('div#administration h4 ::text')
item['administration_description'] = response.xpath(
'//div[@id="administration"]/following-sibling::node()[2]').get()
# ##Interaction
# interaction = extract_with_css('div#interaction h4 ::text')
item['interaction_description'] = response.xpath(
'//div[@id="interaction"]/following-sibling::node()[2]').get()
# ##Contraindications
# contraindications = extract_with_css('div#contraindications h4 ::text')
item['contraindications_description'] = response.xpath(
'//div[@id="contraindications"]/following-sibling::node()[2]').get()
# ##Side Effects
# side_effects = extract_with_css('div#side_effects h4 ::text')
item['side_effects_description'] = response.xpath(
'//div[@id="side_effects"]/following-sibling::node()[2]').get()
# ##Pregnancy & Lactation
# pregnancy_and_lactation = extract_with_css('div#pregnancy_cat h4 ::text')
item['pregnancy_and_lactation_description'] = response.xpath(
'//div[@id="pregnancy_cat"]/following-sibling::node()[2]').get()
# ## Precautions
# precautions = extract_with_css('div#precautions h4 ::text')
item['precautions_description'] = response.xpath(
'//div[@id="precautions"]/following-sibling::node()[2]').get()
# ## Use in Special Populations
# pediatric_usage = extract_with_css('div#pediatric_uses h4 ::text')
item['pediatric_usage_description'] = response.xpath(
'//div[@id="pediatric_uses"]/following-sibling::node()[2]').get()
# ##Overdose Effects
# overdose_effects = extract_with_css('div#overdose_effects h4 ::text')
item['overdose_effects_description'] = response.xpath(
'//div[@id="overdose_effects"]/following-sibling::node()[2]').get()
# ##Duration of treatment
# duration_of_treatment = extract_with_css('div#duration_of_treatment h4 ::text')
item['duration_of_treatment_description'] = response.xpath(
'//div[@id="duration_of_treatment"]/following-sibling::node()[2]').get()
# ##Reconstitution
# reconstitution = extract_with_css('div#reconstitution h4 ::text')
item['reconstitution_description'] = response.xpath(
'//div[@id="reconstitution"]/following-sibling::node()[2]').get()
# ##Storage Conditions
# storage_conditions = extract_with_css('div#storage_conditions h4 ::text')
item['storage_conditions_description'] = response.xpath(
'//div[@id="storage_conditions"]/following-sibling::node()[2]').get()
item['slug'] = slugify(item['generic_name'] + '-' + item['generic_id'],
allow_unicode=True)
yield item
def parse_med(self, response):
def extract_with_css(query):
return response.css(query).get(default='').strip()
item = MedItem()
item['brand_id'] = re.findall(r"brands/(\S*)/", response.url)[0]
item['brand_name'] = response.css('h1.page-heading-1-l span ::text').getall()[0].strip()
item['type'] = 'herbal' if response.css(
'h1.page-heading-1-l img ::attr(alt)').get().strip() == 'Herbal' else 'allopathic'
item['dosage_form'] = extract_with_css('small.h1-subtitle ::text')
# generic_name = extract_with_css('div[title="Generic Name"] a ::text')
item['strength'] = extract_with_css('div[title="Strength"] ::text')
# manufacturer extraction
manufacturer_link = extract_with_css('div[title ="Manufactured by"] a ::attr(href)')
manufacturer_id = re.findall(r"companies/(\d+)/", manufacturer_link)[0]
manufacturer_name = extract_with_css('div[title ="Manufactured by"] a ::text')
try:
item['manufacturer'] = Manufacturer.objects.get(manufacturer_id=manufacturer_id)
except Manufacturer.DoesNotExist as me:
logging.info(me)
item['manufacturer'] = Manufacturer.objects.create(manufacturer_id=manufacturer_id,
manufacturer_name=manufacturer_name,
slug=slugify(manufacturer_name + '-' +
manufacturer_id,
allow_unicode=True))
except IntegrityError as ie:
logging.info(ie)
item['manufacturer'] = None
# med_details['package_container'] = [self.clean_text(spec_value).strip() for spec_value in response.css(
# 'div.package-container').getall()]
# todo : debug package container
# https://medex.com.bd/brands/7701/3rd-cef-100mg
# https://medex.com.bd/brands/9538/3-f-500mg
# check all the dosage forms and add exceptions https://medex.com.bd/dosage-forms
# todo : debug veterinary
# https://medex.com.bd/brands/31317/a-mectin-vet-10mg
# item['package_container'] = ' '.join(extract_with_css('div.package-container ::text').split())
# item['pack_size_info'] = ' '.join(extract_with_css('span.pack-size-info ::text').split())
# todo : remove overlapping pack size info
package_container = ','.join(
[re.sub(r'\s+', ' ', i).strip() for i in response.css('div.package-container ::text').getall()])
pack_size_info = ','.join(
[re.sub(r'\s+', ' ', i).strip() for i in response.css('span.pack-size-info ::text').getall() if
             i.strip() != ''])
item['package_container'] = package_container
item['pack_size_info'] = pack_size_info
item['slug'] = slugify(item['brand_name'] + item['dosage_form'] + item['strength'],
allow_unicode=True)
# generic extraction
generic_link = extract_with_css('div[title="Generic Name"] a ::attr(href)')
generic_id = re.findall(r"generics/(\S*)/", generic_link)[0]
try:
item['generic'] = Generic.objects.get(generic_id=generic_id)
except Generic.DoesNotExist as ge:
logging.info(ge)
"""
save the generics id with medicines id to map them later
"""
with open('generic_id.txt', 'a') as f:
f.write(item['brand_id']+','+generic_id + '\n')
yield response.follow(generic_link, self.parse_generic)
item['generic'] = None
except IntegrityError as ie:
logging.info(ie)
item['generic'] = None
yield item
| 46.903061
| 113
| 0.602306
|
33f4e44f2ba5f4498348527cfad8e0562526616d
| 401
|
py
|
Python
|
work/Aufgabe10_moc/run_all.py
|
hobler/miniTopSim
|
6ead4c5ed9cc1459f019af4bfa899c46c0d2fb22
|
[
"MIT"
] | 16
|
2016-11-21T13:24:52.000Z
|
2022-03-24T11:54:50.000Z
|
work/Aufgabe10_moc/run_all.py
|
hobler/miniTopSim
|
6ead4c5ed9cc1459f019af4bfa899c46c0d2fb22
|
[
"MIT"
] | 1
|
2016-11-29T14:08:33.000Z
|
2016-11-29T19:12:35.000Z
|
work/Aufgabe10_moc/run_all.py
|
hobler/miniTopSim
|
6ead4c5ed9cc1459f019af4bfa899c46c0d2fb22
|
[
"MIT"
] | 11
|
2015-11-19T14:29:34.000Z
|
2021-01-02T14:24:04.000Z
|
"""
Script to run all necessary simulations consecutively
"""
from mini_topsim.main import mini_topsim
mini_topsim('cosine_norm_noredep.cfg')
mini_topsim('cosine_moc_noredep.cfg')
mini_topsim('cosine_norm_redep.cfg')
mini_topsim('cosine_moc_redep.cfg')
mini_topsim('gauss_norm_noredep.cfg')
mini_topsim('gauss_moc_noredep.cfg')
mini_topsim('gauss_norm_redep.cfg')
mini_topsim('gauss_moc_redep.cfg')
| 26.733333
| 53
| 0.825436
|
403eafc96194607b48d78cb014f0430ccdc13743
| 4,584
|
py
|
Python
|
mcts.py
|
polySML/multiobj-rationale
|
64da1e55120f6224b2b9e9f466c8b2953494446f
|
[
"MIT"
] | 80
|
2020-07-03T04:30:00.000Z
|
2022-03-14T10:50:35.000Z
|
mcts.py
|
polySML/multiobj-rationale
|
64da1e55120f6224b2b9e9f466c8b2953494446f
|
[
"MIT"
] | 9
|
2020-07-08T16:39:56.000Z
|
2021-11-29T03:40:59.000Z
|
mcts.py
|
wengong-jin/multiobj-rationale
|
7d32f3f7edff69c877fc29d2e469b341aa026005
|
[
"MIT"
] | 24
|
2020-07-11T17:29:25.000Z
|
2022-02-03T22:54:06.000Z
|
import sys
import argparse
import math
from rdkit import Chem
from functools import partial
from multiprocessing import Pool
from fuseprop import find_clusters, extract_subgraph
from properties import get_scoring_function
MIN_ATOMS = 15
C_PUCT = 10
class MCTSNode():
def __init__(self, smiles, atoms, W=0, N=0, P=0):
self.smiles = smiles
self.atoms = set(atoms)
self.children = []
self.W = W
self.N = N
self.P = P
def Q(self):
return self.W / self.N if self.N > 0 else 0
def U(self, n):
return C_PUCT * self.P * math.sqrt(n) / (1 + self.N)
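    # Child selection (see mcts_rollout below) follows the PUCT rule: the child
    # maximising Q + U is descended, where U = C_PUCT * P * sqrt(N_parent) /
    # (1 + N_child) balances the child's prior score P (from the scoring
    # function) against its visit count.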
def mcts_rollout(node, state_map, orig_smiles, clusters, atom_cls, nei_cls, scoring_function):
#print('cur_node', node.smiles, node.P, node.N, node.W)
cur_atoms = node.atoms
if len(cur_atoms) <= MIN_ATOMS:
return node.P
# Expand if this node has never been visited
if len(node.children) == 0:
cur_cls = set( [i for i,x in enumerate(clusters) if x <= cur_atoms] )
for i in cur_cls:
leaf_atoms = [a for a in clusters[i] if len(atom_cls[a] & cur_cls) == 1]
if len(nei_cls[i] & cur_cls) == 1 or len(clusters[i]) == 2 and len(leaf_atoms) == 1:
new_atoms = cur_atoms - set(leaf_atoms)
new_smiles, _ = extract_subgraph(orig_smiles, new_atoms)
#print('new_smiles', node.smiles, '->', new_smiles)
if new_smiles in state_map:
new_node = state_map[new_smiles] # merge identical states
else:
new_node = MCTSNode(new_smiles, new_atoms)
if new_smiles:
node.children.append(new_node)
state_map[node.smiles] = node
if len(node.children) == 0: return node.P # cannot find leaves
scores = scoring_function([x.smiles for x in node.children])
for child, score in zip(node.children, scores):
child.P = score
sum_count = sum([c.N for c in node.children])
selected_node = max(node.children, key=lambda x : x.Q() + x.U(sum_count))
v = mcts_rollout(selected_node, state_map, orig_smiles, clusters, atom_cls, nei_cls, scoring_function)
selected_node.W += v
selected_node.N += 1
return v
def mcts(smiles, scoring_function, n_rollout, max_atoms, prop_delta):
mol = Chem.MolFromSmiles(smiles)
clusters, atom_cls = find_clusters(mol)
nei_cls = [0] * len(clusters)
for i,cls in enumerate(clusters):
nei_cls[i] = [nei for atom in cls for nei in atom_cls[atom]]
nei_cls[i] = set(nei_cls[i]) - set([i])
clusters[i] = set(list(cls))
for a in range(len(atom_cls)):
atom_cls[a] = set(atom_cls[a])
root = MCTSNode( smiles, set(range(mol.GetNumAtoms())) )
state_map = {smiles : root}
for _ in range(n_rollout):
mcts_rollout(root, state_map, smiles, clusters, atom_cls, nei_cls, scoring_function)
rationales = [node for _,node in state_map.items() if len(node.atoms) <= max_atoms and node.P >= prop_delta]
return smiles, rationales
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True)
parser.add_argument('--prop', required=True)
parser.add_argument('--rollout', type=int, default=20)
parser.add_argument('--c_puct', type=float, default=10)
parser.add_argument('--max_atoms', type=int, default=20)
parser.add_argument('--min_atoms', type=int, default=15)
parser.add_argument('--prop_delta', type=float, default=0.5)
parser.add_argument('--ncand', type=int, default=2)
parser.add_argument('--ncpu', type=int, default=15)
args = parser.parse_args()
scoring_function = get_scoring_function(args.prop)
scoring_function.clf.n_jobs = 1
C_PUCT = args.c_puct
MIN_ATOMS = args.min_atoms
with open(args.data) as f:
next(f)
data = [line.split(',')[0] for line in f]
work_func = partial(mcts, scoring_function=scoring_function,
n_rollout=args.rollout,
max_atoms=args.max_atoms,
prop_delta=args.prop_delta)
pool = Pool(args.ncpu)
results = pool.map(work_func, data)
rset = set()
for orig_smiles, rationales in results:
rationales = sorted(rationales, key=lambda x:len(x.atoms))
for x in rationales[:args.ncand]:
if x.smiles not in rset:
print(orig_smiles, x.smiles, len(x.atoms), x.P)
rset.add(x.smiles)
| 36.672
| 112
| 0.624782
|
ff6482511d76456e602c08136ad0fadac26bd2e3
| 440
|
py
|
Python
|
metaci/plan/migrations/0016_plan_test_dashboard.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | 48
|
2018-10-24T14:52:06.000Z
|
2022-03-25T21:14:50.000Z
|
metaci/plan/migrations/0016_plan_test_dashboard.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | 2,034
|
2018-10-31T20:59:16.000Z
|
2022-03-22T21:38:03.000Z
|
metaci/plan/migrations/0016_plan_test_dashboard.py
|
abhishekalgo/metaci
|
cd62473b3fb85fb0f39623f9fb2850993ff708a5
|
[
"BSD-3-Clause"
] | 27
|
2018-12-24T18:16:23.000Z
|
2021-12-15T17:57:27.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-07-23 22:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("plan", "0015_auto_20180314_2252")]
operations = [
migrations.AddField(
model_name="plan",
name="test_dashboard",
field=models.BooleanField(default=False),
)
]
avg_line_length: 23.157895 | max_line_length: 56 | alphanum_fraction: 0.643182

hexsha: 531d961fe2ba1b320a7ab251bf77e7e5c81f7fc8 | size: 1,999 | ext: py | lang: Python
repo: elahd/ha-nyc311 @ c7555a8af316fc7e44f9a5d794a0872eb534bbe1
path: pylint/plugins/hass_logger.py
licenses: ["MIT"]
stars: 1 (2022-02-03T02:12:27.000Z to 2022-02-03T02:12:27.000Z)
issues: 3 (2022-02-04T04:29:34.000Z to 2022-02-24T22:23:06.000Z)
forks: null
"""Plugin for logger invocations."""
import astroid
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from pylint.lint import PyLinter
LOGGER_NAMES = ("LOGGER", "log")
LOG_LEVEL_ALLOWED_LOWER_START = ("debug",)
class HassLoggerFormatChecker(BaseChecker): # type: ignore[misc]
"""Checker for logger invocations."""
__implements__ = IAstroidChecker
name = "hasslog"
priority = -1
msgs = {
"W0001": (
"User visible logger messages must not end with a period",
"hass-logger-period",
"Periods are not permitted at the end of logger messages",
),
"W0002": (
"User visible logger messages must start with a capital letter or downgrade to debug",
"hass-logger-capital",
"All logger messages must start with a capital letter",
),
}
options = ()
def visit_call(self, node: astroid.Call) -> None:
"""Called when a Call node is visited."""
if not isinstance(node.func, astroid.Attribute) or not isinstance(
node.func.expr, astroid.Name
):
return
        if node.func.expr.name not in LOGGER_NAMES:
return
if not node.args:
return
first_arg = node.args[0]
if not isinstance(first_arg, astroid.Const) or not first_arg.value:
return
log_message = first_arg.value
if len(log_message) < 1:
return
if log_message[-1] == ".":
self.add_message("hass-logger-period", node=node)
if (
isinstance(node.func.attrname, str)
and node.func.attrname not in LOG_LEVEL_ALLOWED_LOWER_START
and log_message[0].upper() != log_message[0]
):
self.add_message("hass-logger-capital", node=node)
def register(linter: PyLinter) -> None:
"""Register the checker."""
linter.register_checker(HassLoggerFormatChecker(linter))
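
# --- Illustration (not part of the original file) ---
# A hedged illustration of what this checker flags once the plugin is loaded;
# the assumption is that the module is importable and listed under pylint's
# load-plugins option. Message ids refer to the msgs dict above.
import logging

LOGGER = logging.getLogger(__name__)

LOGGER.info("Connected to the API.")       # W0001 hass-logger-period: trailing period
LOGGER.warning("connection was dropped")   # W0002 hass-logger-capital: lower-case start
LOGGER.debug("verbose details are fine")   # allowed: "debug" is in LOG_LEVEL_ALLOWED_LOWER_START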
avg_line_length: 28.971014 | max_line_length: 98 | alphanum_fraction: 0.614307

hexsha: f5ee537fbbf5d97712abe290f36d2d13dd360b6d | size: 3,983 | ext: py | lang: Python
repo: nickdelgrosso/dvc @ 8d9435e8ee99b9899cd8d3277ea8cae8cc281154
path: tests/func/params/test_diff.py
licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
def test_diff_no_params(tmp_dir, scm, dvc):
assert dvc.params.diff() == {}
def test_diff_no_changes(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("bar")
assert dvc.params.diff() == {}
def test_diff(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("bar")
tmp_dir.scm_gen("params.yaml", "foo: baz", commit="baz")
tmp_dir.scm_gen("params.yaml", "foo: qux", commit="qux")
assert dvc.params.diff(a_rev="HEAD~2") == {
"params.yaml": {"foo": {"old": "bar", "new": "qux"}}
}
def test_diff_new(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
assert dvc.params.diff() == {
"params.yaml": {"foo": {"old": None, "new": "bar"}}
}
def test_diff_deleted(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("bar")
(tmp_dir / "params.yaml").unlink()
assert dvc.params.diff() == {
"params.yaml": {"foo": {"old": "bar", "new": None}}
}
def test_diff_deleted_config(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("bar")
(tmp_dir / "params.yaml").unlink()
assert dvc.params.diff() == {
"params.yaml": {"foo": {"old": "bar", "new": None}}
}
def test_diff_list(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo:\n- bar\n- baz")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("foo")
tmp_dir.gen("params.yaml", "foo:\n- bar\n- baz\n- qux")
assert dvc.params.diff() == {
"params.yaml": {
"foo": {"old": "['bar', 'baz']", "new": "['bar', 'baz', 'qux']"}
}
}
def test_diff_dict(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo:\n bar: baz")
dvc.run(cmd="echo params.yaml", params=["foo"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("foo")
tmp_dir.gen("params.yaml", "foo:\n bar: qux")
assert dvc.params.diff() == {
"params.yaml": {"foo.bar": {"old": "baz", "new": "qux"}}
}
def test_diff_with_unchanged(tmp_dir, scm, dvc):
tmp_dir.gen("params.yaml", "foo: bar\nxyz: val")
dvc.run(cmd="echo params.yaml", params=["foo,xyz"], single_stage=True)
scm.add(["params.yaml", "Dvcfile"])
scm.commit("bar")
tmp_dir.scm_gen("params.yaml", "foo: baz\nxyz: val", commit="baz")
tmp_dir.scm_gen("params.yaml", "foo: qux\nxyz: val", commit="qux")
assert dvc.params.diff(a_rev="HEAD~2", all=True) == {
"params.yaml": {
"foo": {"old": "bar", "new": "qux"},
"xyz": {"old": "val", "new": "val"},
}
}
def test_pipeline_tracked_params(tmp_dir, scm, dvc, run_copy):
from dvc.dvcfile import PIPELINE_FILE
tmp_dir.gen({"foo": "foo", "params.yaml": "foo: bar\nxyz: val"})
run_copy("foo", "bar", name="copy-foo-bar", params=["foo,xyz"])
scm.add(["params.yaml", PIPELINE_FILE])
scm.commit("add stage")
tmp_dir.scm_gen("params.yaml", "foo: baz\nxyz: val", commit="baz")
tmp_dir.scm_gen("params.yaml", "foo: qux\nxyz: val", commit="qux")
assert dvc.params.diff(a_rev="HEAD~2") == {
"params.yaml": {"foo": {"old": "bar", "new": "qux"}}
}
def test_no_commits(tmp_dir):
from dvc.repo import Repo
from dvc.scm.git import Git
from tests.dir_helpers import git_init
git_init(".")
assert Git().no_commits
assert Repo.init().params.diff() == {}
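
# --- Usage sketch (not part of the original file) ---
# Outside the pytest fixtures, the API these tests exercise can be reached
# directly through dvc's Repo object. A rough sketch, assuming it runs inside
# an initialized git + DVC repository.
from dvc.repo import Repo

repo = Repo(".")                            # assumes cwd is a DVC repository
print(repo.params.diff())                   # working tree vs HEAD
print(repo.params.diff(a_rev="HEAD~2"))     # vs an older revision, as in test_diff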
avg_line_length: 29.947368 | max_line_length: 76 | alphanum_fraction: 0.589254

hexsha: 8f8aff94a598a52320679c7051f303b58a09a785 | size: 4,033 | ext: py | lang: Python
repo: louis-xy/redash @ 36c93ce2120e15d63aff4047bb464cdc41052e19
path: redash/query_runner/impala_ds.py
licenses: ["BSD-2-Clause"]
stars: 6 (2017-04-14T14:06:37.000Z to 2018-09-07T06:29:27.000Z)
issues: 1 (2021-02-24T04:38:42.000Z to 2021-02-24T04:38:42.000Z)
forks: 2 (2017-04-02T08:28:53.000Z to 2021-01-26T09:50:03.000Z)
import json
import logging
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from impala.dbapi import connect
from impala.error import DatabaseError, RPCError
enabled = True
except ImportError, e:
enabled = False
COLUMN_NAME = 0
COLUMN_TYPE = 1
types_map = {
'BIGINT': TYPE_INTEGER,
'TINYINT': TYPE_INTEGER,
'SMALLINT': TYPE_INTEGER,
'INT': TYPE_INTEGER,
'DOUBLE': TYPE_FLOAT,
'DECIMAL': TYPE_FLOAT,
'FLOAT': TYPE_FLOAT,
'REAL': TYPE_FLOAT,
'BOOLEAN': TYPE_BOOLEAN,
'TIMESTAMP': TYPE_DATETIME,
'CHAR': TYPE_STRING,
'STRING': TYPE_STRING,
'VARCHAR': TYPE_STRING
}
class Impala(BaseSQLQueryRunner):
noop_query = "show schemas"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"host": {
"type": "string"
},
"port": {
"type": "number"
},
"protocol": {
"type": "string",
"title": "Please specify beeswax or hiveserver2"
},
"database": {
"type": "string"
},
"use_ldap": {
"type": "boolean"
},
"ldap_user": {
"type": "string"
},
"ldap_password": {
"type": "string"
},
"timeout": {
"type": "number"
}
},
"required": ["host"],
"secret": ["ldap_password"]
}
@classmethod
def type(cls):
return "impala"
def __init__(self, configuration):
super(Impala, self).__init__(configuration)
def _get_tables(self, schema_dict):
schemas_query = "show schemas;"
tables_query = "show tables in %s;"
columns_query = "show column stats %s.%s;"
for schema_name in map(lambda a: unicode(a['name']), self._run_query_internal(schemas_query)):
for table_name in map(lambda a: unicode(a['name']), self._run_query_internal(tables_query % schema_name)):
columns = map(lambda a: unicode(a['Column']), self._run_query_internal(columns_query % (schema_name, table_name)))
if schema_name != 'default':
table_name = '{}.{}'.format(schema_name, table_name)
schema_dict[table_name] = {'name': table_name, 'columns': columns}
return schema_dict.values()
def run_query(self, query, user):
connection = None
try:
connection = connect(**self.configuration.to_dict())
cursor = connection.cursor()
cursor.execute(query)
column_names = []
columns = []
for column in cursor.description:
column_name = column[COLUMN_NAME]
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': types_map.get(column[COLUMN_TYPE], None)
})
rows = [dict(zip(column_names, row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
cursor.close()
except DatabaseError as e:
json_data = None
error = e.message
except RPCError as e:
json_data = None
error = "Metastore Error [%s]" % e.message
except KeyboardInterrupt:
connection.cancel()
error = "Query cancelled by user."
json_data = None
finally:
if connection:
connection.close()
return json_data, error
register(Impala)
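
# --- Configuration sketch (not part of the original file) ---
# A hedged example of the configuration dictionary described by
# configuration_schema above; all values are placeholders and only "host"
# is required by the schema.
impala_configuration = {
    "host": "impala.example.com",
    "port": 21050,                 # placeholder; common HiveServer2 port
    "protocol": "hiveserver2",     # schema: "beeswax or hiveserver2"
    "database": "default",
    "use_ldap": False,
    "timeout": 60,
}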
avg_line_length: 28.006944 | max_line_length: 130 | alphanum_fraction: 0.517233