blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39175b46f026c2a7c34d3544deb04048a2fcc655 | 738e837a45630e6a13ffbc4067cb825a04142200 | /docs/source/conf.py | a41335fd4535d4dde3c51d10850e649d15fe56b7 | [
"BSD-3-Clause"
] | permissive | ceholden/cedar-datacube | 4e7abdb33808edb2a3d20114f41eecb02fe4094f | d9463a28ce52665faaed069481d34a5ebe60558e | refs/heads/master | 2020-04-25T12:21:10.182400 | 2019-08-26T17:50:21 | 2019-08-26T17:50:21 | 172,775,111 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,092 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cedar documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
# NOTE(review): assumes Sphinx is always invoked with the working directory
# set to docs/source, so the parent of cwd is the "docs" directory -- confirm
# builds are never launched from elsewhere before relying on this.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cedar  # imported here (after the path tweak) so __version__ is readable below
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions enabled for this build (autodoc/autosummary for API docs,
# napoleon + numpydoc for docstring styles, IPython directives for executable
# examples, programoutput/bibtex from sphinxcontrib).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'numpydoc',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
    'sphinxcontrib.programoutput',
    'sphinxcontrib.bibtex'
]
# https://github.com/numpy/numpydoc/issues/69
numpydoc_show_class_members = False
# Mapping to other project docs so we can link to classes, functions, etc
# The Python inventory URL is pinned to the interpreter version building the docs.
_py_version = f'{sys.version_info.major}.{sys.version_info.minor}'
intersphinx_mapping = {
    'python': (f'https://docs.python.org/{_py_version}/', None),
    # Short aliases (np/pd/xr) duplicate the full names so either prefix
    # resolves in cross-references.
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'np': ('http://docs.scipy.org/doc/numpy/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'pd': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'xarray': ('http://xarray.pydata.org/en/stable/', None),
    'xr': ('http://xarray.pydata.org/en/stable/', None),
    'dask': ('http://docs.dask.org/en/latest/', None),
    'distributed': ('http://distributed.dask.org/en/latest/', None),
    'rasterio': ('https://rasterio.readthedocs.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cedar'
copyright = u"2019, Chris Holden"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cedar.__version__
# The full version, including alpha/beta/rc tags.
release = cedar.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs sets READTHEDOCS=True in the build environment and supplies
# its own theme there, so the RTD theme is only configured for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Context passed to the HTML templates; the github_* keys enable the
# "Edit on GitHub" links pointing at the master branch of this repo.
html_context = dict(
    display_github=True,
    github_user="ceholden",
    github_repo="cedar-datacube",
    github_version="master",
    conf_py_path="/docs/source/",
    source_suffix=".rst",
    css_files=[
        '_static/theme_overrides.css',  # override wide tables in RTD theme
    ]
)
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cedardoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cedar.tex',
u'cedar Documentation',
u'Chris Holden', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cedar',
u'cedar Documentation',
[u'Chris Holden'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cedar',
u'cedar Documentation',
u'Chris Holden',
'cedar',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"ceholden@gmail.com"
] | ceholden@gmail.com |
a7c6bd2c553b9c2bcf8071d69e20e4e1e3f77a55 | f1df5173f34465c416904c0e119393cbfd9be32d | /app/tasks.py | dd250e279b00490c803a25392897587b83f1a8d8 | [] | no_license | zhangwei1989/microblog | e7765c0aa3f1218292334744f1a22963ecbd4216 | 7f8e8ac74e8114d687d25d1f0c89e49717ff8efd | refs/heads/master | 2022-12-10T14:17:58.795978 | 2019-04-03T08:59:02 | 2019-04-03T08:59:02 | 176,434,070 | 0 | 0 | null | 2022-11-22T03:45:13 | 2019-03-19T05:50:48 | JavaScript | UTF-8 | Python | false | false | 1,718 | py | import sys
import time
import json
from rq import get_current_job
from app import create_app, db
from app.models import Task, User, Post
from flask import render_template
from app.email import send_email
# Create an application instance and push an app context so that extensions
# bound to the app (db, config, templates) work inside the RQ worker process.
app = create_app()
app.app_context().push()
def _set_task_progress(progress):
    """Record progress for the current RQ job and notify the task's owner.

    Args:
        progress: integer percentage of completion (0-100).

    Stores the value in the job's meta dict, pushes a 'task_progress'
    notification to the user that owns the Task row, and marks the Task
    complete once progress reaches 100. Does nothing when called outside
    an RQ worker (get_current_job() returns None).
    """
    job = get_current_job()
    if job:
        job.meta['progress'] = progress
        job.save_meta()
        # Task rows use the RQ job id as their primary key.
        task = Task.query.get(job.get_id())
        task.user.add_notification('task_progress', {'task_id': job.get_id(),
                                                     'progress': progress})
        if progress >= 100:
            task.complete = True
        db.session.commit()
def export_posts(user_id):
    """RQ task: export all of a user's posts and email them as a JSON file.

    Args:
        user_id: primary key of the User whose posts are exported.

    Progress is reported via _set_task_progress() so the web UI can display
    it. Any failure is logged; in all cases the task is marked 100% complete
    so it does not appear stuck.
    """
    try:
        user = User.query.get(user_id)
        _set_task_progress(0)
        data = []
        i = 0
        total_posts = user.posts.count()
        for post in user.posts.order_by(Post.timestamp.asc()):
            data.append({'body': post.body,
                         'timestamp': post.timestamp.isoformat() + 'Z'})
            time.sleep(1)  # simulates a slow task -- TODO: remove for production use
            i += 1
            _set_task_progress(100 * i // total_posts)
        send_email('[Microblog] Your blog posts',
                   sender=app.config['ADMINS'][0], recipients=[user.email],
                   text_body=render_template('email/export_posts.txt', user=user),
                   html_body=render_template('email/export_posts.html', user=user),
                   attachments=[('posts.json', 'application/json',
                                 json.dumps({'posts': data}, indent=4))],
                   sync=True)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt can
        # still stop the worker; record the traceback for debugging.
        app.logger.error('Unhandled exception', exc_info=sys.exc_info())
    finally:
        # Always finish at 100%: previously this only happened on error (or on
        # the last loop iteration), so a user with zero posts left the task
        # permanently incomplete.
        _set_task_progress(100)
| [
"zhangwei19890518@gmail.com"
] | zhangwei19890518@gmail.com |
54b10beeee3ef88100dbb01782ff9c9e1bb1a0f8 | 05217f20200f03ff18f522c79377426373f7cf9f | /flaskproject/blueprintproject - 副本/blueprintproject/user/__init__.py | 34a3fc3a61d8eed522a78a215f35604749d59be5 | [] | no_license | njw-666/1118Django | d381b90f1148f9ae8eb6baa00b4600e01b9512a5 | c3cae1f832114e79b73ec11b39130eee2ea1655c | refs/heads/master | 2022-11-20T07:52:55.846013 | 2020-03-23T08:29:07 | 2020-03-23T08:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | ## 子应用的初始化文件
from flask import Blueprint
from flask_restful import Api
user_bl = Blueprint("user",__name__)
api=Api(user_bl)
# from user.views import *
from .models import *
from blueprintproject.user.views import *
## 收集路由
api.add_resource(Demo,"/demo/") | [
"str_wjp@126.com"
] | str_wjp@126.com |
6f22be7c5101bc2ea58b37bef23039df8674a923 | 9e7c2fab995a0d64a296d7e362c109f9d7d27d6a | /UpdatingDelayedQLearnerAgentClass.py | c8e7ed13f1b4d95dcebb29856a21e3e0ff6338e6 | [] | no_license | collector-m/transfer_rl_icml_2018 | f2f7ef4eb48016abdb81c066283fbece56d8a366 | f66216000c8411b4c53fd5465f93fb4f55f2d003 | refs/heads/master | 2021-09-15T21:14:41.908846 | 2018-06-11T01:19:13 | 2018-06-11T01:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,386 | py | ''' QLearningAgentClass.py: Class for a basic QLearningAgent '''
# Python imports.
import random
import numpy
import time
import copy
from collections import defaultdict
# Other imports.
from simple_rl.agents.AgentClass import Agent
class UpdatingDelayedQLearnerAgent(Agent):
'''
Delayed-Q Learning Agent (Strehl, A.L., Li, L., Wiewiora, E., Langford, J. and Littman, M.L., 2006. PAC model-free reinforcement learning).
Implemented by Yuu Jinnai (ddyuudd@gmail.com)
'''
def __init__(self, actions, name="Updating-delayed-Q-learning", gamma=0.99, m=1, epsilon1=0.1):
'''
Args:
actions (list): Contains strings denoting the actions.
init_q (2d list): Initial Q function. AU(s, a) in Strehl et al 2006.
name (str): Denotes the name of the agent.
gamma (float): discount factor
m (float): Number of samples for updating Q-value
epsilon1 (float): Learning rate
'''
# name_ext = "-" + explore if explore != "uniform" else ""
Agent.__init__(self, name=name, actions=actions, gamma=gamma)
self.rmax = 1 # TODO: set/get function
# Set/initialize parameters and other relevant classwide data
self.step_number = 0
# TODO: Here we assume that init_q has Qvalue for every (s, a) pair.
self.q_func = defaultdict(lambda: defaultdict(lambda: 1.0 / (1.0 - gamma)))
self.init_q_func = defaultdict(lambda: defaultdict(lambda: 1.0 / (1.0 - gamma)))
self.AU = defaultdict(lambda: defaultdict(lambda: 0.0)) # used for attempted updates
self.l = defaultdict(lambda: defaultdict(lambda: 0)) # counters
self.b = defaultdict(lambda: defaultdict(lambda: 0)) # beginning timestep of attempted update
self.LEARN = defaultdict(lambda: defaultdict(lambda: False)) # beginning timestep of attempted update
# for x in init_q:
# for y in init_q[x]:
# self.AU[x][y] = 0.0 # AU(s, a) <- 0
# self.l[x][y] = 0 # l(s, a) <- 0
# self.b[x][y] = 0 # b(s, a) <- 0
# self.LEARN[x][y] = False
# TODO: Add a code to calculate m and epsilon1 from epsilon and delta.
# m and epsilon1 should be set according to epsilon and delta in order to be PAC-MDP.
self.m = m
self.epsilon1 = epsilon1
self.tstar = 0 # time of most recent action value change
# --------------------------------
# ---- CENTRAL ACTION METHODS ----
# --------------------------------
def act(self, state, reward, learning=True):
'''
Args:
state (State)
reward (float)
Summary:
The central method called during each time step.
Retrieves the action according to the current policy
and performs updates given (s=self.prev_state,
a=self.prev_action, r=reward, s'=state)
'''
if learning:
self.update(self.prev_state, self.prev_action, reward, state)
# For Delayed Q-learning it always take the action with highest Q value (no epsilon exploration required).
action = self.greedy_q_policy(state)
self.prev_state = state
self.prev_action = action
self.step_number += 1
return action
def greedy_q_policy(self, state):
'''
Args:
state (State)
Returns:
(str): action.
'''
action = self.get_max_q_action(state)
return action
# ---------------------------------
# ---- Q VALUES AND PARAMETERS ----
# ---------------------------------
def update(self, state, action, reward, next_state):
'''
Args:
state (State)
action (str)
reward (float)
next_state (State)
Summary:
Updates the internal Q Function according to the Bellman Equation. (Classic Q Learning update)
'''
# If this is the first state, just return.
if state is None:
self.prev_state = next_state
return
if self.b[state][action] <= self.tstar:
self.LEARN[state][action] = True
if self.LEARN[state][action]:
if self.l[state][action] == 0:
self.b[state][action] = self.step_number
self.l[state][action] = self.l[state][action] + 1
nextq, _ = self._compute_max_qval_action_pair(next_state)
self.AU[state][action] = self.AU[state][action] + reward + self.gamma * nextq
if self.l[state][action] == self.m:
if self.q_func[state][action] - self.AU[state][action] / self.m >= 2 * self.epsilon1:
self.q_func[state][action] = self.AU[state][action] / self.m + self.epsilon1
self.tstar = self.step_number
elif self.b[state][action] > self.tstar:
self.LEARN[state][action] = False
self.AU[state][action] = 0
self.l[state][action] = 0
def _compute_max_qval_action_pair(self, state):
'''
Args:
state (State)
Returns:
(tuple) --> (float, str): where the float is the Qval, str is the action.
'''
# Grab random initial action in case all equal
best_action = random.choice(self.actions)
max_q_val = float("-inf")
shuffled_action_list = self.actions[:]
random.shuffle(shuffled_action_list)
# Find best action (action w/ current max predicted Q value)
for action in shuffled_action_list:
q_s_a = self.get_q_value(state, action)
if q_s_a > max_q_val:
max_q_val = q_s_a
best_action = action
return max_q_val, best_action
def get_max_q_action(self, state):
'''
Args:
state (State)
Returns:
(str): denoting the action with the max q value in the given @state.
'''
return self._compute_max_qval_action_pair(state)[1]
def get_max_q_value(self, state):
'''
Args:
state (State)
Returns:
(float): denoting the max q value in the given @state.
'''
return self._compute_max_qval_action_pair(state)[0]
def get_q_value(self, state, action):
'''
Args:
state (State)
action (str)
Returns:
(float): denoting the q value of the (@state, @action) pair.
'''
return self.q_func[state][action]
def get_action_distr(self, state, beta=0.2):
'''
Args:
state (State)
beta (float): Softmax temperature parameter.
Returns:
(list of floats): The i-th float corresponds to the probability
mass associated with the i-th action (indexing into self.actions)
'''
all_q_vals = []
for i in xrange(len(self.actions)):
action = self.actions[i]
all_q_vals.append(self.get_q_value(state, action))
# Softmax distribution.
total = sum([numpy.exp(beta * qv) for qv in all_q_vals])
softmax = [numpy.exp(beta * qv) / total for qv in all_q_vals]
return softmax
def reset(self):
self.step_number = 0
self.episode_number = 0
# print "#####################################"
# print "Reset", self.name, "Q-function"
# # print self.q_func
# for x in self.q_func:
# print (x)
# for y in self.q_func[x]:
# print (y, ':', self.q_func[x][y])
self.update_init_q_function()
self.q_func = copy.deepcopy(self.init_q_func)
Agent.reset(self)
def end_of_episode(self):
'''
Summary:
Resets the agents prior pointers.
'''
Agent.end_of_episode(self)
def set_q_function(self, q_func):
'''
Set initial Q-function.
For PAC-MDP, initial Q(s, a) should be an upper bound of Q*(s, a).
'''
self.init_q_func = copy.deepcopy(q_func)
self.q_func = copy.deepcopy(self.init_q_func)
def set_vmax(self):
'''
Initialize Q-values to be Vmax.
'''
vmax = self.rmax / (1 - self.gamma)
for x in self.q_func:
for y in self.q_func[x]:
self.q_func[x][y] = vmax
self.init_q_func[x][y] = vmax
def update_init_q_function(self):
new_q_func = self.q_func
# print new_q_func, type(new_q_func)
assert len(self.init_q_func) <= len(new_q_func)
for x in new_q_func:
# print "x", x, len(self.init_q_func[x])
assert len(self.init_q_func[x]) <= len(new_q_func[x])
for y in new_q_func[x]:
# print "y", y
# print "new_q_func[x]", new_q_func[x], type(new_q_func[x])
# print "init_q_func[x]", self.init_q_func[x], type(self.init_q_func[x])
# print type(self.init_q_func[x][y])
# print type(new_q_func[x][y])
# print self.init_q_func[x][y], new_q_func[x][y]
new_q_func[x][y] = max(new_q_func[x][y], self.init_q_func[x][y])
self.init_q_func = new_q_func
| [
"ddyuudd@gmail.com"
] | ddyuudd@gmail.com |
d2d8a47ae5c8b58f85fb4c194f4b78c97929f046 | 40e7e12de3a4c2e3c55d064898f331eb89093ff0 | /sbase/steps.py | 98a00abb7d58ae3cc7f19052060de7a633c64e47 | [
"MIT"
] | permissive | bryoh/SeleniumBase | 0f2ed8701557d3c512a65e050271ff1f2a2e02e2 | fda7a286c4a0b2cb9015baa19d825b89834c8c1b | refs/heads/master | 2023-05-26T16:29:26.919583 | 2023-05-12T16:51:41 | 2023-05-12T16:51:41 | 132,666,520 | 0 | 0 | MIT | 2023-02-01T10:38:22 | 2018-05-08T21:19:55 | Python | UTF-8 | Python | false | false | 38,261 | py | from behave import step
def normalize_text(text):
text = text.replace("\\\\", "\\").replace("\\t", "\t").replace("\\n", "\n")
text = text.replace('\\"', '"').replace("\\'", "'")
return text
@step("Open '{url}'")
@step('Open "{url}"')
@step("Open URL '{url}'")
@step('Open URL "{url}"')
@step("User opens '{url}'")
@step('User opens "{url}"')
@step("User opens URL '{url}'")
@step('User opens URL "{url}"')
@step("User goes to '{url}'")
@step('User goes to "{url}"')
@step("User goes to URL '{url}'")
@step('User goes to URL "{url}"')
def open_url(context, url):
sb = context.sb
sb.open(url)
@step("Click '{selector}'")
@step('Click "{selector}"')
@step("Click element '{selector}'")
@step('Click element "{selector}"')
@step("User clicks '{selector}'")
@step('User clicks "{selector}"')
@step("User clicks element '{selector}'")
@step('User clicks element "{selector}"')
def click_element(context, selector):
sb = context.sb
sb.click(selector)
@step("Type text '{text}' into '{selector}'")
@step('Type text "{text}" into "{selector}"')
@step("Type text '{text}' into \"{selector}\"")
@step('Type text "{text}" into \'{selector}\'')
@step("Type text '{text}' in '{selector}'")
@step('Type text "{text}" in "{selector}"')
@step("Type text '{text}' in \"{selector}\"")
@step('Type text "{text}" in \'{selector}\'')
@step("Type '{text}' into '{selector}'")
@step('Type "{text}" into "{selector}"')
@step("Type '{text}' into \"{selector}\"")
@step('Type "{text}" into \'{selector}\'')
@step("Type '{text}' in '{selector}'")
@step('Type "{text}" in "{selector}"')
@step("Type '{text}' in \"{selector}\"")
@step('Type "{text}" in \'{selector}\'')
@step("In '{selector}' type '{text}'")
@step('In "{selector}" type "{text}"')
@step("In '{selector}' type \"{text}\"")
@step('In "{selector}" type \'{text}\'')
@step("Into '{selector}' type '{text}'")
@step('Into "{selector}" type "{text}"')
@step("Into '{selector}' type \"{text}\"")
@step('Into "{selector}" type \'{text}\'')
@step("Find '{selector}' and type '{text}'")
@step('Find "{selector}" and type "{text}"')
@step("Find '{selector}' and type \"{text}\"")
@step('Find "{selector}" and type \'{text}\'')
@step("User types '{text}' in '{selector}'")
@step('User types "{text}" in "{selector}"')
@step("User types '{text}' in \"{selector}\"")
@step('User types "{text}" in \'{selector}\'')
@step("User types '{text}' into '{selector}'")
@step('User types "{text}" into "{selector}"')
@step("User types '{text}' into \"{selector}\"")
@step('User types "{text}" into \'{selector}\'')
def type_text(context, selector, text):
sb = context.sb
text = normalize_text(text)
sb.type(selector, text)
@step("Add text '{text}' into '{selector}'")
@step('Add text "{text}" into "{selector}"')
@step("Add text '{text}' into \"{selector}\"")
@step('Add text "{text}" into \'{selector}\'')
@step("Add text '{text}' in '{selector}'")
@step('Add text "{text}" in "{selector}"')
@step("Add text '{text}' in \"{selector}\"")
@step('Add text "{text}" in \'{selector}\'')
@step("Add '{text}' into '{selector}'")
@step('Add "{text}" into "{selector}"')
@step("Add '{text}' into \"{selector}\"")
@step('Add "{text}" into \'{selector}\'')
@step("Add '{text}' in '{selector}'")
@step('Add "{text}" in "{selector}"')
@step("Add '{text}' in \"{selector}\"")
@step('Add "{text}" in \'{selector}\'')
@step("Into '{selector}' add '{text}'")
@step('Into "{selector}" add "{text}"')
@step("Into '{selector}' add \"{text}\"")
@step('Into "{selector}" add \'{text}\'')
@step("In '{selector}' add '{text}'")
@step('In "{selector}" add "{text}"')
@step("In '{selector}' add \"{text}\"")
@step('In "{selector}" add \'{text}\'')
@step("User adds '{text}' in '{selector}'")
@step('User adds "{text}" in "{selector}"')
@step("User adds '{text}' in \"{selector}\"")
@step('User adds "{text}" in \'{selector}\'')
@step("User adds '{text}' into '{selector}'")
@step('User adds "{text}" into "{selector}"')
@step("User adds '{text}' into \"{selector}\"")
@step('User adds "{text}" into \'{selector}\'')
def add_text(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.add_text(selector, text)
@step("Assert element '{selector}'")
@step('Assert element "{selector}"')
@step("Assert element '{selector}' is visible")
@step('Assert element "{selector}" is visible')
@step("Element '{selector}' should be visible")
@step('Element "{selector}" should be visible')
def assert_element(context, selector):
sb = context.sb
sb.assert_element(selector)
@step("Assert text '{text}' in '{selector}'")
@step('Assert text "{text}" in "{selector}"')
@step("Assert text '{text}' in \"{selector}\"")
@step('Assert text "{text}" in \'{selector}\'')
@step("Text in '{selector}' should contain '{text}'")
@step('Text in "{selector}" should contain "{text}"')
@step("Text in '{selector}' should contain \"{text}\"")
@step('Text in "{selector}" should contain \'{text}\'')
def assert_text_in_element(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.assert_text(text, selector)
@step("Assert text '{text}'")
@step('Assert text "{text}"')
@step("Assert text '{text}' is visible")
@step('Assert text "{text}" is visible')
@step("Text '{text}' should be visible")
@step('Text "{text}" should be visible')
def assert_text(context, text):
sb = context.sb
text = normalize_text(text)
sb.assert_text(text)
@step("Assert exact text '{text}' in '{selector}'")
@step('Assert exact text "{text}" in "{selector}"')
@step("Assert exact text '{text}' in \"{selector}\"")
@step('Assert exact text "{text}" in \'{selector}\'')
@step("Text in '{selector}' should be '{text}'")
@step('Text in "{selector}" should be "{text}"')
@step("Text in '{selector}' should be \"{text}\"")
@step('Text in "{selector}" should be \'{text}\'')
def assert_exact_text(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.assert_exact_text(text, selector)
@step("Highlight '{selector}'")
@step('Highlight "{selector}"')
@step("Highlight element '{selector}'")
@step('Highlight element "{selector}"')
@step("Use JS to highlight '{selector}'")
@step('Use JS to highlight "{selector}"')
def highlight_element(context, selector):
sb = context.sb
sb.highlight(selector)
@step("Click link '{link}'")
@step('Click link "{link}"')
@step("User clicks link '{link}'")
@step('User clicks link "{link}"')
def click_link(context, link):
sb = context.sb
sb.click_link(link)
@step("JS click '{selector}'")
@step('JS click "{selector}"')
@step("JS click element '{selector}'")
@step('JS click element "{selector}"')
@step("Use JS to click '{selector}'")
@step('Use JS to click "{selector}"')
def js_click(context, selector):
sb = context.sb
sb.js_click(selector)
@step("Save screenshot as '{name}'")
@step('Save screenshot as "{name}"')
@step("User saves screenshot as '{name}'")
@step('User saves screenshot as "{name}"')
def save_screenshot_as(context, name):
sb = context.sb
name = normalize_text(name)
sb.save_screenshot(name)
@step("Save screenshot to '{folder}' as '{name}'")
@step('Save screenshot to "{folder}" as "{name}"')
@step("Save screenshot to '{folder}' as \"{name}\"")
@step('Save screenshot to "{folder}" as \'{name}\'')
@step("User saves screenshot to '{folder}' as '{name}'")
@step('User saves screenshot to "{folder}" as "{name}"')
@step("User saves screenshot to '{folder}' as \"{name}\"")
@step('User saves screenshot to "{folder}" as \'{name}\'')
def save_screenshot_to_folder_as(context, name, folder):
sb = context.sb
name = normalize_text(name)
sb.save_screenshot(name, folder)
@step("Save screenshot to logs")
@step("Save a screenshot to the logs")
@step("User saves screenshot to logs")
@step("User saves a screenshot to the logs")
def save_screenshot_to_logs(context):
sb = context.sb
sb.save_screenshot_to_logs()
@step("Refresh page")
@step("Reload page")
@step("User refreshes the page")
@step("User reloads the page")
def refresh_page(context):
sb = context.sb
sb.refresh_page()
@step("Go back")
@step("User goes back")
@step("User navigates back")
def go_back(context):
sb = context.sb
sb.go_back()
@step("Go forward")
@step("User goes forward")
@step("User navigates forward")
def go_forward(context):
sb = context.sb
sb.go_forward()
@step("Set value of '{selector}' to '{text}'")
@step('Set value of "{selector}" to "{text}"')
@step("Set value of \"{selector}\" to '{text}'")
@step('Set value of \'{selector}\' to "{text}"')
@step("User sets value of '{selector}' to '{text}'")
@step('User sets value of "{selector}" to "{text}"')
@step("User sets value of \"{selector}\" to '{text}'")
@step('User sets value of \'{selector}\' to "{text}"')
def set_value(context, selector, text):
sb = context.sb
text = normalize_text(text)
sb.set_value(selector, text)
@step("Switch to iframe '{frame}'")
@step('Switch to iframe "{frame}"')
@step("Switch to frame '{frame}'")
@step('Switch to frame "{frame}"')
@step("User switches to iframe '{frame}'")
@step('User switches to iframe "{frame}"')
@step("User switches to frame '{frame}'")
@step('User switches to frame "{frame}"')
def switch_to_frame(context, frame):
sb = context.sb
sb.switch_to_frame(frame)
@step("Switch to default content")
@step("Exit from iframes")
@step("Exit from frames")
@step("User switches to default content")
@step("User exits from iframes")
@step("User exits from frames")
def switch_to_default_content(context):
sb = context.sb
sb.switch_to_default_content()
@step("Switch to parent frame")
@step("Exit current iframe")
@step("Exit current frame")
@step("User switches to parent frame")
@step("User exits current iframe")
@step("User exits current frame")
def switch_to_parent_frame(context):
sb = context.sb
sb.switch_to_parent_frame()
@step("Into '{selector}' enter MFA code '{totp_key}'")
@step('Into "{selector}" enter MFA code "{totp_key}"')
@step("Into '{selector}' enter MFA code \"{totp_key}\"")
@step('Into "{selector}" enter MFA code \'{totp_key}\'')
@step("Into '{selector}' do MFA '{totp_key}'")
@step('Into "{selector}" do MFA "{totp_key}"')
@step("Into '{selector}' do MFA \"{totp_key}\"")
@step('Into "{selector}" do MFA \'{totp_key}\'')
@step("Do MFA '{totp_key}' into '{selector}'")
@step('Do MFA "{totp_key}" into "{selector}"')
@step("Do MFA \"{totp_key}\" into '{selector}'")
@step('Do MFA \'{totp_key}\' into "{selector}"')
@step("Enter MFA code '{totp_key}' into '{selector}'")
@step('Enter MFA code "{totp_key}" into "{selector}"')
@step("Enter MFA code \"{totp_key}\" into '{selector}'")
@step('Enter MFA code \'{totp_key}\' into "{selector}"')
@step("User enters MFA code '{totp_key}' into '{selector}'")
@step('User enters MFA code "{totp_key}" into "{selector}"')
@step("User enters MFA code \"{totp_key}\" into '{selector}'")
@step('User enters MFA code \'{totp_key}\' into "{selector}"')
def enter_mfa_code(context, selector, totp_key):
sb = context.sb
sb.enter_mfa_code(selector, totp_key)
@step("Open if not '{url}'")
@step('Open if not "{url}"')
@step("Open if not URL '{url}'")
@step('Open if not URL "{url}"')
@step("User opens '{url}' if not on page")
@step('User opens "{url}" if not on page')
@step("User opens URL '{url}' if not on page")
@step('User opens URL "{url}" if not on page')
def open_if_not_url(context, url):
sb = context.sb
sb.open_if_not_url(url)
@step("Select if unselected '{selector}'")
@step('Select if unselected "{selector}"')
@step("Select '{selector}' if unselected")
@step('Select "{selector}" if unselected')
@step("User selects '{selector}' if unselected")
@step('User selects "{selector}" if unselected')
def select_if_unselected(context, selector):
sb = context.sb
sb.select_if_unselected(selector)
@step("Unselect if selected '{selector}'")
@step('Unselect if selected "{selector}"')
@step("Unselect '{selector}' if selected")
@step('Unselect "{selector}" if selected')
@step("User unselects '{selector}' if selected")
@step('User unselects "{selector}" if selected')
def unselect_if_selected(context, selector):
sb = context.sb
sb.unselect_if_selected(selector)
@step("Check if unchecked '{selector}'")
@step('Check if unchecked "{selector}"')
@step("Check '{selector}' if unchecked")
@step('Check "{selector}" if unchecked')
@step("User checks '{selector}' if unchecked")
@step('User checks "{selector}" if unchecked')
def check_if_unchecked(context, selector):
sb = context.sb
sb.check_if_unchecked(selector)
@step("Uncheck if checked '{selector}'")
@step('Uncheck if checked "{selector}"')
@step("Uncheck '{selector}' if checked")
@step('Uncheck "{selector}" if checked')
@step("User unchecks '{selector}' if checked")
@step('User unchecks "{selector}" if checked')
def uncheck_if_checked(context, selector):
sb = context.sb
sb.uncheck_if_checked(selector)
@step("Drag '{drag_selector}' into '{drop_selector}'")
@step('Drag "{drag_selector}" into "{drop_selector}"')
@step("Drag '{drag_selector}' into \"{drop_selector}\"")
@step('Drag "{drag_selector}" into \'{drop_selector}\'')
@step("User drags '{drag_selector}' into '{drop_selector}'")
@step('User drags "{drag_selector}" into "{drop_selector}"')
@step("User drags '{drag_selector}' into \"{drop_selector}\"")
@step('User drags "{drag_selector}" into \'{drop_selector}\'')
def drag_and_drop(context, drag_selector, drop_selector):
sb = context.sb
sb.drag_and_drop(drag_selector, drop_selector)
@step("Hover '{hover_selector}' and click '{click_selector}'")
@step('Hover "{hover_selector}" and click "{click_selector}"')
@step("Hover '{hover_selector}' and click \"{click_selector}\"")
@step('Hover "{hover_selector}" and click \'{click_selector}\'')
@step("User hovers '{hover_selector}' and clicks '{click_selector}'")
@step('User hovers "{hover_selector}" and clicks "{click_selector}"')
@step("User hovers '{hover_selector}' and clicks \"{click_selector}\"")
@step('User hovers "{hover_selector}" and clicks \'{click_selector}\'')
def hover_and_click(context, hover_selector, click_selector):
sb = context.sb
sb.hover_and_click(hover_selector, click_selector)
@step("Find '{selector}' and select '{text}'")
@step('Find "{selector}" and select "{text}"')
@step("Find '{selector}' and select \"{text}\"")
@step('Find "{selector}" and select \'{text}\'')
@step("User selects '{text}' in '{selector}'")
@step('User selects "{text}" in "{selector}"')
@step("User selects \"{text}\" in '{selector}'")
@step('User selects \'{text}\' in "{selector}"')
@step("User finds '{selector}' and selects '{text}'")
@step('User finds "{selector}" and selects "{text}"')
@step("User finds '{selector}' and selects \"{text}\"")
@step('User finds "{selector}" and selects \'{text}\'')
def select_option_by_text(context, selector, text):
sb = context.sb
text = normalize_text(text)
sb.select_option_by_text(selector, text)
@step("Find '{selector}' and select '{text}' by {option}")
@step('Find "{selector}" and select "{text}" by {option}')
@step("Find '{selector}' and select \"{text}\" by {option}")
@step('Find "{selector}" and select \'{text}\' by {option}')
@step("User finds '{selector}' and selects '{text}' by {option}")
@step('User finds "{selector}" and selects "{text}" by {option}')
@step("User finds '{selector}' and selects \"{text}\" by {option}")
@step('User finds "{selector}" and selects \'{text}\' by {option}')
def select_option_by_option(context, selector, text, option):
sb = context.sb
text = normalize_text(text)
if option.startswith("'") or option.startswith('"'):
option = option[1:]
if option.endswith("'") or option.endswith('"'):
option = option[:-1]
if option == "text":
sb.select_option_by_text(selector, text)
elif option == "index":
sb.select_option_by_index(selector, text)
elif option == "value":
sb.select_option_by_value(selector, text)
else:
raise Exception("Unknown option: %s" % option)
@step("Wait for '{selector}' to be visible")
@step('Wait for "{selector}" to be visible')
@step("Wait for element '{selector}'")
@step('Wait for element "{selector}"')
@step("User waits for '{selector}' to be visible")
@step('User waits for "{selector}" to be visible')
@step("User waits for element '{selector}'")
@step('User waits for element "{selector}"')
def wait_for_element(context, selector):
sb = context.sb
sb.wait_for_element(selector)
@step("Wait for text '{text}' in '{selector}'")
@step('Wait for text "{text}" in "{selector}"')
@step("Wait for text '{text}' in \"{selector}\"")
@step('Wait for text "{text}" in \'{selector}\'')
@step("Wait for '{selector}' to have text '{text}'")
@step('Wait for "{selector}" to have text "{text}"')
@step('Wait for "{selector}" to have text \'{text}\'')
@step("Wait for '{selector}' to have text \"{text}\"")
@step("User waits for text '{text}' in '{selector}'")
@step('User waits for text "{text}" in "{selector}"')
@step("User waits for text '{text}' in \"{selector}\"")
@step('User waits for text "{text}" in \'{selector}\'')
@step("User waits for '{selector}' to have text '{text}'")
@step('User waits for "{selector}" to have text "{text}"')
@step('User waits for "{selector}" to have text \'{text}\'')
@step("User waits for '{selector}' to have text \"{text}\"")
def wait_for_text_in_element(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.wait_for_text(text, selector)
@step("Wait for text '{text}'")
@step('Wait for text "{text}"')
@step("User waits for text '{text}'")
@step('User waits for text "{text}"')
def wait_for_text(context, text):
sb = context.sb
text = normalize_text(text)
sb.wait_for_text(text)
@step("Double click '{selector}'")
@step('Double click "{selector}"')
@step("Double click element '{selector}'")
@step('Double click element "{selector}"')
@step("User double clicks '{selector}'")
@step('User double clicks "{selector}"')
@step("User double clicks element '{selector}'")
@step('User double clicks element "{selector}"')
def double_click_element(context, selector):
sb = context.sb
sb.double_click(selector)
@step("Slow click '{selector}'")
@step('Slow click "{selector}"')
@step("Slow click element '{selector}'")
@step('Slow click element "{selector}"')
@step("User slow clicks '{selector}'")
@step('User slow clicks "{selector}"')
@step("User slow clicks element '{selector}'")
@step('User slow clicks element "{selector}"')
def slow_click_element(context, selector):
sb = context.sb
sb.slow_click(selector)
@step("Clear text field '{selector}'")
@step('Clear text field "{selector}"')
@step("Clear text in '{selector}'")
@step('Clear text in "{selector}"')
@step("User clears text field '{selector}'")
@step('User clears text field "{selector}"')
@step("User clears text in '{selector}'")
@step('User clears text in "{selector}"')
def clear_text_field(context, selector):
sb = context.sb
sb.clear(selector)
@step("Maximize window")
@step("Maximize the window")
@step("User maximizes window")
@step("User maximizes the window")
def maximize_window(context):
sb = context.sb
sb.maximize_window()
@step("Get new driver")
@step("User gets new driver")
def get_new_driver(context):
sb = context.sb
sb.get_new_driver()
@step("Switch to default driver")
@step("User switches to default driver")
def switch_to_default_driver(context):
sb = context.sb
sb.switch_to_default_driver()
@step("Press up arrow")
@step("User presses up arrow")
def press_up_arrow(context):
sb = context.sb
sb.press_up_arrow()
@step("Press down arrow")
@step("User presses down arrow")
def press_down_arrow(context):
sb = context.sb
sb.press_down_arrow()
@step("Press left arrow")
@step("User presses left arrow")
def press_left_arrow(context):
sb = context.sb
sb.press_left_arrow()
@step("Press right arrow")
@step("User presses right arrow")
def press_right_arrow(context):
sb = context.sb
sb.press_right_arrow()
@step("Clear all cookies")
@step("Delete all cookies")
@step("User clears all cookies")
@step("User deletes all cookies")
def delete_all_cookies(context):
sb = context.sb
sb.delete_all_cookies()
@step("Clear Local Storage")
@step("Delete Local Storage")
@step("User clears Local Storage")
@step("User deletes Local Storage")
def clear_local_storage(context):
sb = context.sb
sb.clear_local_storage()
@step("Clear Session Storage")
@step("Delete Session Storage")
@step("User clears Session Storage")
@step("User deletes Session Storage")
def clear_session_storage(context):
sb = context.sb
sb.clear_session_storage()
@step("JS click all '{selector}'")
@step('JS click all "{selector}"')
@step("Use JS to click all '{selector}'")
@step('Use JS to click all "{selector}"')
def js_click_all(context, selector):
sb = context.sb
sb.js_click_all(selector)
@step("Click '{selector}' at ({px},{py})")
@step('Click "{selector}" at ({px},{py})')
@step("Click '{selector}' at ({px}, {py})")
@step('Click "{selector}" at ({px}, {py})')
@step("User clicks '{selector}' at ({px},{py})")
@step('User clicks "{selector}" at ({px},{py})')
@step("User clicks '{selector}' at ({px}, {py})")
@step('User clicks "{selector}" at ({px}, {py})')
def click_with_offset(context, selector, px, py):
sb = context.sb
sb.click_with_offset(selector, px, py)
@step("In '{selector}' choose file '{file_path}'")
@step('In "{selector}" choose file "{file_path}"')
@step("In '{selector}' choose file \"{file_path}\"")
@step('In "{selector}" choose file \'{file_path}\'')
@step("Into '{selector}' choose file '{file_path}'")
@step('Into "{selector}" choose file "{file_path}"')
@step("Into '{selector}' choose file \"{file_path}\"")
@step('Into "{selector}" choose file \'{file_path}\'')
@step("User chooses file '{file_path}' for '{selector}'")
@step('User chooses file "{file_path}" for "{selector}" ')
@step("User chooses file \"{file_path}\" for '{selector}' ")
@step('User chooses file \'{file_path}\' for "{selector}" ')
def choose_file(context, selector, file_path):
sb = context.sb
sb.choose_file(selector, file_path)
@step("Set content to frame '{frame}'")
@step('Set content to frame "{frame}"')
@step("User sets content to frame '{frame}'")
@step('User sets content to frame "{frame}"')
def set_content_to_frame(context, frame):
sb = context.sb
sb.set_content_to_frame(frame)
@step("Set content to default")
@step("User sets content to default")
def set_content_to_default(context):
sb = context.sb
sb.set_content_to_default()
@step("Set content to parent")
@step("User sets content to parent")
def set_content_to_parent(context):
sb = context.sb
sb.set_content_to_parent()
@step("Assert element present '{selector}'")
@step('Assert element present "{selector}"')
@step("Element '{selector}' should be present")
@step('Element "{selector}" should be present')
def assert_element_present(context, selector):
sb = context.sb
sb.assert_element_present(selector)
@step("Assert element not visible '{selector}'")
@step('Assert element not visible "{selector}"')
@step("Element '{selector}' should not be visible")
@step('Element "{selector}" should not be visible')
def assert_element_not_visible(context, selector):
sb = context.sb
sb.assert_element_not_visible(selector)
@step("Assert link text '{text}'")
@step('Assert link text "{text}"')
@step("Link text '{text}' should be visible")
@step('Link text "{text}" should be visible')
def assert_link_text(context, text):
sb = context.sb
text = normalize_text(text)
sb.assert_link_text(text)
@step("Assert title '{title}'")
@step('Assert title "{title}"')
@step("The title should be '{title}'")
@step('The title should be "{title}"')
def assert_title(context, title):
sb = context.sb
title = normalize_text(title)
sb.assert_title(title)
@step("Assert downloaded file '{file}'")
@step('Assert downloaded file "{file}"')
@step("File '{file}' should be in downloads")
@step('File "{file}" should be in downloads')
def assert_downloaded_file(context, file):
sb = context.sb
sb.assert_downloaded_file(file)
@step("Download '{file}' to downloads")
@step('Download "{file}" to downloads')
@step("Download file '{file}' to downloads")
@step('Download file "{file}" to downloads')
@step("User downloads '{file}' to downloads")
@step('User downloads "{file}" to downloads')
def download_file(context, file):
sb = context.sb
sb.download_file(file)
@step("Download '{file}' to '{destination}'")
@step('Download "{file}" to "{destination}"')
@step("Download file '{file}' to '{destination}'")
@step('Download file "{file}" to "{destination}"')
@step("User downloads '{file}' to '{destination}'")
@step('User downloads "{file}" to "{destination}"')
def download_file_to_destination(context, file, destination):
sb = context.sb
sb.download_file(file, destination)
@step("In '{selector}' assert attribute \'{attribute}\'")
@step('In "{selector}" assert attribute \"{attribute}\"')
@step("In \"{selector}\" assert attribute '{attribute}'")
@step('In \'{selector}\' assert attribute "{attribute}"')
def assert_attribute(context, selector, attribute):
sb = context.sb
sb.assert_attribute(selector, attribute)
@step("In '{selector}' assert attribute/value '{attribute}'/'{value}'")
@step('In "{selector}" assert attribute/value "{attribute}"/"{value}"')
@step("In \"{selector}\" assert attribute/value '{attribute}'/\"{value}\"")
@step('In \'{selector}\' assert attribute/value "{attribute}"/\'{value}\'')
@step("In '{selector}' assert attribute/value '{attribute}'/\"{value}\"")
@step('In "{selector}" assert attribute/value "{attribute}"/\'{value}\'')
@step("In \"{selector}\" assert attribute/value '{attribute}'/'{value}'")
@step('In \'{selector}\' assert attribute/value "{attribute}"/"{value}"')
def assert_attribute_has_value(context, selector, attribute, value):
sb = context.sb
value = normalize_text(value)
sb.assert_attribute(selector, attribute, value)
@step("Show file choosers")
@step("Show hidden file choosers")
@step("Use JS to show file choosers")
@step("Use JS to show hidden file choosers")
def show_file_choosers(context):
sb = context.sb
sb.show_file_choosers()
@step("Sleep for {seconds} seconds")
@step("Wait for {seconds} seconds")
@step("User sleeps for {seconds} seconds")
@step("User waits for {seconds} seconds")
def sleep(context, seconds):
sb = context.sb
sb.sleep(float(seconds))
@step("Activate Demo Mode")
@step("User activates Demo Mode")
def activate_demo_mode(context):
sb = context.sb
sb.activate_demo_mode()
@step("Deactivate Demo Mode")
@step("User deactivates Demo Mode")
def deactivate_demo_mode(context):
sb = context.sb
sb.deactivate_demo_mode()
@step("Deferred assert element '{selector}'")
@step('Deferred assert element "{selector}"')
def deferred_assert_element(context, selector):
sb = context.sb
sb.deferred_assert_element(selector)
@step("Deferred assert element present '{selector}'")
@step('Deferred assert element present "{selector}"')
def deferred_assert_element_present(context, selector):
sb = context.sb
sb.deferred_assert_element_present(selector)
@step("Deferred assert text '{text}' in '{selector}'")
@step('Deferred assert text "{text}" in "{selector}"')
@step("Deferred assert text '{text}' in \"{selector}\"")
@step('Deferred assert text "{text}" in \'{selector}\'')
def deferred_assert_text_in_element(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.deferred_assert_text(text, selector)
@step("Deferred assert text '{text}'")
@step('Deferred assert text "{text}"')
def deferred_assert_text(context, text):
sb = context.sb
text = normalize_text(text)
sb.deferred_assert_text(text)
@step("Deferred assert exact text '{text}' in '{selector}'")
@step('Deferred assert exact text "{text}" in "{selector}"')
def deferred_assert_exact_text(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.deferred_assert_exact_text(text, selector)
@step("Process deferred asserts")
def process_deferred_asserts(context):
sb = context.sb
sb.process_deferred_asserts()
@step("Assert text not visible '{text}' in '{selector}'")
@step('Assert text not visible "{text}" in "{selector}"')
@step("Assert text not visible '{text}' in \"{selector}\"")
@step('Assert text not visible "{text}" in \'{selector}\'')
@step("Text '{text}' should not be visible in '{selector}'")
@step('Text "{text}" should not be visible in "{selector}"')
@step("Text '{text}' should not be visible in \"{selector}\"")
@step('Text "{text}" should not be visible in \'{selector}\'')
def assert_text_not_visible_in_element(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.assert_text_not_visible(text, selector)
@step("Assert text not visible '{text}'")
@step('Assert text not visible "{text}"')
@step("Text '{text}' should not be visible")
@step('Text "{text}" should not be visible')
def assert_text_not_visible(context, text):
sb = context.sb
text = normalize_text(text)
sb.assert_text_not_visible(text)
@step("Assert exact text not visible '{text}' in '{selector}'")
@step('Assert exact text not visible "{text}" in "{selector}"')
@step("Assert exact text not visible '{text}' in \"{selector}\"")
@step('Assert exact text not visible "{text}" in \'{selector}\'')
@step("Exact text '{text}' should not be visible in '{selector}'")
@step('Exact text "{text}" should not be visible in "{selector}"')
@step("Exact text '{text}' should not be visible in \"{selector}\"")
@step('Exact text "{text}" should not be visible in \'{selector}\'')
def assert_exact_text_not_visible_in_element(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.assert_exact_text_not_visible(text, selector)
@step("Assert exact text not visible '{text}'")
@step('Assert exact text not visible "{text}"')
@step("Exact text '{text}' should not be visible")
@step('Exact text "{text}" should not be visible')
def assert_exact_text_not_visible(context, text):
sb = context.sb
text = normalize_text(text)
sb.assert_exact_text_not_visible(text)
@step("Assert title contains '{substring}'")
@step('Assert title contains "{substring}"')
@step("The title should contain '{substring}'")
@step('The title should contain "{substring}"')
def assert_title_contains(context, substring):
sb = context.sb
substring = normalize_text(substring)
sb.assert_title_contains(substring)
@step("Open new tab")
@step("Open new window")
@step("User opens new tab")
@step("User opens new window")
def open_new_window(context):
sb = context.sb
sb.open_new_window()
@step("Accept alert")
@step("User accepts alert")
def accept_alert(context):
sb = context.sb
sb.accept_alert()
@step("Dismiss alert")
@step("User dismisses alert")
def dismiss_alert(context):
sb = context.sb
sb.dismiss_alert()
@step("Assert URL '{url}'")
@step('Assert URL "{url}"')
@step("The URL should be '{url}'")
@step('The URL should be "{url}"')
def assert_url(context, url):
sb = context.sb
url = normalize_text(url)
sb.assert_url(url)
@step("Assert URL contains '{substring}'")
@step('Assert URL contains "{substring}"')
@step("The URL should contain '{substring}'")
@step('The URL should contain "{substring}"')
def assert_url_contains(context, substring):
sb = context.sb
substring = normalize_text(substring)
sb.assert_url_contains(substring)
@step("Hover '{selector}'")
@step('Hover "{selector}"')
@step("Hover over '{selector}'")
@step('Hover over "{selector}"')
@step("Hover element '{selector}'")
@step('Hover element "{selector}"')
@step("User hovers over '{selector}'")
@step('User hovers over "{selector}"')
@step("User hovers over element '{selector}'")
@step('User hovers over element "{selector}"')
def hover(context, selector):
sb = context.sb
sb.hover(selector)
@step("Context click '{selector}'")
@step('Context click "{selector}"')
@step("Context click element '{selector}'")
@step('Context click element "{selector}"')
@step("Right click '{selector}'")
@step('Right click "{selector}"')
@step("Right click element '{selector}'")
@step('Right click element "{selector}"')
@step("User right clicks '{selector}'")
@step('User right clicks "{selector}"')
@step("User right clicks element '{selector}'")
@step('User right clicks element "{selector}"')
def context_click(context, selector):
sb = context.sb
sb.context_click(selector)
@step("JS type '{text}' in '{selector}'")
@step('JS type "{text}" in "{selector}"')
@step("JS type '{text}' in \"{selector}\"")
@step('JS type "{text}" in \'{selector}\'')
@step("JS type '{text}' into '{selector}'")
@step('JS type "{text}" into "{selector}"')
@step("JS type '{text}' into \"{selector}\"")
@step('JS type "{text}" into \'{selector}\'')
@step("JS type text '{text}' in '{selector}'")
@step('JS type text "{text}" in "{selector}"')
@step("JS type text '{text}' in \"{selector}\"")
@step('JS type text "{text}" in \'{selector}\'')
@step("JS type text '{text}' into '{selector}'")
@step('JS type text "{text}" into "{selector}"')
@step("JS type text '{text}' into \"{selector}\"")
@step('JS type text "{text}" into \'{selector}\'')
@step("Use JS to type '{text}' in '{selector}'")
@step('Use JS to type "{text}" in "{selector}"')
@step("Use JS to type '{text}' in \"{selector}\"")
@step('Use JS to type "{text}" in \'{selector}\'')
@step("Use JS to type '{text}' into '{selector}'")
@step('Use JS to type "{text}" into "{selector}"')
@step("Use JS to type '{text}' into \"{selector}\"")
@step('Use JS to type "{text}" into \'{selector}\'')
def js_type(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.js_type(selector, text)
@step("jQuery click '{selector}'")
@step('jQuery click "{selector}"')
@step("jQuery click element '{selector}'")
@step('jQuery click element "{selector}"')
@step("Use jQuery to click '{selector}'")
@step('Use jQuery to click "{selector}"')
def jquery_click(context, selector):
sb = context.sb
sb.jquery_click(selector)
@step("jQuery click all '{selector}'")
@step('jQuery click all "{selector}"')
@step("Use jQuery to click all '{selector}'")
@step('Use jQuery to click all "{selector}"')
def jquery_click_all(context, selector):
sb = context.sb
sb.jquery_click_all(selector)
@step("jQuery type '{text}' in '{selector}'")
@step('jQuery type "{text}" in "{selector}"')
@step("jQuery type '{text}' in \"{selector}\"")
@step('jQuery type "{text}" in \'{selector}\'')
@step("jQuery type '{text}' into '{selector}'")
@step('jQuery type "{text}" into "{selector}"')
@step("jQuery type '{text}' into \"{selector}\"")
@step('jQuery type "{text}" into \'{selector}\'')
@step("jQuery type text '{text}' in '{selector}'")
@step('jQuery type text "{text}" in "{selector}"')
@step("jQuery type text '{text}' in \"{selector}\"")
@step('jQuery type text "{text}" in \'{selector}\'')
@step("jQuery type text '{text}' into '{selector}'")
@step('jQuery type text "{text}" into "{selector}"')
@step("jQuery type text '{text}' into \"{selector}\"")
@step('jQuery type text "{text}" into \'{selector}\'')
@step("Use jQuery to type '{text}' in '{selector}'")
@step('Use jQuery to type "{text}" in "{selector}"')
@step("Use jQuery to type '{text}' in \"{selector}\"")
@step('Use jQuery to type "{text}" in \'{selector}\'')
@step("Use jQuery to type '{text}' into '{selector}'")
@step('Use jQuery to type "{text}" into "{selector}"')
@step("Use jQuery to type '{text}' into \"{selector}\"")
@step('Use jQuery to type "{text}" into \'{selector}\'')
def jquery_type(context, text, selector):
sb = context.sb
text = normalize_text(text)
sb.jquery_type(selector, text)
@step("Find '{selector}' and set {attribute} to '{value}'")
@step('Find "{selector}" and set {attribute} to "{value}"')
@step("Find '{selector}' and set {attribute} to \"{value}\"")
@step('Find "{selector}" and set {attribute} to \'{value}\'')
def set_attribute(context, selector, attribute, value):
sb = context.sb
value = normalize_text(value)
if attribute.startswith("'") or attribute.startswith('"'):
attribute = attribute[1:]
if attribute.endswith("'") or attribute.endswith('"'):
attribute = attribute[:-1]
sb.set_attribute(selector, attribute, value)
@step("Find all '{selector}' and set {attribute} to '{value}'")
@step('Find all "{selector}" and set {attribute} to "{value}"')
@step("Find all '{selector}' and set {attribute} to \"{value}\"")
@step('Find all "{selector}" and set {attribute} to \'{value}\'')
def set_attributes(context, selector, attribute, value):
sb = context.sb
value = normalize_text(value)
if attribute.startswith("'") or attribute.startswith('"'):
attribute = attribute[1:]
if attribute.endswith("'") or attribute.endswith('"'):
attribute = attribute[:-1]
sb.set_attributes(selector, attribute, value)
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
b0ec567f01fe087fc6a1c79460a64a24e3f8f03a | f03064e9f7fbd5d0344812fae45439905627f2a8 | /helga/nuke/reconstruction/sceneReconstructVRay/lib/reconstruct_alembic.py | a8186eb7ac6559d7c90e7c7dccd65fd61e58dce5 | [] | no_license | tws0002/helga | 45324a4acfde5054c452329de8cfdd38de4f8bda | 80f44393a5f1b3038d4ce3dc5057989ad7d3ef28 | refs/heads/master | 2021-01-12T17:21:04.802566 | 2015-04-16T20:39:06 | 2015-04-16T20:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,519 | py |
"""
reconstruct_alembic
==========================================
Internal module that reconstructs nuke 3d scenes from metadata in exrs according to our pipeline standards
.. important::
This module is internal. Access its functionality from :mod:`helga.nuke.reconstruction.sceneReconstructVRay.sceneReconstruct`
-----------------------
"""
#Imports
#------------------------------------------------------------------
#python
import sys
import os
import cPickle as pickle
import logging
#nuke
import nuke
import nukescripts
#do_reload
do_reload = True
#own
import reconstruct_globals as reconstruct_globals
if(do_reload): reload(reconstruct_globals)
#Methods
#------------------------------------------------------------------
def reconstruct_alembic(node = None, verbose = True):
    """Reconstruct the Nuke 3d alembic setup from exr metadata in a Read node.

    Unpickles the alembic dictionary our pipeline embeds in the exr
    metadata, recreates one group of ReadGeo nodes per alembic file
    (plus optional texture setup), frames each group with a backdrop
    and wires everything into a single Scene node.

    Parameters:
        node: nuke Read node that carries the exr metadata.
        verbose: if True, print status messages for early returns and
                 dump the recovered alembic dictionaries.
    """
    try:
        #node = None
        if not(node):
            if(verbose):
                print('Node = None. Returning...')
            return
        #node != type Read
        if not(nodetypeMatches(node, 'Read')):
            if(verbose):
                print('Node {0} is not of type Read. Returning...'.format(node.name()))
            return
        #metadata_dict
        metadata_dict = node.metadata()
        #alembic_dict_key
        alembic_dict_key = reconstruct_globals.NUKE_EXR_METADATA_PREFIX + reconstruct_globals.ALEMBIC_DICTIONARY_KEY
        #metadata_dict has no key alembic
        if not(alembic_dict_key in metadata_dict):
            if(verbose):
                print('Key {0} not in metadata of node {1}. Returning...'.format(alembic_dict_key, node.name()))
            return
        #alembic_details_list [{details}, {details}, {details}]
        #NOTE(review): pickle.loads is unsafe on untrusted exrs; kept because
        #our pipeline writes this metadata itself at render time.
        alembic_details_list = pickle.loads(metadata_dict[alembic_dict_key])
        #debug dump of the recovered dictionaries (only when verbose)
        if(verbose):
            for item in alembic_details_list:
                print(item)
            print('----------------------------------------------------------------')
        #alembic_details_list empty
        if not(alembic_details_list):
            if(verbose):
                print('Alembic details list for node {0} empty. Returning...'.format(node.name()))
            return
        #read_node_list
        read_node_list = create_alembic_read_nodes(alembic_details_list = alembic_details_list, verbose = verbose)
        #read_node_list empty
        if not(read_node_list):
            if(verbose):
                print('Read node list for node {0} empty. No alembics reconstructed. Returning...'.format(node.name()))
            return
        #backdrop per alembic group
        for alembic_parts_list in read_node_list:
            backdrop = create_backdrop(alembic_parts_list, rgb_to_hex_string(reconstruct_globals.ALEMBIC_READ_NODE_BACKDROP_COLOR))
            backdrop.knob('label').setValue(node.name() +'_alembic')
            backdrop.knob('note_font_size').setValue(20)
        #flatten the node groups and feed them all into one Scene node
        complete_read_nodes_list = []
        for alembic_parts_list in read_node_list:
            complete_read_nodes_list += alembic_parts_list
        #scene_node
        scene_node = nuke.nodes.Scene(inputs = complete_read_nodes_list)
    except Exception:
        #narrowed from a bare except so SystemExit/KeyboardInterrupt propagate;
        #node.name() can itself fail, so resolve it defensively for the message
        try:
            node_name = node.name()
        except Exception:
            node_name = ''
        if(node_name):
            print('Error reconstructing Alembic files for node {0}'.format(node_name))
        else:
            print('Error reconstructing Alembic files')
def create_alembic_read_nodes(alembic_details_list = [], verbose = True):
    """Create ReadGeo node groups for every alembic in alembic_details_list.

    Parameters:
        alembic_details_list: list of dicts with 'alembic_path' and
            (optionally) 'alembic_textures' keys; entries without an
            'alembic_path' are skipped. The default list is never mutated.
        verbose: if True, print a message when nothing is created.

    Returns a list of lists: one list of created nodes per alembic file.
    """
    #pair each path with its texture in a single pass instead of indexing
    #two separately filtered parallel lists against each other
    alembic_pairs_list = [(alembic_dict.get('alembic_path', ''),
                           alembic_dict.get('alembic_textures', '')) for
                          alembic_dict in
                          alembic_details_list if
                          alembic_dict.get('alembic_path', '')]
    #nothing usable found
    if not(alembic_pairs_list):
        if(verbose):
            print('Alembic pathes list empty. Returning empty list...')
        return []
    #alembic_parts_list / [[readGeo, readGeo], [readGeo, readGeo, readGeo]]
    alembic_parts_list = []
    #iterate and create
    for alembic_path, alembic_textures in alembic_pairs_list:
        #append
        alembic_parts_list.append(create_alembic_parts(alembic_path,
                                                       alembic_textures,
                                                       recreate_textures = True))
    return alembic_parts_list
def nodetypeMatches(node, nodetype):
    """Return True if the node's class name equals *nodetype*, else False."""
    # Direct boolean return replaces the redundant if/return True/return False.
    return node.Class() == nodetype
def create_backdrop(nodesList, hexColor):
    """Create a backdrop node behind every node in nodesList.

    nodesList: nuke nodes to enclose in the backdrop.
    hexColor:  packed RGBA int (see rgb_to_hex_string) for the tile color.
    Returns the created backdrop node.
    """
    # nukescripts.autoBackdrop() works on the current node-graph selection,
    # so clear it first and select only the target nodes.
    #deselect all
    deselect_all()
    #Select nodesList in viewport
    for node in nodesList:
        node.setSelected(True)
    #nukescripts autobackdrop
    backdrop = nukescripts.autoBackdrop()
    backdrop['tile_color'].setValue(hexColor)
    return backdrop
def deselect_all():
    """Deselect all nodes in the Nuke node graph."""
    # Nuke exposes no direct deselect-all, so select everything and invert.
    #Select All to invert the selection XD
    nuke.selectAll()
    nuke.invertSelection()
def rgb_to_hex_string(colorList = (0, 0, 0)):
    """Convert a normalized [r, g, b] color (components 0.0-1.0) to a
    packed RGBA int in Nuke's tile_color format (0xRRGGBBAA), with the
    alpha byte fixed to 0x01.
    """
    #getColors
    r = colorList[0]
    g = colorList[1]
    b = colorList[2]
    # Scale to 0-255 and force ints: '%02x' raises TypeError for floats,
    # and pipeline colors (e.g. reconstruct_globals backdrop colors) are
    # typically floats in the 0.0-1.0 range.
    hexColor = int('%02x%02x%02x%02x' % (int(r * 255),
                                         int(g * 255),
                                         int(b * 255),
                                         1), 16)
    return hexColor
def create_alembic_parts(alembic_path, texture_path, recreate_textures = False):
    """Create one ReadGeo2 node per item inside the alembic file.

    alembic_path: path to the .abc file on disk.
    texture_path: path to a texture image; only used when
        recreate_textures is True and the path is non-empty.
    recreate_textures: if True, also build a BasicMaterial + texture Read
        node and wire it into every created ReadGeo node.

    Returns the list of created nodes (the texture Read node, if any, is
    appended at the end).
    """
    # A throwaway ReadGeo2 is created first only to query the item list
    # stored in the abc file; it is deleted right after.
    #alembic_read_node
    alembic_read_node_temp = nuke.createNode('ReadGeo2', 'file {' +alembic_path +'}')
    #scene_view
    scene_view = alembic_read_node_temp['scene_view']
    #all_items
    all_items = scene_view.getAllItems() # get a list of all nodes stored in the abc file
    #delete temp node
    nuke.delete(alembic_read_node_temp)
    #alembic_read_node_list
    alembic_read_node_list = []
    #create one ReadGeo2 per abc item so each part can be handled separately
    for item in all_items:
        #alembic_read_node
        alembic_read_node = nuke.createNode('ReadGeo2', 'file {' +alembic_path +'}')
        alembic_read_node.knob('label').setValue(item)
        #scene_view
        scene_view = alembic_read_node['scene_view']
        scene_view.setImportedItems([item]) #import only this item into the ReadGeo node
        scene_view.setSelectedItems([item]) #set it to selected (i.e. visible)
        #append to list
        alembic_read_node_list.append(alembic_read_node)
    #align nodes
    reconstruct_globals.align_nodes(alembic_read_node_list, direction = 'y')
    #hide control panel
    for alembic_read_node in alembic_read_node_list:
        alembic_read_node.hideControlPanel()
    #if recreate_textures
    if(recreate_textures):
        #if texture path
        if(texture_path):
            #material_node
            material_node = nuke.nodes.BasicMaterial()
            # Place the material slightly above the first ReadGeo node.
            offset = -30
            pos_x = alembic_read_node_list[0]['xpos'].value()
            pos_y = alembic_read_node_list[0]['ypos'].value() + offset
            material_node['xpos'].setValue(pos_x)
            material_node['ypos'].setValue(pos_y)
            material_node['specular'].setValue(0.1)
            #texture_node
            texture_node = nuke.nodes.Read()
            texture_node['file'].fromUserText(texture_path)
            # Place the texture Read node above the material.
            offset = -150
            pos_x = alembic_read_node_list[0]['xpos'].value()
            pos_y = alembic_read_node_list[0]['ypos'].value() + offset
            texture_node['xpos'].setValue(pos_x)
            texture_node['ypos'].setValue(pos_y)
            #connect texture to the material's color input (input 1)
            material_node.setInput(1, texture_node)
            #connect the material to every alembic part (input 0)
            for alembic_read_node in alembic_read_node_list:
                alembic_read_node.setInput(0, material_node)
            #append texture node
            alembic_read_node_list.append(texture_node)
    return alembic_read_node_list
#Temp
#------------------------------------------------------------------
"""
#alembic_path
alembic_path = r'P:\23_NEUE_CLIPS\01_Erdmaennchen\150_rnd\rnd_timm\alembic_reconstruct_test\cache\cam_vertex_and_trans_matrix_animation.abc'
#alembic_read_node
#alembic_read_node = nuke.nodes.ReadGeo2()
#set path
#alembic_read_node['file'].fromUserText(alembic_path)
#alembic_read_node
alembic_read_node_temp = nuke.createNode('ReadGeo2', 'file {' +alembic_path +'}')
sceneView = alembic_read_node_temp['scene_view']
all_items = sceneView.getAllItems() # get a list of all nodes stored in the abc file
print(all_items)
nuke.delete(alembic_read_node_temp)
#alembic_read_node_list
alembic_read_node_list = []
#iterate and create node
for item in all_items:
alembic_read_node = nuke.createNode('ReadGeo2', 'file {' +alembic_path +'}')
alembic_read_node.knob('label').setValue(item)
sceneView = alembic_read_node['scene_view']
sceneView.setImportedItems([item]) # import all items into the ReadGeo node
sceneView.setSelectedItems([item]) # set everything to selected (i.e. visible)
alembic_read_node_list.append(alembic_read_node)
#align nodes
alignNodes(alembic_read_node_list, direction = 'y')
#hide control panel
for alembic_read_node in alembic_read_node_list:
alembic_read_node.hideControlPanel()
""" | [
"wagenertimm@gmail.com"
] | wagenertimm@gmail.com |
af5fc97d37e7ae14f03fe6da6e8adbca257be03a | 5a61eb222fda029d2b0a8169d6508bf8b3222d57 | /opinion_dynamics/opinion_dynamics_on_hete_social_distance_network.py | 4c2450705ea23194e50fbbcbb9f34c845d45c062 | [] | no_license | Dcomplexity/research | f7b5ed539ce63b16026bddad0d08b3d23c3aa2a8 | 7e487f765b7eee796464b6a1dc90baa5d3e5d5db | refs/heads/master | 2022-04-16T19:02:38.634091 | 2020-04-13T02:31:28 | 2020-04-13T02:31:28 | 199,882,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import networkx as nx
import random
from network_build import *
def get_network(mul_dimen, degree, group_size, group_base, group_length, alpha, beta):
    """Generate the heterogeneous social-distance network.

    Returns:
        tuple: (adjacency matrix as a numpy array, per-node neighbour index
        lists, graph nodes view, graph edges view).
        Note: ``beta`` is not forwarded to the generator call.
    """
    graph = generate_hete_network_connected(mul_dimen, degree, group_size, group_base, group_length, alpha)
    adj_matrix = nx.to_numpy_array(graph)
    # Row i of the adjacency matrix -> indices of node i's neighbours.
    neighbour_lists = [list(np.where(row == 1)[0]) for row in adj_matrix]
    return adj_matrix, neighbour_lists, graph.nodes, graph.edges
class Agent:
    """Opinion-dynamics agent holding a current and a previous-round opinion."""

    def __init__(self, id, init_op, neighbor):
        self.id = id
        # Current opinion and the snapshot used during synchronous updates
        # both start at the initial opinion.
        self.op = init_op
        self.old_op = init_op
        self.neighbor = neighbor

    def set_op(self, new_op):
        """Overwrite the current opinion."""
        self.op = new_op

    def get_op(self):
        """Return the current opinion."""
        return self.op

    def get_old_op(self):
        """Return the previous-round (backed-up) opinion."""
        return self.old_op

    def get_id(self):
        """Return the agent's id."""
        return self.id

    def backup(self):
        """Commit the current opinion as the previous-round snapshot."""
        self.old_op = self.op

    def get_neighbor(self):
        """Return a shallow copy of the neighbour index list."""
        return list(self.neighbor)
def initialize_population(group_size, group_base, group_length, mul_dimen, degree, alpha, beta):
    """Build the agent population for the social-distance network.

    One Agent is created per network node; its initial opinion is the node
    index shifted by half the population and wrapped into [0, 1), which
    spreads opinions evenly over the circular opinion space.

    Returns:
        list: one Agent per node, wired to that node's adjacency list.
    """
    # Unused ``total_num`` computation and dead commented-out initialisation
    # variants removed.
    adj_array, adj_link, nodes, edges = get_network(mul_dimen, degree, group_size, group_base,
                                                    group_length, alpha, beta)
    population = []
    popu_num = len(nodes)
    for i in nodes:
        # (i + N/2) % N / N maps node i to an evenly spaced opinion in [0, 1).
        population.append(Agent(i, (i + popu_num / 2) % popu_num / popu_num, adj_link[i]))
    return population
def run(popu, bound, iter_num):
    """Run iter_num synchronous bounded-confidence opinion updates.

    Each round, every agent replaces its opinion by the mean of the
    previous-round opinions of itself and those neighbours whose opinion
    lies within ``bound`` on the circular opinion space [0, 1).

    Returns:
        list[list]: per-agent opinion history (one value per round).
    """
    n_agents = len(popu)
    op_history = [[] for _ in range(n_agents)]
    for _ in range(iter_num):
        for idx in range(n_agents):
            current_op = popu[idx].get_old_op()
            op_history[idx].append(current_op)
            group = popu[idx].get_neighbor()
            group.append(idx)
            # Distance wraps around at 1.0, so both |d| and 1 - |d| are tested.
            close_opinions = [popu[other].get_old_op() for other in group
                              if abs(current_op - popu[other].get_old_op()) < bound
                              or (1.0 - abs(current_op - popu[other].get_old_op())) < bound]
            popu[idx].set_op(np.mean(close_opinions))
        # Synchronous update: commit new opinions only after the full sweep.
        for idx in range(n_agents):
            popu[idx].backup()
    return op_history
if __name__ == '__main__':
    # Network / population parameters for a single experiment run.
    group_size_r = 50
    group_base_r = 2
    group_length_r = 6
    mul_dimen_r = 10
    degree_r = 20
    alpha_r = 2
    beta_r = 2
    # Total number of agents implied by the group hierarchy (kept for reference).
    total_num_r = group_size_r * (group_base_r ** (group_length_r - 1))
    popu_r = initialize_population(group_size_r, group_base_r, group_length_r, mul_dimen_r, degree_r, alpha_r, beta_r)
    # 50 synchronous rounds with confidence bound 0.3.
    op_history_r = run(popu_r, 0.3, 50)
    op_history_pd = pd.DataFrame(op_history_r)
    plt.figure()
    # Transposed so each column (agent) is plotted as one opinion trajectory.
    op_history_pd.T.plot(legend=False)
    plt.show()
    print(op_history_pd)
"cdengcnc@sjtu.edu.cn"
] | cdengcnc@sjtu.edu.cn |
0a30fe7513d3a2f42aec2d973a649dbf459c724c | a82418f3d62b944a27b6e9000829af54b7575893 | /psets_gensim_v1/2016/cfg_hiddenValleyGridPack_higgs_m_5_ctau_500_xiO_1.py | 70b80c9b157815bbb87bd475c4a2e0286ff3b1d7 | [] | no_license | mcitron/hiddenValleyGeneration | abb347a30319ce5f230e0e1248a4259bf4cc4b1b | 5d165be91ae082fdba790506bfb11a026d602787 | refs/heads/master | 2023-04-08T13:34:56.835752 | 2021-04-28T17:14:46 | 2021-04-28T17:17:14 | 362,550,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,692 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/cfgs_update_filter_2016/hiddenValleyGridPack_higgs_m_5_ctau_500_xiO_1.py --python_filename cfg_hiddenValleyGridPack_higgs_m_5_ctau_500_xiO_1.py --eventcontent RAWSIM --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1,Configuration/DataProcessing/Utils.addMonitoring --datatier GEN-SIM --fileout file:output.root --conditions MCRUN2_71_V1::All --beamspot Realistic50ns13TeVCollision --customise_commands process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(1) --step LHE,GEN,SIM --magField 38T_PostLS1 --no_exec --mc -n 10
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic50ns13TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('Configuration/GenProduction/python/cfgs_update_filter_2016/hiddenValleyGridPack_higgs_m_5_ctau_500_xiO_1.py nevts:10'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RAWSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:output.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.gencount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("genfilter"),
minNumber = cms.uint32(1)
)
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(10),
PythiaParameters = cms.PSet(
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8PSweightsSettings = cms.vstring('UncertaintyBands:doVariations = on',
'UncertaintyBands:List = {isrRedHi isr:muRfac=0.707,fsrRedHi fsr:muRfac=0.707,isrRedLo isr:muRfac=1.414,fsrRedLo fsr:muRfac=1.414,isrDefHi isr:muRfac=0.5,fsrDefHi fsr:muRfac=0.5,isrDefLo isr:muRfac=2.0,fsrDefLo fsr:muRfac=2.0,isrConHi isr:muRfac=0.25,fsrConHi fsr:muRfac=0.25,isrConLo isr:muRfac=4.0,fsrConLo fsr:muRfac=4.0,fsr_G2GG_muR_dn fsr:G2GG:muRfac=0.5,fsr_G2GG_muR_up fsr:G2GG:muRfac=2.0,fsr_G2QQ_muR_dn fsr:G2QQ:muRfac=0.5,fsr_G2QQ_muR_up fsr:G2QQ:muRfac=2.0,fsr_Q2QG_muR_dn fsr:Q2QG:muRfac=0.5,fsr_Q2QG_muR_up fsr:Q2QG:muRfac=2.0,fsr_X2XG_muR_dn fsr:X2XG:muRfac=0.5,fsr_X2XG_muR_up fsr:X2XG:muRfac=2.0,fsr_G2GG_cNS_dn fsr:G2GG:cNS=-2.0,fsr_G2GG_cNS_up fsr:G2GG:cNS=2.0,fsr_G2QQ_cNS_dn fsr:G2QQ:cNS=-2.0,fsr_G2QQ_cNS_up fsr:G2QQ:cNS=2.0,fsr_Q2QG_cNS_dn fsr:Q2QG:cNS=-2.0,fsr_Q2QG_cNS_up fsr:Q2QG:cNS=2.0,fsr_X2XG_cNS_dn fsr:X2XG:cNS=-2.0,fsr_X2XG_cNS_up fsr:X2XG:cNS=2.0,isr_G2GG_muR_dn isr:G2GG:muRfac=0.5,isr_G2GG_muR_up isr:G2GG:muRfac=2.0,isr_G2QQ_muR_dn isr:G2QQ:muRfac=0.5,isr_G2QQ_muR_up isr:G2QQ:muRfac=2.0,isr_Q2QG_muR_dn isr:Q2QG:muRfac=0.5,isr_Q2QG_muR_up isr:Q2QG:muRfac=2.0,isr_X2XG_muR_dn isr:X2XG:muRfac=0.5,isr_X2XG_muR_up isr:X2XG:muRfac=2.0,isr_G2GG_cNS_dn isr:G2GG:cNS=-2.0,isr_G2GG_cNS_up isr:G2GG:cNS=2.0,isr_G2QQ_cNS_dn isr:G2QQ:cNS=-2.0,isr_G2QQ_cNS_up isr:G2QQ:cNS=2.0,isr_Q2QG_cNS_dn isr:Q2QG:cNS=-2.0,isr_Q2QG_cNS_up isr:Q2QG:cNS=2.0,isr_X2XG_cNS_dn isr:X2XG:cNS=-2.0,isr_X2XG_cNS_up isr:X2XG:cNS=2.0}',
'UncertaintyBands:nFlavQ = 4',
'UncertaintyBands:MPIshowers = on',
'UncertaintyBands:overSampleFSR = 10.0',
'UncertaintyBands:overSampleISR = 10.0',
'UncertaintyBands:FSRpTmin2Fac = 20',
'UncertaintyBands:ISRpTmin2Fac = 1'),
pythia8PowhegEmissionVetoSettings = cms.vstring('POWHEG:veto = 1',
'POWHEG:pTdef = 1',
'POWHEG:emitted = 0',
'POWHEG:pTemt = 0',
'POWHEG:pThard = 0',
'POWHEG:vetoCount = 100',
'SpaceShower:pTmaxMatch = 2',
'TimeShower:pTmaxMatch = 2'),
processParameters = cms.vstring('POWHEG:nFinal = 1',
'ParticleDecays:limitTau0= off',
'25:m0 =125',
'25:addChannel = 1 1.0 102 4900101 -4900101',
'25:0:onMode=0',
'25:1:onMode=0',
'25:2:onMode=0',
'25:3:onMode=0',
'25:4:onMode=0',
'25:5:onMode=0',
'25:6:onMode=0',
'25:7:onMode=0',
'25:8:onMode=0',
'25:9:onMode=0',
'25:10:onMode=0',
'25:11:onMode=0',
'25:12:onMode=0',
'25:13:onMode=0',
'HiddenValley:Ngauge = 3',
'HiddenValley:nFlav = 1',
'HiddenValley:fragment = on',
'HiddenValley:FSR = on',
'HiddenValley:alphaOrder = 1',
'HiddenValley:Lambda = 5.0',
'HiddenValley:pTminFSR = 5.5',
'HiddenValley:spinFv=0',
'4900101:m0 = 2.0',
'4900111:m0 = 5',
'4900113:m0 = 5.0',
'4900113:onMode = 0',
'4900111:addChannel = 1 0.044 91 21 21',
'4900111:addChannel = 1 0.009 91 3 -3',
'4900111:addChannel = 1 0.541 91 4 -4',
'4900111:addChannel = 1 0.004 91 13 -13',
'4900111:addChannel = 1 0.401 91 15 -15',
'4900111:tau0 = 5000'),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8PSweightsSettings',
'pythia8PowhegEmissionVetoSettings',
'processParameters')
)
)
# Select generated Higgs candidates (pdgId 25) with pT > 140 GeV; the
# status==62 requirement follows Pythia8 status-code conventions
# (NOTE(review): confirm against the generator's documentation).
process.genfilter = cms.EDFilter("GenParticleSelector",
    src = cms.InputTag("genParticlesForFilter"),
    cut = cms.string('(pdgId==25) && pt>140. && status==62')
)
# Rebuild GenParticles from the generator output so genfilter can run on them.
process.genParticlesForFilter = cms.EDProducer("GenParticleProducer",
    saveBarCodes = cms.untracked.bool(True),
    src = cms.InputTag("generator"),
    abortOnUnknownPDGCode = cms.untracked.bool(False)
)
# Run the POWHEG gridpack from CVMFS to produce the LHE events.
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    nEvents = cms.untracked.uint32(10),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh'),
    numberOfParameters = cms.uint32(1),
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/powheg/V2/gg_H_quark-mass-effects_NNPDF30_13TeV_M125/v2/gg_H_quark-mass-effects_NNPDF30_13TeV_M125_tarball.tar.gz')
)
# Hadronize, rebuild particles, filter, and require >= 1 passing candidate.
process.ProductionFilterSequence = cms.Sequence(process.generator+process.genParticlesForFilter+process.genfilter+process.gencount)
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition: LHE -> GEN -> filter summary -> SIM -> end job -> output
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step)
# filter all path with the production filter sequence
# (lhe_step is excluded: it produces the events the filter sequence consumes)
for path in process.paths:
    if path in ['lhe_step']: continue
    getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1
#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1(process)
# End of customisation functions
# Customisation from command line
# Fix the LHE producer's random seed so gridpack generation is reproducible.
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(1)
"mcitron@cern.ch"
] | mcitron@cern.ch |
df5a6b91b902fa050e18a252084453dd0d8a2d3d | 509b8316075f18612f5600993ccefbfe14527a35 | /src/_spacefligth/pipeline_registry.py | a674db6c272aee3dfe2557249f4228fec54e26d8 | [] | no_license | Princekrampah/SpaceFlightKedro | 44a2eb14a5e6356f136fa45dd0c9496a514aa5d7 | deab13030e4181fae33ce452a96403f549974750 | refs/heads/master | 2023-05-05T00:08:53.814882 | 2021-05-30T14:38:14 | 2021-05-30T14:38:14 | 372,237,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from typing import Dict
from kedro.pipeline import Pipeline
from _spacefligth.pipelines import data_processing as dp
from _spacefligth.pipelines import data_science as ds
def register_pipelines() -> Dict[str, Pipeline]:
    """Register the project's pipelines.

    Returns:
        A mapping from a pipeline name to a ``Pipeline`` object.
    """
    data_processing = dp.create_pipeline()
    data_science = ds.create_pipeline()
    # The default pipeline runs data processing followed by data science.
    combined = data_processing + data_science
    return {
        "__default__": combined,
        "dp": data_processing,
        "ds": data_science,
    }
| [
"jsksprince@gmail.com"
] | jsksprince@gmail.com |
cc1354efb7277cd1d71af9e0579c730536239931 | 14856ffe01c711af7a41af0b1abf0378ba4ffde6 | /Python/Django/group_project/apps/travel/models.py | 34f47a390411072fa349b8cca78f69d1ffdf6d69 | [] | no_license | sharonanchel/coding-dojo | 9a8db24eec17b0ae0c220592e6864510297371c3 | d6c4a7efd0804353b27a49e16255984c4f4b7f2a | refs/heads/master | 2021-05-05T18:17:48.101853 | 2017-06-23T23:53:51 | 2017-06-23T23:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Tourist (models.Model):
    """A travelling member linked to exactly one Destination."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    # External membership number (distinct from the DB primary key).
    memberID = models.IntegerField()
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0 and is a required argument from 2.0 on, so behaviour
    # is unchanged while the model becomes forward-compatible.
    destination_id = models.ForeignKey('Destination', on_delete=models.CASCADE)
class Destination (models.Model):
    """A travel destination that tourists can be assigned to."""
    country = models.CharField(max_length=100)
    city = models.CharField(max_length=100)
    description = models.TextField(max_length=1000)
| [
"jao.colin@gmail.com"
] | jao.colin@gmail.com |
f1ee89673ec345caeddc3233b30a649d55c62bf4 | d3e252c5c8a507b14aad3fba419c2c4535c49e27 | /migrations/versions/afe21b1fbed1_comment.py | 0b9201337663f2125c37b02f1562fefd02d97b10 | [] | no_license | MutuaFranklin/Watchlist | 2076dadc02eaa0599aec89393dc2c9721e1fdc5b | 73b033342fb58da9aa7d3911e38beb93e557aa47 | refs/heads/main | 2023-07-22T13:08:39.947380 | 2021-08-23T11:45:22 | 2021-08-23T11:45:22 | 392,306,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | """Comment
Revision ID: afe21b1fbed1
Revises: 24b376f6e5fa
Create Date: 2021-08-12 13:29:58.852546
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'afe21b1fbed1'
down_revision = '24b376f6e5fa'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``reviews`` table (one movie review per row, FK to users)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('reviews',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('movie_id', sa.Integer(), nullable=True),
    sa.Column('movie_title', sa.String(), nullable=True),
    sa.Column('image_path', sa.String(), nullable=True),
    sa.Column('movie_review', sa.String(), nullable=True),
    # NOTE(review): sa.Time() stores only a time of day; if a full
    # timestamp was intended for 'posted', sa.DateTime() is the usual choice.
    sa.Column('posted', sa.Time(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse the migration by dropping the ``reviews`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('reviews')
    # ### end Alembic commands ###
| [
"franklin.mutua@student.moringaschool.com"
] | franklin.mutua@student.moringaschool.com |
d6c07151daabf0c745ea0b53d3309a2a5408d995 | a4681043cb56a9ab45be32a62fa9700b391f087f | /19-Beautiful_Soup/10_of_11_Reading_Text.py | 9948d5ba655ac865cc685313ffa24266d7551eda | [] | no_license | MarceloDL-A/Python | b16b221ae4355b6323092d069bf83d1d142b9975 | c091446ae0089f03ffbdc47b3a6901f4fa2a25fb | refs/heads/main | 2023-01-01T02:29:31.591861 | 2020-10-27T19:04:11 | 2020-10-27T19:04:11 | 301,565,957 | 0 | 0 | null | 2020-10-27T19:04:12 | 2020-10-05T23:41:30 | Python | MacCentralEurope | Python | false | false | 2,953 | py | """
WEB SCRAPING WITH BEAUTIFUL SOUP
Reading Text
When we use BeautifulSoup to select HTML elements, we often want to grab the text inside of the element, so that we can analyze it. We can use .get_text() to retrieve the text inside of whatever tag we want to call it on.
<h1 class="results">Search Results for: <span class='searchTerm'>Funfetti</span></h1>
If this is the HTML that has been used to create the soup object, we can make the call:
soup.get_text()
Which will return:
'Search Results for: Funfetti'
Notice that this combined the text inside of the outer h1 tag with the text contained in the span tag inside of it! Using get_text(), it looks like both of these strings are part of just one longer string. If we wanted to separate out the texts from different tags, we could specify a separator character. This command would use a . character to separate:
soup.get_text('|')
Now, the command returns:
'Search Results for: |Funfetti'
"""
import requests
from bs4 import BeautifulSoup
prefix = "https://content.codecademy.com/courses/beautifulsoup/"
webpage_response = requests.get('https://content.codecademy.com/courses/beautifulsoup/shellter.html')
webpage = webpage_response.content
soup = BeautifulSoup(webpage, "html.parser")
turtle_links = soup.find_all("a")
links = []
#go through all of the a tags and get the links associated with them:
for a in turtle_links:
links.append(prefix+a["href"])
#Define turtle_data:
turtle_data = {}
#follow each link:
for link in links:
webpage = requests.get(link)
turtle = BeautifulSoup(webpage.content, "html.parser")
turtle_name = turtle.select(".name")[0].get_text()
turtle_data[turtle_name] = [turtle.find("ul").get_text("|").split("|")]
print(turtle_data)
"""
After the loop, print out turtle_data. We have been storing the names as the whole p tag containing the name.
Instead, letís call get_text() on the turtle_name element and store the result as the key of our dictionary instead.
hint:
turtle_name should now be equal to something like:
turtle.select(".name")[0].get_text()
"""
"""
Instead of associating each turtle with an empty list, letís have each turtle associated with a list of the stats that are available on their page.
It looks like each piece of information is in a li element on the turtleís page.
Get the ul element on the page, and get all of the text in it, separated by a '|' character so that we can easily split out each attribute later.
Store the resulting string in turtle_data[turtle_name] instead of storing an empty list there.
Hint:
At this point, the value of each turtle_data[turtle_name] should look something like:
turtle.find("ul").get_text("|")
"""
"""
When we store the list of info in each turtle_data[turtle_name], separate out each list element again by splitting on '|'.
Hint
At this point, the value of each turtle_data[turtle_name] should look something like:
turtle.find("ul").get_text("|").split("|")
"""
| [
"marcelo.delmondes.lima@usp.br"
] | marcelo.delmondes.lima@usp.br |
8f11c565a577e78d997f30bb8cfbc51293c2337a | d4280eca1a9badb0a4ad2aa22598616eedece373 | /PyQt/PyQt5 tutorial/Dialogs/inputdialog.py | be359884367273f401bca2ba1344afedd634941e | [] | no_license | Little-Captain/py | 77ec12bb2aaafe9f709a70831266335b03f63663 | 74ba3c3449e7b234a77500a17433e141e68169f7 | refs/heads/master | 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit,
QInputDialog, QApplication)
import sys
class Example(QWidget):
    """Demo window: a button that opens a QInputDialog and a line edit
    that displays the text the user entered."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the widgets, size the window, and show it."""
        self.btn = QPushButton('Dialog', self)
        self.btn.move(20, 20)
        # Clicking the button pops up the input dialog.
        self.btn.clicked.connect(self.showDialog)
        self.le = QLineEdit(self)
        self.le.move(130, 22)
        self.setGeometry(300, 300, 290, 150)
        self.setWindowTitle('Input dialog')
        self.show()
    def showDialog(self):
        """Open the input dialog and copy accepted text into the line edit."""
        # QInputDialog.getText shows a modal dialog. The first string is the
        # dialog title, the second one is the prompt within the dialog.
        # It returns the entered text and a boolean that is True on Ok.
        text, ok = QInputDialog.getText(self, 'Input Dialog',
                                        'Enter your name:')
        if ok:
            self.le.setText(str(text))
if __name__ == '__main__':
    # Standard PyQt bootstrap: create the application, build the window,
    # and hand control to the Qt event loop until the window closes.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
"littlecaptain@foxmail.com"
] | littlecaptain@foxmail.com |
b2c37f8ae5e7c59302df4e81734325b8f55263af | 430b9e03e36e355bba475df49505011f99fa0819 | /keji/lesson03_data_type_list (2)/demo7_tuple.py | 4d383d19df25b0628d127fe4cd18ac2cd5616b1a | [] | no_license | gaoyang1224/mysite | b43e5d5e378b810b94dd60ffcac1c992173cc11a | 72150c67b9590b0498241a1eacb2669a836520ff | refs/heads/master | 2023-05-01T21:42:40.096287 | 2021-05-20T14:40:30 | 2021-05-20T14:40:30 | 368,254,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # 元组是用 () 表示
# Tuples are written with parentheses: ()
a = (1,2)
print(a)
print(type(a))
print(len(a))
# An empty tuple
a = ()
print(a)
print(type(a))
print(len(a))
# To write a tuple with a single element:
# TODO: you must put a , after the element, otherwise it is not a tuple
# (NOTE(review): the example below actually has four elements, so it does
# not demonstrate the single-element case described above)
a = ("星河",1,2,3)
print(a)
print(type(a))
print(len(a))
# Tuples are immutable: they only support read operations
print(a[0])
print(a[1:3])
print(a.index("星河"))
# dictionaries
# sets
# data operations: + - , comparisons, and/not/or, membership tests
# homework
"15195989321@163.com"
] | 15195989321@163.com |
67cf26c42ec0530cc7f8e5bf1cb724eba7d8bf9d | 049ca48d22011604f4c7594c42467e0a6d95d7f5 | /tests/python3/kyu_5/test_convert_string_to_camel_case.py | b35988f80a839c8e29b752a13eb589d947b8f400 | [] | no_license | wangerde/codewars | 3ffdf560f0fd2333ab2711d20e2f2b32588fd9fd | bcfd15aba49f87c0a64cf840e96df06ef5ec9162 | refs/heads/master | 2021-01-23T05:35:29.217960 | 2017-01-15T18:23:30 | 2017-01-15T18:23:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # pylint: disable=missing-docstring
"""Convert string to camel case"""
import pytest
from python3.kyu_5.convert_string_to_camel_case import to_camel_case
# (argnames, argvalues) pair unpacked into pytest.mark.parametrize below.
EXAMPLES = (
    ('text', 'expected'),
    [
        ('', ''),
        ('the_stealth_warrior', 'theStealthWarrior'),
        ('The-Stealth-Warrior', 'TheStealthWarrior'),
        ('A-B-C', 'ABC'),
    ]
)
@pytest.mark.parametrize(*EXAMPLES)
def test_returns_correct_result(text, expected):
    """to_camel_case joins -/_ separated words, capitalising every word
    after the first and preserving the first word's capitalisation."""
    assert to_camel_case(text) == expected
"karateev.pavel@ya.ru"
] | karateev.pavel@ya.ru |
a474200d782ba6c520d3792b044a9ebced08b3a5 | 293a1d4ce3e3ec034fd4d662cb8dcc8c58b512e4 | /tools/scripts/prepare_submission.py | 516857ea830e2fac07e9523eb3457e5ab7411d2c | [] | no_license | czhiming/POSTECH | 87475137674dbce3d6add290ef455ca253d7c423 | 7e0436fe74e55ce0ec4875bc8d70964f85d64209 | refs/heads/master | 2021-09-02T12:11:17.001027 | 2018-01-02T13:51:11 | 2018-01-02T13:51:11 | 116,019,207 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

# Guard: the submission method name must be given on the command line.
if len(sys.argv) < 2:
    # Parenthesized print works on both Python 2 and 3 (single argument).
    print("usage: {} method-name < input.txt > output.txt".format(sys.argv[0]))
    exit(1)

method = sys.argv[1]

# Emit one tab-separated line per input line: <method> <1-based id> <text>.
for idx, line in enumerate(sys.stdin, 1):
    print("{}\t{}\t{}".format(method, idx, line.strip()))
| [
"qqchenzhiming@jxnu.edu.cn"
] | qqchenzhiming@jxnu.edu.cn |
4f7db53b849c5840d0ae7303bb14c6f8fdf62404 | 55ac013ac7fc80d878fb47def8d6218c2fe2d391 | /backend/home/management/commands/load_initial_data.py | f71a44de842eb711d2760b024594c8fce1b4e607 | [] | no_license | crowdbotics-apps/romano-at-law-3401 | 98f2845d138b9589b89b660a580beaad23050c25 | ae58daf3da747a5b19af96a7186a09424c1800c8 | refs/heads/master | 2022-12-13T14:02:46.247773 | 2019-05-15T17:32:08 | 2019-05-15T17:32:08 | 186,874,268 | 0 | 0 | null | 2022-12-06T16:01:29 | 2019-05-15T17:32:03 | JavaScript | UTF-8 | Python | false | false | 739 | py |
from django.core.management import BaseCommand
from home.models import CustomText, HomePage
def load_initial_data():
    """Seed the database with the default HomePage body and CustomText title."""
    # HTML shown on the generated application's landing page.
    homepage_body = """
    <h1 class="display-4 text-center">romano_at_law_3401</h1>
    <p class="lead">
    This is the sample application created and deployed from the crowdbotics slack app. You can
    view list of packages selected for this application below
    </p>"""
    customtext_title = 'romano_at_law_3401'
    CustomText.objects.create(title=customtext_title)
    HomePage.objects.create(body=homepage_body)
class Command(BaseCommand):
    """manage.py command that loads the app's initial database records."""
    # Allow this command to import Django settings.
    can_import_settings = True
    help = 'Load initial data to db'
    def handle(self, *args, **options):
        """Entry point invoked by manage.py."""
        load_initial_data()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
6fe5debd483b04800ed02ebb2fbc65a3cc6d0487 | 38e4d244f3d56a8027627c5fdad518d53a0f3bad | /Loaded/__init__.py | d381537adfa9d7de462f15245701b652e1391855 | [] | no_license | elgrandt/Tanks | be57822f00bdf698ea6622658ef33e3ed8e096e6 | b6c8e94342bbebb3e679e85eb5a2d2a8af9298f5 | refs/heads/master | 2022-12-01T11:32:58.424853 | 2020-08-17T21:05:30 | 2020-08-17T21:05:30 | 288,251,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | import Images
import Fonts | [
"dylantasat11@gmail.com"
] | dylantasat11@gmail.com |
b61316b862a7647911f05d2eea8e9e749f65e77d | 1eaaeee197d0809f354b8dfe669ecc2fe8424757 | /11_PaginationDRF/PaginationDRF/settings.py | c7083642ab915643a3596995ebe25064b54bcf3f | [
"MIT"
] | permissive | jhleed/LikeLion_Django_Study_Summary | 4ec3ae9b05b24eca370075c613c70211da957c1c | c788182af5bcfd16bdd4b57235a48659758e494b | refs/heads/master | 2022-03-27T16:53:42.886054 | 2019-12-07T03:49:33 | 2019-12-07T03:49:33 | 265,724,111 | 1 | 0 | MIT | 2020-05-21T01:22:33 | 2020-05-21T01:22:33 | null | UTF-8 | Python | false | false | 3,292 | py | """
Django settings for PaginationDRF project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this generated key is committed to the repo; load it from the
# environment before deploying anywhere real.
SECRET_KEY = 'ihc!jbbs!+4_cr)$y*@74&0a63zd$vc)oaxitr1i5vdhp3z-oq'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'post.apps.PostConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'PaginationDRF.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'PaginationDRF.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'

# DRF: global page-number pagination, 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS':
        'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10,
}
| [
"alstn2468_@naver.com"
] | alstn2468_@naver.com |
af35cc25640ac62e7ee66225a9dc8f00d4b603d8 | 9568dee77459304ad0f7e01c9dea9432c11377d0 | /warp_the_pickle_new.py | ea441a6ec3b80bab4e24ebd6ddb5d7ab0d4ea8cf | [
"MIT"
] | permissive | lbaumo/wtgpipeline | c101c7e7ec1491a1c40cbe14102662770641bb9a | 73de01736e33769c09c4467e3c040545d7070407 | refs/heads/master | 2021-06-20T14:40:38.263891 | 2017-08-14T21:08:24 | 2017-08-14T21:08:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,091 | py | #!/usr/bin/env python
import sys, glob,pyfits, os.path
#from numpy import *
import scipy
import scipy.interpolate.interpolate as interp
#from dappleutils import readtxtfile
#from optparse import OptionParser
c = 299792458e10 #Angstroms/s
def get_sdss_spectra(gmi,umg,gmr,imz,number=4,tol=0.01,S_N=5):
import sqlcl
dict_names = ['plate', 'MJD', 'fiberID', 'ra', 'dec', 'mag_0', 'mag_1', 'mag_2']
#query = 'select top ' + str(number) + ' ' + reduce(lambda x,y: x + ',' + y, ['s.' + x for x in dict_names]) + ' from specobjall as s join specphotoall as p on s.specobjid = p.specobjid where abs(s.mag_0 - s.mag_1 - ' + str(gmr) + ') < ' + str(tol) + ' and abs(s.mag_1 - s.mag_2 - ' + str(rmi) + ') < ' + str(tol) + ' and abs(s.mag_0 - s.mag_2 - ' + str(gmr + rmi) + ') < ' + str(tol) + ' and s.sn_0 > ' + str(S_N) + ' and s.sn_1 > ' + str(S_N) + ' and s.sn_2 > ' + str(S_N) + ' and abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 order by -1.*s.sn_1'
if False: pattern = 'zbelodiesptype like "%v%" and zbelodiesptype not like "%var%"'
#elif 0.7 < rmi < 1.0: pattern = '(zbelodiesptype like "%G%v%" or zbelodiesptype like "%K%v%" or zbelodiesptype like "%M%v%")'
else:
pattern = 'zbelodiesptype like "%M%v%"'
''' try to approximately match u and z band stellar colors as well, not just spectroscopic magnitudes '''
query = "select top " + str(number) + " " + reduce(lambda x,y: x + "," + y, ["s." + x for x in dict_names]) + " \
from specobjall as s join specphoto as p on s.specobjid = p.specobjid join sppParams sp on sp.specobjid = s.specobjid \
where zbclass='STAR' and " + pattern + " and abs(s.mag_0 - s.mag_2 - " + str(gmi) + ") < " + str(tol) + " and \
abs(s.mag_0 - s.mag_1 - " + str(gmr) + ") < " + str(tol) + " and abs(s.mag_1 - s.mag_2 - " + str(gmi - gmr) + ") < " + str(tol) + " and \
s.sn_0 > " + str(S_N) + " and s.sn_1 > " + str(S_N) + " and s.sn_2 > " + str(S_N) + " and \
abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 \
and abs(p.fibermag_u - p.fibermag_g - " + str(umg) + ") < 0.1 and abs(p.fibermag_i - p.fibermag_z - " + str(imz) + ") < 0.1 \
order by -1.*s.sn_1"
if rmi < 0.7: pattern = 'zbelodiesptype like "%v%" and zbelodiesptype not like "%var%"'
#elif 0.7 < rmi < 1.0: pattern = '(zbelodiesptype like "%G%v%" or zbelodiesptype like "%K%v%" or zbelodiesptype like "%M%v%")'
else: pattern = 'zbelodiesptype like "%M%v%"'
query = 'select top ' + str(number) + ' ' + reduce(lambda x,y: x + ',' + y, ['s.' + x for x in dict_names]) + ' from specobjall as s join specphoto as p on s.specobjid = p.specobjid join sppParams sp on sp.specobjid = s.specobjid where zbclass="STAR" and ' + pattern + ' and abs(s.mag_0 - s.mag_1 - ' + str(gmr) + ') < ' + str(tol) + ' and abs(s.mag_1 - s.mag_2 - ' + str(rmi) + ') < ' + str(tol) + ' and abs(s.mag_0 - s.mag_2 - ' + str(gmr + rmi) + ') < ' + str(tol) + ' and s.sn_0 > ' + str(S_N) + ' and s.sn_1 > ' + str(S_N) + ' and s.sn_2 > ' + str(S_N) + ' and abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 and abs(' + str(umg) + ' - (p.psfMag_u - p.psfMag_g)) < 0.05 and abs(' + str(imz) + ' - (p.psfMag_i - p.psfMag_z)) < 0.05 \
order by -1.*s.sn_1'
#select top 100 zbclass, zbelodiesptype, zbsubclass from sppParams where zbsubclass like '%M%' and zbclass='STAR'
import time
time.sleep(1.5)
print query
lines = sqlcl.query(query).readlines()
print lines
dicts = []
if lines[0] != 'N':
for line in lines[1:]:
dict = {}
line = line.replace('\n','')
import re
res = re.split(',',line)
print res
for i in range(len(res)):
if dict_names[i] == 'fiberID' or dict_names[i] == 'plate' or dict_names[i] == 'MJD':
dict[dict_names[i]] = int(res[i])
else:
dict[dict_names[i]] = (res[i])
print dict
dicts.append(dict)
print dicts
return dicts
def retrieve_sdss_spectra(dict, plot=False):
    """Download the 1-D SDSS spectrum described by `dict` (plate/MJD/fiberID)
    and return it as an Nx2 array of (wavelength [Angstrom], flux).

    `dict` gains 'gmr' and 'rmi' color entries as a side effect.
    Set plot=True to display the spectrum with pylab.
    """
    dict['gmr'] = float(dict['mag_0']) - float(dict['mag_1'])
    dict['rmi'] = float(dict['mag_1']) - float(dict['mag_2'])
    print dict
    # Remote FITS file on the SDSS DAS; pyfits can open URLs directly.
    file = "http://das.sdss.org/spectro/1d_26/%(plate)04d/1d/spSpec-%(MJD)d-%(plate)04d-%(fiberID)03d.fit" % dict
    #output = "/tmp/spSpec-%(MJD)d-%(plate)04d-%(fiberID)d.fit" % dict
    #os.system('wget ' + file + ' -O ' + output)
    print file
    import pyfits, scipy
    import scipy
    p = pyfits.open(file)
    # Row 0 of the primary HDU is the flux; row 3 is the pixel mask
    # (mask filtering is currently disabled below).
    mask = p[0].data[3]
    flux = p[0].data[0]
    indices = scipy.array(range(len(flux)))
    #flux = flux[mask==0]
    #indices = indices[mask==0]
    #mask = mask[mask==0]
    print mask
    # SDSS spectra are log10-linear in wavelength: lambda = 10^(c0 + c1*i)
    COEFF0 = p[0].header['COEFF0']
    COEFF1 = p[0].header['COEFF1']
    import scipy
    wavelength = 10.**(COEFF0 + COEFF1*indices)
    spectrum = []
    for i in range(len(indices)):
        spectrum.append([wavelength[i],flux[i]])
    import scipy
    spectrum = scipy.array(spectrum)
    if plot:
        import pylab
        pylab.plot(spectrum[:,0], spectrum[:,1])
        pylab.xlabel('angstroms')
        pylab.ylabel('flux')
        pylab.show()
    return spectrum
def make_new_spectrum(locus_index,plot=False):
filters = get_filters()
import pickle
f = open('picklelocus_MACS','r')
m = pickle.Unpickler(f)
stars = m.load()
import string
spectra_complete = load_spectra()
locus_list = locus()
comp_list = filter(lambda x: string.find(x.replace('SDSS_',''),'SDSS')!=-1 and string.find(x,'SDSS_')!=-1, locus_list.keys())
print comp_list
import pylab
gmi_all = locus_list['GSDSS_ISDSS'][:]
umg_all = locus_list['USDSS_GSDSS'][:]
gmr_all = locus_list['GSDSS_RSDSS'][:]
imz_all = locus_list['ISDSS_ZSDSS'][:]
#locus_index = 13
print 'locus_index', locus_index
gmi = locus_list['GSDSS_ISDSS'][locus_index]
umg = locus_list['USDSS_GSDSS'][locus_index]
gmr = locus_list['GSDSS_RSDSS'][locus_index]
imz = locus_list['ISDSS_ZSDSS'][locus_index]
print gmi, umg, gmr, imz
if plot:
pylab.clf()
pylab.scatter(gmr_all,rmi_all,color='blue')
pylab.scatter(gmr,rmi,color='red')
pylab.show()
if False:
closest = closest_pickles(stars, locus_list, locus_index, comp_list)
closest_index = closest[1][1]
import pylab
print 'plotting'
print spectra_complete[closest_index][0][:,0]
print spectra_complete[closest_index][0][:,1]
pylab.plot(spectra_complete[closest_index][0][:,0],spectra_complete[closest_index][0][:,1])
pylab.xlim(3000,11000)
pylab.show()
print 'plotted'
import pickle
f = open('picklelocus_MACS','r')
m = pickle.Unpickler(f)
stars = m.load()
locus_list = locus()
good = False
gmi_off = 0
gmr_off = 0
trys = 0
tol = 0.01
while not good:
trys += 1
#if trys > 4: tol = 0.02
#if trys > 6: tol = 0.03
#if trys > 10: tol = 0.05
print gmi, umg, gmr, imz
dicts = get_sdss_spectra(gmi-gmi_off,umg,gmr-gmr_off,imz,tol=tol)
if len(dicts):
print dicts
gmi_diffs = []
gmr_diffs = []
for dict in dicts:
spectrum = retrieve_sdss_spectra(dict,plot=False)
mags = synth([1.],[[spectrum]],filters,show=False)
print mags
gmi_diffs.append(mags['GSDSS'] - mags['ISDSS'] - gmi)
gmr_diffs.append(mags['GSDSS'] - mags['RSDSS'] - gmr)
print mags['GSDSS'] - mags['ISDSS'], gmi
print float(dict['mag_0']) - float(dict['mag_2'])
print mags['GSDSS'] - mags['RSDSS'], gmr
print float(dict['mag_0']) - float(dict['mag_1'])
gmi_diffs.sort()
gmr_diffs.sort()
median_gmi = gmi_diffs[int(len(gmr_diffs)/2)]
median_gmr = gmr_diffs[int(len(rmi_diffs)/2)]
if abs(median_gmr) > tol or abs(median_rmi) > tol:
gmi_off += median_gmr
gmr_off += median_rmi
else: good = True
print gmi_diffs, gmr_diffs
print median_gmi, median_gmr
print gmi, gmr
else: tol += 0.01
print spectrum
print comp_list
if plot:
max = spectrum[:,1].max()
pylab.plot(spectrum[:,0],spectrum[:,1]/max)
#pylab.plot(spectra_complete[closest_index][0][:,0],spectra_complete[closest_index][0][:,1])
pylab.xlim(3000,11000)
pylab.show()
sdssSpec, pickleSpec = similar(spectrum)
stitchSpec = optimize(sdssSpec,pickleSpec, locus_index,plot=plot)
print stitchSpec
return stitchSpec
''' assemble a new locus '''
def make_new_locus():
    """Assemble a synthetic stellar locus by building a stitched spectrum for
    every other locus entry and synthesizing its colors in all filters.

    Writes three pickles as it goes: 'newlocus_SYNTH' (color lists keyed by
    color name), 'maglocus_SYNTH' (per-entry magnitude dicts) and
    'spectra_SYNTH' (the stitched spectra themselves).
    """
    locus_list = locus()
    keys = locus_list.keys()
    # extra (non-SDSS) color combinations to synthesize as well
    keys += ['WSRSUBARU_WSGSUBARU','WSRSUBARU_WSISUBARU','WSRSUBARU_WSZSUBARU','MPUSUBARU_WSRSUBARU','BJOHN_WSRSUBARU','WSGSUBARU_WSISUBARU','WHTB_VJOHN','WHTU_VJOHN','B_VJOHN','I_VJOHN']
    print keys
    locus_list_new = dict([[x,[]] for x in keys])
    filters = get_filters(sdss=False)
    print filters
    locus_list_mag = []
    if False:
        # resume path: reload previously computed results
        import pickle
        f = open('newlocus','r')
        m = pickle.Unpickler(f)
        locus_list_new = m.load()
        import pickle
        f = open('maglocus','r')
        m = pickle.Unpickler(f)
        locus_list_mag = m.load()
    spectra = []
    # every other locus entry: 0, 2, 4, ...
    for i in 2* scipy.array(range(len(locus_list[keys[0]])/2)):
        # skip entries already present in a (re)loaded maglocus
        # NOTE(review): `>` here looks like it should be `>=` -- confirm
        if i > len(locus_list_mag):
            stitchSpec = make_new_spectrum(i,plot=True)
            spectra.append(stitchSpec)
            mags = synth([1.,0,0,0],[[stitchSpec]],filters)
            print filters
            print mags['GSDSS'] - mags['RSDSS']
            print mags
            locus_list_mag.append(mags)
            for key in keys:
                if key != 'NUM':
                    import re
                    res = re.split('\_',key)
                    # color = magnitude difference of the two filter names
                    locus_list_new[key].append(mags[res[0]] - mags[res[1]])
                else: locus_list_new['NUM'] = i
            print locus_list_new
            # checkpoint all three products after each locus entry
            import pickle
            f = open('newlocus_SYNTH','w')
            m = pickle.Pickler(f)
            pickle.dump(locus_list_new,m)
            f.close()
            import pickle
            f = open('maglocus_SYNTH','w')
            m = pickle.Pickler(f)
            pickle.dump(locus_list_mag,m)
            f.close()
            import pickle
            f = open('spectra_SYNTH','w')
            m = pickle.Pickler(f)
            pickle.dump(spectra,m)
            f.close()
def optimize(specSDSS,pickleSpec,locus_index,plot=False):
    """Stitch an SDSS spectrum to a Pickles template and warp the blue/red
    ends so the synthesized SDSS colors match the locus entry.

    The SDSS spectrum supplies 4200-8500 A; the Pickles template supplies the
    u-side (<4200 A) and z-side (>8500 A), each rescaled to match the SDSS
    flux in an overlap window and then multiplied by a fitted linear warp.
    Returns the stitched Nx2 (wavelength, flux) array.
    """
    filters = get_filters()
    locus_list = locus()
    import string
    comp_list = filter(lambda x: string.find(x.replace('SDSS_',''),'SDSS')!=-1 and string.find(x,'SDSS_')!=-1, locus_list.keys())
    print comp_list
    grdiff = (locus_list['GSDSS_RSDSS'][locus_index])
    sdssSpline = interp.interp1d(specSDSS[:,0], specSDSS[:,1],
                                 bounds_error = False,
                                 fill_value = 0.)
    # second assignment intentionally overrides the first: use 4200-8500 A
    sdssLimits = [specSDSS[0,0],8500] #specSDSS[-1,0]]
    sdssLimits = [4200,8500] #specSDSS[-1,0]]
    zOverLap = [8000,9000]
    uOverLap = [4100,4600]
    print sdssLimits
    # trim the SDSS spectrum to the trusted wavelength window
    specSDSS_new = []
    for l in specSDSS:
        if sdssLimits[0] < l[0] < sdssLimits[1]:
            specSDSS_new.append(l)
    import scipy
    specSDSS = scipy.array(specSDSS_new)
    # split the Pickles template into the u-side, z-side and overlap windows
    uSpec = []
    zSpec = []
    zOverLapData = []
    uOverLapData = []
    for l in pickleSpec:
        if l[0] < sdssLimits[0]:
            uSpec.append(l)
        if l[0] > sdssLimits[1]:
            zSpec.append(l)
        if zOverLap[0] < l[0] < zOverLap[1]:
            zOverLapData.append(l)
        if uOverLap[0] < l[0] < uOverLap[1]:
            uOverLapData.append(l)
    uOverLapData = scipy.array(uOverLapData)
    zOverLapData = scipy.array(zOverLapData)
    uSpec = scipy.array(uSpec)
    zSpec = scipy.array(zSpec)
    # median flux ratio in each overlap window sets the template rescaling
    zRescale = scipy.median(sdssSpline(zOverLapData[:,0])/ zOverLapData[:,1])
    uRescale = scipy.median(sdssSpline(uOverLapData[:,0])/ uOverLapData[:,1])
    import scipy
    uSpec = scipy.array(zip(uSpec[:,0],uRescale*uSpec[:,1]))
    zSpec = scipy.array(zip(zSpec[:,0],zRescale*zSpec[:,1]))
    import pylab
    if False:
        pylab.clf()
        pylab.plot(uSpec[:,0],uSpec[:,1])
        pylab.plot(zSpec[:,0],zSpec[:,1])
        pylab.plot(specSDSS[:,0],specSDSS[:,1])
        pylab.show()
    # NOTE(review): this nested helper shadows both the `plot` argument and
    # the module-level plot() function.
    def plot(specStitch,pickleSpecMod):
        pylab.clf()
        pylab.plot(specStitch[:,0],specStitch[:,1])
        print pickleSpecMod
        #pylab.plot(pickleSpecMod[:,0],pickleSpecMod[:,1])
        pylab.xlim([3000,10000])
        pylab.show()
    fit_list = ['USDSS_GSDSS','GSDSS_ZSDSS','ISDSS_Z_SDSS']
    def errfunc(p,plot_it=False, getSpec=False):
        # p[0]/p[1]: end-point multipliers of linear warps applied to the
        # blue (<uOverLap) and red (>zOverLap) ends respectively.
        uWarp = interp.interp1d([2500]+uOverLap, [abs(p[0]),1.,1.],
                                bounds_error = False,
                                fill_value = 1.)
        zWarp = interp.interp1d(zOverLap + [11000], [1.,1.,abs(p[1])],
                                bounds_error = False,
                                fill_value = 1.)
        specStitch_0 = (uSpec[:,0].tolist() + specSDSS[:,0].tolist() + zSpec[:,0].tolist())
        specStitch_1 = (uSpec[:,1].tolist() + specSDSS[:,1].tolist() + zSpec[:,1].tolist())
        specStitch = scipy.array(zip(specStitch_0,specStitch_1*uWarp(specStitch_0)*zWarp(specStitch_0)))
        mags = synth([1.,0,0,0],[[specStitch]],filters)
        #print mags
        #raw_input()
        if False: #getSpec: #plot_it:
            import pylab
            pylab.plot(pickleSpec[:,0],uWarp(pickleSpec[:,0])*zWarp(pickleSpec[:,0]),color='red')
            pylab.xlim([3000,10000])
            pylab.show()
            #plot(specStitch,scipy.array(zip(pickleSpec[:,0].tolist(),(uWarp(pickleSpec[:,0])*zWarp(pickleSpec[:,0])*pickleSpec[:,1]).tolist())))
            #plot(specStitch,scipy.array(zip(specStitch[:,0].tolist(),(uWarp(specStitch[:,0])*zWarp(specStitch[:,0])*specStitch[:,1]).tolist())))
            plot(specStitch,specStitch[:,1])
            pylab.show()
        #print mags
        # squared residuals of synthesized vs. locus colors
        ugdiff = (mags['USDSS'] - mags['GSDSS'] - locus_list['USDSS_GSDSS'][locus_index])
        #urdiff = (mags['USDSS'] - mags['RSDSS'] - locus_list['USDSS_RSDSS'][locus_index])
        gzdiff = (mags['GSDSS'] - mags['ZSDSS'] - locus_list['GSDSS_ZSDSS'][locus_index])
        izdiff = (mags['ISDSS'] - mags['ZSDSS'] - locus_list['ISDSS_ZSDSS'][locus_index])
        ridiff = (mags['RSDSS'] - mags['ISDSS'] - locus_list['RSDSS_ISDSS'][locus_index])
        stat = ( ugdiff**2. + gzdiff**2. + izdiff**2. + ridiff**2.)
        print (locus_list['GSDSS_RSDSS'][locus_index]), mags['GSDSS'] - mags['RSDSS']
        print ugdiff, gzdiff, izdiff, stat
        if getSpec: return specStitch
        else:
            return stat
    # NOTE(review): this import shadows the enclosing function's own name
    # locally; scipy.optimize.fmin is the Nelder-Mead simplex minimizer.
    from scipy import optimize
    pinit = [1.,1.]
    out = scipy.optimize.fmin(errfunc,pinit,args=())
    print out
    # `plot` is the nested function here (always truthy), not the argument
    stitchSpec = errfunc(out,plot_it=plot,getSpec=True)
    mags = synth([1.,0,0,0],[[stitchSpec]],filters)
    print (locus_list['GSDSS_RSDSS'][locus_index]), mags['GSDSS'] - mags['RSDSS']
    return stitchSpec
def similar(input):
    """Find the Pickles library spectrum most similar to an SDSS spectrum.

    Both spectra are median-normalized; similarity is the summed absolute
    flux difference after removing the median offset.  Returns
    (normalized SDSS spectrum, best-matching Pickles spectrum rescaled onto
    the same flux normalization).
    """
    #sdssSpectrum = sdssSpectrum[0]
    from copy import copy
    sdssSpectrum = copy(input)
    import scipy, pylab
    print scipy.median(sdssSpectrum[:,1])
    # normalize the SDSS flux by its median
    sdssSpectrum[:,1] = sdssSpectrum[:,1] / (scipy.ones(len(sdssSpectrum[:,1]))*scipy.median(sdssSpectrum[:,1]))
    print sdssSpectrum
    spectra_complete = load_spectra()
    diffs = []
    for i in range(len(spectra_complete)):
        sp = spectra_complete[i]
        spectrum = sp[0]
        # resample the template onto the SDSS wavelength grid
        picklesSpline = interp.interp1d(spectrum[:,0], spectrum[:,1],
                                        bounds_error = False,
                                        fill_value = 0.)
        specInterp = picklesSpline(sdssSpectrum[:,0])
        specInterp = specInterp / (scipy.ones(len(sdssSpectrum[:,1]))*scipy.median(specInterp))
        diff = specInterp - sdssSpectrum[:,1]
        diff = diff - scipy.ones(len(diff))*scipy.median(diff)
        stat = abs(diff).sum()
        print stat, i
        diffs.append([stat,i])
    diffs.sort()
    # recompute the normalization for the single best-matching template
    sp = spectra_complete[diffs[0][1]]
    spectrum = sp[0]
    picklesSpline = interp.interp1d(spectrum[:,0], spectrum[:,1],
                                    bounds_error = False,
                                    fill_value = 0.)
    specInterp = picklesSpline(sdssSpectrum[:,0])
    specInterp = specInterp / (scipy.ones(len(sdssSpectrum[:,1]))*scipy.median(specInterp))
    diff = specInterp - sdssSpectrum[:,1]
    diff = diff - scipy.ones(len(diff))*scipy.median(diff)
    import scipy
    specAll = scipy.array(zip(spectrum[:,0], spectrum[:,1] / (scipy.ones(len(spectrum[:,1]))*scipy.median(specInterp))))
    if False:
        pylab.clf()
        pylab.plot(specAll[:,0],specAll[:,1])
        pylab.plot(sdssSpectrum[:,0],sdssSpectrum[:,1])
        pylab.plot(sdssSpectrum[:,0],diff)
        pylab.xlim(3000,11000)
        pylab.show()
    ''' need to fit spectral ends to reproduce locus color '''
    return sdssSpectrum, specAll
def load_spectra():
    """Read and return the pickled Pickles spectra list written by get_spectra()."""
    import pickle
    with open('picklespectra', 'r') as handle:
        return pickle.Unpickler(handle).load()
def locus():
    """Parse $bonn/locus.txt into {color_name: [values...]}.

    Lines that do not begin with a space are color names; indented lines are
    whitespace-separated value rows (first token dropped).  The pairing of
    the i-th name with the i-th data row relies on all name lines preceding
    the data lines in the file -- TODO confirm the file layout.
    """
    import os, re
    f = open(os.environ['bonn'] + '/locus.txt','r').readlines()
    id = -1
    rows = {}
    colors = {}
    for i in range(len(f)):
        l = f[i]
        if l[0] != ' ':
            # header line: remember the color name by its line index
            rows[i] = l[:-1]
        else:
            # data line: attach values to the id-th recorded name
            id += 1
            colors[rows[id]] = [float(x) for x in re.split('\s+',l[:-1])[1:]]
    import pylab
    #pylab.scatter(colors['GSDSS_ZSDSS'],colors['RSDSS_ISDSS'])
    #pylab.show()
    return colors
def readtxtfile(file):
    """Read a whitespace-delimited numeric text file into a 2-D float array.

    Lines beginning with '#' and blank lines are skipped; each remaining
    line is split on whitespace and converted to floats.

    Fixes vs. the previous version: blank lines no longer produce an empty
    row (which made the output ragged), the comment test happens before the
    split, and the duplicated `import re` is gone.  numpy.array replaces the
    scipy.array alias (removed from modern SciPy; this file already uses
    numpy in get_filters).
    """
    import re
    import numpy
    rows = []
    for l in open(file, 'r').readlines():
        if not l.strip():
            continue  # blank line
        if l[0] == '#':
            continue  # comment line
        res = re.split('\s+', l)
        # drop empty tokens produced by leading/trailing whitespace
        if res[0] == '':
            res = res[1:]
        if res and res[-1] == '':
            res = res[:-1]
        rows.append([float(x) for x in res])
    return numpy.array(rows)
def get_filters(sdss=True):
    """Load filter transmission curves and return a list of
    [interp1d spline, wavelength step, name] triplets.

    sdss=True loads only the five SDSS curves; otherwise a large set of
    Subaru/MegaPrime/Johnson/2MASS/WHT curves is loaded as well.  Curve
    files are read from a hardcoded bpz FILTER directory.
    """
    #filter = readtxtfile(filterfile)[:,:2]
    if sdss:
        #flist = [['USDSS','u_SDSS.res'],['GSDSS','g_SDSS.res'],['RSDSS','r_SDSS.res'],['ISDSS','i_SDSS.res'],['ZSDSS','z_SDSS.res']]
        flist = [['USDSS','SDSS-u.res'],['GSDSS','SDSS-g.res'],['RSDSS','SDSS-r.res'],['ISDSS','SDSS-i.res'],['ZSDSS','SDSS-z.res']]
    else:
        flist = [['BJOHN','SUBARU-10_1-1-W-J-B.res'],['VJOHN','SUBARU-10_1-1-W-J-V.res'],['RJOHN','SUBARU-10_1-1-W-C-RC.res'],['IJOHN','SUBARU-10_1-1-W-C-IC.res'],['MPUSUBARU','MEGAPRIME-0-1-u.res'],['MPGSUBARU','MEGAPRIME-0-1-g.res'],['MPRSUBARU','MEGAPRIME-0-1-r.res'],['MPISUBARU','MEGAPRIME-0-1-i.res'],['MPZSUBARU','MEGAPRIME-0-1-z.res'],['USDSS','SDSS-u.res'],['GSDSS','SDSS-g.res'],['RSDSS','SDSS-r.res'],['ISDSS','SDSS-i.res'],['ZSDSS','SDSS-z.res'],['JTMASS','J2MASS.res'],['HTMASS','H2MASS.res'],['KTMASS','K2MASS.res'],['WSZSUBARU','SUBARU-10_1-1-W-S-Z+.res'],['CAPAKIS','i_subaru.res'],['WSISUBARU','SUBARU-10_1-1-W-S-I+.res'],['WKSUBARU','SPECIAL-0-1-K.res'],['WSGSUBARU','SUBARU-10_1-1-W-S-G+.res'],['WSRSUBARU','SUBARU-10_1-1-W-S-R+.res'],['WHTB','WHT-0-1-B.res'],['WHTU','WHT-0-1-U.res'],['B','B_12k.res'],['I','MEGAPRIME-0-1-i.res']]
    filters = []
    for name, filt_name in flist:
        file = '/a/wain010/g.ki.ki04/pkelly/bpz-1.99.2/FILTER/' + filt_name
        #filt = readtxtfile(file)
        import numpy
        filt = numpy.loadtxt(file)
        #filt = filt[filt[:,1]>0]
        import pylab
        print filt_name
        #pylab.plot(filt[:,0],filt[:,1])
        #pylab.show()
        # nominal wavelength step from the first two samples
        step = filt[1,0] - filt[0,0]
        # reverse curves stored red-to-blue so wavelength is increasing
        if filt[0,0] > filt[-1,0]:
            filt_list = filt.tolist()
            filt_list.reverse()
            import scipy
            filt = scipy.array(filt_list)
            print filt
        import string
        #if string.find(filt_name,'SDSS') != -1:
        from copy import copy
        # zero transmission outside the tabulated range
        filterSpline = interp.interp1d(filt[:,0], filt[:,1],
                                       bounds_error = False,
                                       fill_value = 0.)
        filters.append([copy(filterSpline),copy(step),copy(name)])
    return filters
def get_spectra():
    """Parse every dwarf Pickles spectrum file and cache the list in 'picklespectra'."""
    paths = glob.glob('dwarf-pickles/*.dat')[:]
    spectra = []
    for path in paths:
        # keep wavelength + flux columns, remember the source filename
        spectra.append([readtxtfile(path)[:, :2], path])
    import pickle
    out = open('picklespectra', 'w')
    writer = pickle.Pickler(out)
    # works under py2 because Pickler exposes the file's write method
    pickle.dump(spectra, writer)
    out.close()
def applyFilter():
    """Synthesize AB magnitudes for every dwarf Pickles spectrum in every
    filter and pickle the resulting list of dicts to 'picklelocus_MACS'.

    Each dict maps filter name -> AB magnitude, plus 'name' -> source file.
    Returns the list as well.
    """
    spectrafiles = glob.glob('dwarf-pickles/*.dat')[:]
    spectra = [[readtxtfile(s)[:,:2],s] for s in spectrafiles]
    filters = get_filters()
    nspectra = len(spectra)
    ''' interpolate only on the filter '''
    spec_mags = []
    for spec,name in spectra:
        star = {'name':name}
        for filterSpline, step, filt_name in filters:
            specStep = spec[1,0] - spec[0,0] # wavelength increment
            resampFilter = filterSpline(spec[:,0]) # define an interpolating function
            # flux integral through the filter (simple rectangle rule)
            val = sum(specStep * resampFilter * spec[:,1])
            logEff = scipy.log10(val)
            # AB normalization integral; c is in Angstrom/s (module constant)
            logNorm = scipy.log10(sum(resampFilter*c*specStep/spec[:,0]**2))
            mag = 2.5*(logNorm - logEff) # to calculated an AB magnitude
            star[filt_name] = mag
        spec_mags.append(star)
    import pickle
    f = open('picklelocus_MACS','w')
    m = pickle.Pickler(f)
    pickle.dump(spec_mags,m)
    f.close()
    return spec_mags
def synth(p,spectra,filters,show=False):
    """Synthesize AB magnitudes for a (weighted) spectrum in each filter.

    p       -- coefficient list; only p[0] is used (weight of spectra[0])
    spectra -- list of [Nx2 spectrum array, ...] entries
    filters -- [spline, step, name] triplets from get_filters()
    Returns {filter_name: AB_mag}.
    """
    #polyfunc = lambda x: abs(1. + p[2]*x + p[3]*x**2.) #+ p[5]*x**3.)
    mags ={}
    import scipy
    for filterSpline, step, filt_name in filters:
        specall = scipy.zeros(len(spectra[0][0][:,1]))
        val = 0
        # combination of several weighted spectra is disabled; only the
        # first spectrum with weight p[0] contributes
        for coeff,specfull in [[p[0],spectra[0]]]: #,[p[1],spectra[1]],[1.-p[0]-p[1],spectra[2]]]:
            spec = specfull[0]
            print spec
            specStep = spec[1:,0] - spec[0:-1,0] # wavelength increment
            print specStep[400:600], 'specStep'
            resampFilter = filterSpline(spec[:,0]) # define an interpolating function
            print resampFilter
            print filt_name
            import pylab, string
            if False: #string.find(filt_name,'SDSS') != -1:
                pylab.plot(spec[:,0],resampFilter)
                pylab.show()
            ''' need to multiply by polynomial '''
            #polyterm = polyfunc(spec[:,0]) # define an interpolating function
            #specall = polyterm * spec[:,1]
            # rectangle-rule flux integral with per-sample wavelength steps
            val += abs(coeff)*sum(specStep * resampFilter[:-1] * spec[:-1,1])
        logEff = scipy.log10(val)
        # AB normalization; c is the module-level speed of light (Angstrom/s)
        logNorm = scipy.log10(sum(resampFilter[:-1]*c*specStep/spec[:-1,0]**2))
        mag = 2.5*(logNorm - logEff) # to calculated an AB magnitude
        import string
        if False: #string.find(filt_name,'SDSS') != -1:
            print mag, val, filt_name, resampFilter, spec[:,1]
        mags[filt_name]=mag
        import pylab
        if show:
            pylab.plot(spec[:,0], specall)
            pylab.show()
    return mags
def errfunc(p,spectra,locus_list,locus_index,comp_list,filters):
    """Objective for the locus fit: RMS-style distance between synthesized
    colors of `spectra` (weights `p`) and the locus entry at `locus_index`,
    summed over the color combinations in `comp_list`.
    """
    star_stats = []  # unused; kept for parity with closest_pickles
    mags = synth(p,spectra,filters)
    stat = 0
    #print mags, 'mags'
    for combo in comp_list:
        import re
        # combo is e.g. 'GSDSS_RSDSS' -> filters f1='GSDSS', f2='RSDSS'
        res = re.split('\_',combo)
        f1 = res[0]
        f2 = res[1]
        #print mags[f1]-mags[f2], locus_list[combo][locus_index], f1, f2
        stat += ((mags[f1]-mags[f2]) - locus_list[combo][locus_index])**2.
    from copy import copy
    stat = stat**0.5
    print 'stat', stat, 'p', p
    return stat
def closest_pickles(stars, locus_list, locus_index, comp_list):
    """Rank the Pickles stars by color distance to one locus entry.

    Returns [[sum_of_squared_color_diffs, star_index], ...] sorted with the
    best match first.
    """
    star_stats = []
    for s in range(len(stars)):
        stat = 0
        for combo in comp_list:
            import re
            # combo 'XSDSS_YSDSS' -> compare the X-Y color
            res = re.split('\_',combo)
            f1 = res[0]
            f2 = res[1]
            stat += ((stars[s][f1]-stars[s][f2]) - locus_list[combo][locus_index])**2.
        from copy import copy
        star_stats.append([stat,copy(s)])
    star_stats.sort()
    print [x for x in star_stats[:3]]
    return star_stats
def plot():
    """Build and plot the synthetic locus against the Pickles stars.

    Loads the synthesized magnitudes ('maglocus_SYNTH') and color lists
    ('newlocus_SYNTH'), converts them into {color_name: [values]} and
    pickles that to 'synthlocus', then scatter-plots g-r vs r-i for the
    Pickles stars, the reference locus, and the synthetic locus.
    The large `if 0:` block is a disabled per-entry fitting pass that
    produced 'maglocus'.
    """
    spectra_complete = load_spectra()
    filters = get_filters(False)
    print filters
    import pickle
    f = open('picklelocus_MACS','r')
    m = pickle.Unpickler(f)
    stars = m.load()
    locus_list = locus()
    import string
    comp_list = filter(lambda x: string.find(x.replace('SDSS_',''),'SDSS')!=-1 and string.find(x,'SDSS_')!=-1, locus_list.keys())
    import string
    print locus_list.keys()
    close_locus = []
    if 0:
        # Disabled: fit spectrum weights for every 5th locus entry and
        # checkpoint the results to 'maglocus'.
        fit_mags = []
        for i in 5 * scipy.array(range(len(locus_list[comp_list[0]])/5)): #[0:20]:
            star_stats = []
            for s in range(len(stars)):
                stat = 0
                for combo in comp_list:
                    import re
                    res = re.split('\_',combo)
                    f1 = res[0]
                    f2 = res[1]
                    stat += ((stars[s][f1]-stars[s][f2]) - locus_list[combo][i])**2.
                from copy import copy
                star_stats.append([stat,copy(s)])
            star_stats.sort()
            print [x for x in star_stats[:3]]
            # fit the four closest Pickles spectra to this locus entry
            spectra_sub = [spectra_complete[x[1]] for x in star_stats[:4]]
            if True:
                mags = synth([1,0,0,0],spectra_sub,filters)
                for combo in comp_list:
                    import re
                    res = re.split('\_',combo)
                    f1 = res[0]
                    f2 = res[1]
                    print mags[f1] - mags[f2], locus_list[combo][star_stats[0][1]], f1, f2
            close_locus.append(star_stats[0][1])
            close_locus.append(star_stats[1][1])
            #close_locus.append(star_stats[2][1])
            print spectra_sub
            from scipy import optimize
            pinit = [1,0,0,0] #,1] #,1,1,1,1]
            locus_index = i
            out = scipy.optimize.fmin(errfunc,pinit,xtol=0.005,ftol=0.001,args=(spectra_sub,locus_list,locus_index,comp_list,filters))
            #mags = errfunc([1,1,1,1,1,1,1,1],spectra_complete[0:3],filters)
            print out
            mags = synth(out,spectra_sub,filters,show=False)
            print mags
            from copy import copy
            fit_mags.append([mags,out,spectra_sub,copy(i)])
        #print fit_mags
        import pickle
        f = open('maglocus','w')
        m = pickle.Pickler(f)
        pickle.dump(fit_mags,m)
    # load the synthesized magnitudes and color lists produced elsewhere
    import pickle
    f = open('maglocus_SYNTH','r')
    m = pickle.Unpickler(f)
    fit_mags = m.load()
    import pickle
    f = open('newlocus_SYNTH','r')
    m = pickle.Unpickler(f)
    locus_list_new = m.load()
    # rebuild {color_name: [mag1-mag2, ...]} from the magnitude dicts
    synth_locus = {}
    for key in locus_list_new.keys():
        s = key.split('_')
        if len(s) == 2:
            mag1, mag2 = s
            list = []
            for i in range(len(fit_mags)):
                list.append(fit_mags[i][mag1] - fit_mags[i][mag2])
            synth_locus[key] = list
    print synth_locus
    import pickle
    f = open('synthlocus','w')
    m = pickle.Pickler(f)
    pickle.dump(synth_locus,m)
    print comp_list
    import pylab
    pylab.clf()
    # all Pickles stars (green) over the reference locus (blue)
    c1 = []
    c2 = []
    print close_locus
    for i in range(len(stars)):
        print len(stars)
        c1.append(stars[i]['GSDSS']-stars[i]['RSDSS'])
        c2.append(stars[i]['RSDSS']-stars[i]['ISDSS'])
    print c1, c2
    import string
    pylab.scatter(c1,c2,color='green')
    pylab.scatter(locus_list['GSDSS_RSDSS'], locus_list['RSDSS_ISDSS'])
    # colors of the closest-matching stars (computed but not plotted)
    c1 = []
    c2 = []
    print close_locus
    for i in close_locus:
        print len(stars)
        c1.append(stars[i]['GSDSS']-stars[i]['RSDSS'])
        c2.append(stars[i]['RSDSS']-stars[i]['ISDSS'])
    print c1, c2
    import string
    # synthetic locus colors (red)
    c1 = []
    c2 = []
    print close_locus
    for i in range(len(fit_mags)):
        print len(stars)
        c1.append(fit_mags[i]['GSDSS']-fit_mags[i]['RSDSS'])
        c2.append(fit_mags[i]['RSDSS']-fit_mags[i]['ISDSS'])
        #c1.append(fit_mags[i]['RSDSS']-fit_mags[i]['CAPAKIS'])
        #c2.append(fit_mags[i]['RSDSS']-fit_mags[i]['WSISUBARU'])
    print c1, c2
    import string
    pylab.scatter(c1,c2,color='red')
    pylab.show()
def warp():
    """Stub that builds a cubic warp function.

    NOTE(review): `p` is a free (undefined) variable here, so calling
    mod_func would raise NameError unless a global `p` exists at call time,
    and mod_func is never returned -- this function looks unfinished.
    """
    mod_func = lambda x: p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3.
| [
"dapple@xoc7.stanford.edu"
] | dapple@xoc7.stanford.edu |
93216cfecb0a8cd165fb8267341028ee1f87dba0 | a4c04117685c3d28dd60bdfc45654cb2c935f746 | /template_match_vswir2dimac.py | a6bc5dfce49cf4c30f862b2f2b88c960f7ebc8cb | [] | no_license | DKnapp64/General_Python_Codes | 1ca40779bb381d526d61c5d5fedcc76ae797c590 | 8d4669c82c17455640a0a3123f92760cd65cc26a | refs/heads/main | 2023-02-28T05:55:46.018482 | 2021-02-01T21:55:16 | 2021-02-01T21:55:16 | 335,077,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,162 | py | #!/bin/env python2
import cv2
from PIL import Image
import numpy as np
import gdal, gdalconst
import os, sys
import time
import random
## import pdb
def main(in1, in2, scorethresh, rmsethresh, outf):
scorethresh = float(scorethresh)
rmsethresh = float(rmsethresh)
## reasonable values for Score threshold = 7000
## reasonable values for RMSE threshold = 5.0
## def surfit(in1, in2):
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch13_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch13_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch4and5_20171001_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch4and5_20171001_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patchHIMB_20171001_atrem_refl3'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patchHIMB_20170930_and_20171001_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch42_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch42_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch44_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch44_20170930_dimac_match'
## in1 = '/lustre/scratch/cao/OahuVSWIRTemp/rad/patch25_20170930_atrem_refl'
## in2 = '/Volumes/DGE/CAO/caodata/Scratch/dknapp/Kaneohe/patch25_20171001_dimac_match'
vswirds = gdal.Open(in1)
vswirarr = np.zeros((vswirds.RasterYSize, vswirds.RasterXSize, 3), dtype=np.float32)
vswir8uint = np.zeros((vswirds.RasterYSize, vswirds.RasterXSize, 3), dtype=np.uint8)
bandit = vswirds.GetRasterBand(45)
vswirarr[:,:,0] = bandit.ReadAsArray()
bandit = vswirds.GetRasterBand(27)
vswirarr[:,:,1] = bandit.ReadAsArray()
bandit = vswirds.GetRasterBand(9)
vswirarr[:,:,2] = bandit.ReadAsArray()
sort1 = np.sort(vswirarr[:,:,0].flatten())
sort2 = np.sort(vswirarr[:,:,1].flatten())
sort3 = np.sort(vswirarr[:,:,2].flatten())
## find how many Nans are in each band
numnan1 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,0]), (vswirarr[:,:,0] < -50.0)))
numnan2 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,1]), (vswirarr[:,:,1] < -50.0)))
numnan3 = np.sum(np.logical_or(np.isnan(vswirarr[:,:,2]), (vswirarr[:,:,2] < -50.0)))
min1 = sort1[np.int(np.floor(0.02 * (len(sort1)-numnan1)))]
max1 = sort1[np.int(np.floor(0.98 * (len(sort1)-numnan1)))]
min2 = sort2[np.int(np.floor(0.02 * (len(sort2)-numnan2)))]
max2 = sort2[np.int(np.floor(0.98 * (len(sort2)-numnan2)))]
min3 = sort3[np.int(np.floor(0.02 * (len(sort3)-numnan3)))]
max3 = sort3[np.int(np.floor(0.98 * (len(sort3)-numnan3)))]
scale1 = 255./(max1-min1)
scale2 = 255./(max2-min2)
scale3 = 255./(max3-min3)
shift1 = -(min1*255.)
shift2 = -(min2*255.)
shift3 = -(min3*255.)
vswir8uint[:,:,0] = cv2.convertScaleAbs(vswirarr[:,:,0], alpha=scale1, beta=shift1)
vswir8uint[:,:,1] = cv2.convertScaleAbs(vswirarr[:,:,1], alpha=scale2, beta=shift2)
vswir8uint[:,:,2] = cv2.convertScaleAbs(vswirarr[:,:,2], alpha=scale3, beta=shift3)
bandit = None
temp1 = random.randint(0,100000000)
temp2 = random.randint(0,100000000)
nametemp1 = "%010d" % temp1
nametemp2 = "%010d" % temp2
gray1 = cv2.cvtColor(vswir8uint, cv2.COLOR_RGB2GRAY)
grayimg1 = Image.fromarray(gray1, mode='L')
grayimg1.save(nametemp1+".jpg")
dimacds = gdal.Open(in2)
bandit = dimacds.GetRasterBand(1)
driver = gdal.GetDriverByName('MEM')
outds = driver.Create('', vswirds.RasterXSize, vswirds.RasterYSize, 3, bandit.DataType)
refProj = vswirds.GetProjection()
refTrans = vswirds.GetGeoTransform()
outds.SetGeoTransform(refTrans)
outds.SetProjection(refProj)
gdal.ReprojectImage(dimacds, outds, refProj, refProj, gdalconst.GRA_Average)
dimacarr = np.zeros((outds.RasterYSize, outds.RasterXSize, 3), dtype=np.uint8)
bandit = outds.GetRasterBand(1)
dimacarr[:,:,0] = bandit.ReadAsArray()
bandit = outds.GetRasterBand(2)
dimacarr[:,:,1] = bandit.ReadAsArray()
bandit = outds.GetRasterBand(3)
dimacarr[:,:,2] = bandit.ReadAsArray()
bandit = None
dimacds = None
## img2 = cv2.imread(in2)
gray2 = cv2.cvtColor(dimacarr, cv2.COLOR_BGR2GRAY)
grayimg2 = Image.fromarray(gray2, mode='L')
grayimg2.save(nametemp2+".jpg")
tilerows = int(np.floor(dimacarr.shape[0]/20.)) - 2
tilecols = int(np.floor(dimacarr.shape[1]/20.)) - 2
f = open(outf, 'w')
f.write("; ENVI Image to Image GCP File\n")
f.write("; base file: %s\n" % (in2))
f.write("; warp file: %s\n" % (in1))
f.write("; Base Image (x,y), Warp Image (x,y)\n")
f.write(";\n")
## offset = 25
offset = 10
listpoints = []
method = eval('cv2.TM_CCOEFF')
for j in range(tilerows):
rowrange = (25+j*20, 25+(j+1)*20)
for g in range(tilecols):
colrange = (25+g*20, 25+(g+1)*20)
## pdb.set_trace()
template = gray1[rowrange[0]:rowrange[1],colrange[0]:colrange[1]]
w, h = template.shape[::-1]
result = cv2.matchTemplate(gray2, template, method)
resultsub = result[(rowrange[0]-offset):(rowrange[1]-offset),(colrange[0]-offset):(colrange[1]-offset)]
minval, maxval, minloc, maxloc = cv2.minMaxLoc(resultsub)
tempx = maxloc[0]+(colrange[0]-offset)+10
tempy = maxloc[1]+(rowrange[0]-offset)+10
dimacx = colrange[0]+10
dimacy = rowrange[0]+10
diffx = tempx - dimacx
diffy = tempy - dimacy
vswirx = dimacx - diffx
vswiry = dimacy - diffy
listpoints.append((dimacx, dimacy, vswirx, vswiry))
## if ((np.abs(dimac2x-dimac1x) < 80) and (np.abs(dimac2y-dimac1y) < 80)):
f.write(("%10.2f %10.2f " % (dimacx*10.0, dimacy*10.0)) + ("%10.2f %10.2f" % (vswirx, vswiry)) + (" %f\n" % maxval))
f.close()
time.sleep(3.0)
f = open(outf, 'r')
listpoints = f.readlines()
listpoints = listpoints[5:]
f.close()
inarr1 = np.array([[float(l.split()[0]), float(l.split()[1]), 0.0] for l in listpoints])
inarr2 = np.array([[float(l.split()[2]), float(l.split()[3]), 0.0] for l in listpoints])
maxvals = np.array([[float(l.split()[4])] for l in listpoints])
n = inarr1.shape[0]
pad = lambda x:np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
X = pad(inarr1)
Y = pad(inarr2)
A, res, rank, s = np.linalg.lstsq(X, Y)
transform = lambda x: unpad(np.dot(pad(x), A))
preds = transform(inarr1)
diffx = preds[:,0] - inarr2[:,0]
diffy = preds[:,1] - inarr2[:,1]
dists = np.sqrt(np.power(diffx,2) + np.power(diffy,2))
rmse = np.sqrt(np.mean(np.power(dists,2)))
np.savez('testout.npz', inarr1=inarr1, inarr2=inarr2, maxvals=maxvals, dists=dists, rmse=rmse)
f = open(outf, 'w')
f.write("; ENVI Image to Image GCP File\n")
f.write("; base file: %s\n" % (in2))
f.write("; warp file: %s\n" % (in1))
f.write("; Base Image (x,y), Warp Image (x,y)\n")
f.write(";\n")
for j in range(inarr1.shape[0]):
if (dists[j] < rmsethresh) and (maxvals[j] > scorethresh):
f.write(("%10.2f %10.2f " % (inarr1[j,0], inarr1[j,1])) + ("%10.2f %10.2f\n" % (inarr2[j,0], inarr2[j,1])))
f.close()
try:
os.remove(nametemp1+'.jpg')
except:
pass
try:
os.remove(nametemp2+'.jpg')
except:
pass
if __name__ == "__main__":
    # Command-line entry point: requires exactly 5 positional arguments.
    if len(sys.argv) != 6:
        # Fix: the original used Python-2-only `print` statements; the
        # parenthesized single-argument form behaves identically on both
        # Python 2 and Python 3. Also fixes the "scorethrshold" /
        # "scorehreshold" typos in the usage text.
        print("[ ERROR ] you must supply 5 arguments: template_match_vswir2dimac.py vswirimage dimacimage scorethreshold rmsethreshold outputfile")
        print("where:")
        print("    vswirimage = an orthocorrected VSWIR image to warp to the DiMAC image")
        print("    dimacimage = an orthocorrected DiMAC image to use as the base")
        print("    scorethreshold = The value of the template matching coefficient threshold BELOW which points are rejected (usually 1000000.0)")
        print("    rmsethreshold = The value of the point RMSE value threshold ABOVE which points are rejected (for DiMAC, usually 30.0)")
        print("    outputfile = an output text file in ENVI image-to-image for warping the first DiMAC image to the second.")
        print("")
        sys.exit(1)
    # Delegate to main() defined above and echo its return value.
    print(main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]))
| [
"dknapp4@asu.edu"
] | dknapp4@asu.edu |
ad40ce01d4d7c2bc546c2517391733816774e136 | ab98aaf1b40a5f2a7ab3c4937f7918421e24ea08 | /awacs/ssmmessages.py | 2908a3e3203504588532e773c47c2d57b51cfca3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bruvio/awacs | 6e7b7f2b5feddf792d983fc187a6460c7125ed1f | 9b9140a645219a4a9f606f97f19893d69bdc8494 | refs/heads/master | 2023-02-23T11:41:24.862343 | 2021-02-01T05:23:11 | 2021-02-01T05:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
# Human-readable service name and the IAM action prefix used for all
# actions/ARNs built in this module.
service_name = 'Amazon Session Manager Message Gateway Service'
prefix = 'ssmmessages'
class Action(BaseAction):
    """IAM action bound to the ssmmessages service prefix."""

    def __init__(self, action=None):
        # Every action for this module is namespaced under 'ssmmessages'.
        super(Action, self).__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder pre-bound to the ssmmessages service."""

    def __init__(self, resource='', region='', account=''):
        super(ARN, self).__init__(service=prefix, resource=resource,
                                  region=region, account=account)
# Pre-built Action instances for every ssmmessages API action.
CreateControlChannel = Action('CreateControlChannel')
CreateDataChannel = Action('CreateDataChannel')
OpenControlChannel = Action('OpenControlChannel')
OpenDataChannel = Action('OpenDataChannel')
| [
"mark@peek.org"
] | mark@peek.org |
526e46e5dd05ee4442f1b022940b7ec2f78eb4b8 | a566cb316ab93aeadd366b148f5110c327c7eb2b | /chp3/ex4.py | 8c89bcf4faccc137baf37af597a0523e2359341d | [] | no_license | piochelepiotr/crackingTheCode | 4aeaffd2c46b2761b2f9642107292d0932731489 | 163ff60f723869a7096b330965d90dc1443d7199 | refs/heads/master | 2021-06-20T21:30:56.033989 | 2021-01-13T08:44:57 | 2021-01-13T08:44:57 | 172,414,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import stack
class MyQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per op)."""

    def __init__(self):
        # New items land on in_stack; out_stack holds items in pop order.
        self.in_stack = stack.Stack()
        self.out_stack = stack.Stack()

    def push(self, x):
        self.in_stack.push(x)

    def pull(self):
        # Fast path: pop order already materialized.
        if self.out_stack.size() > 0:
            return self.out_stack.pop()
        if self.in_stack.size() == 0:
            raise Exception("empty queue")
        # Reverse the input stack into the output stack, then pop.
        while self.in_stack.size() > 0:
            self.out_stack.push(self.in_stack.pop())
        return self.out_stack.pop()
| [
"piotr.wolski@telecom-paristech.fr"
] | piotr.wolski@telecom-paristech.fr |
0a83cf1bd9b3cc886f61571f18089d7a006463de | 55173732ce1f2537a4fd8a6137b2a813f594b250 | /azure-mgmt-scheduler/azure/mgmt/scheduler/models/oauth_authentication.py | 1b85128c1419f34d634eedd5dbcb6e5d491038fb | [
"Apache-2.0"
] | permissive | dipple/azure-sdk-for-python | ea6e93b84bfa8f2c3e642aecdeab9329658bd27d | 9d746cb673c39bee8bd3010738c37f26ba6603a4 | refs/heads/master | 2020-02-26T15:32:39.178116 | 2016-03-01T19:25:05 | 2016-03-01T19:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .http_authentication import HttpAuthentication
class OAuthAuthentication(HttpAuthentication):
    """OAuth-based HTTP authentication settings for a Scheduler job.

    :param str type: Gets or sets the http authentication type. Possible
     values include: 'NotSpecified', 'ClientCertificate',
     'ActiveDirectoryOAuth', 'Basic'
    :param str secret: Gets or sets the secret.
    :param str tenant: Gets or sets the tenant.
    :param str audience: Gets or sets the audience.
    :param str client_id: Gets or sets the client identifier.
    """

    _required = []

    _attribute_map = {
        'secret': {'key': 'secret', 'type': 'str'},
        'tenant': {'key': 'tenant', 'type': 'str'},
        'audience': {'key': 'audience', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, type=None, secret=None, tenant=None, audience=None, client_id=None):
        # The shared `type` field is owned by the base class; the remaining
        # fields are OAuth-specific.
        super(OAuthAuthentication, self).__init__(type=type)
        self.client_id = client_id
        self.audience = audience
        self.tenant = tenant
        self.secret = secret
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
4637bc96cd5dc021a8983c88d76563d4cd4c56df | eb7bf9ee76f3b38ef11b09440934b36a64639396 | /castero/episode.py | 059bc84c4f1c53ec2168dd6c531b551f326f1ad2 | [
"MIT"
] | permissive | Dramicas/castero | 9cea0dc5d5de949f7df76308ce221a28cbf8bba8 | 9d7edb39ab21c9bd8e6b94e134ef336358f74222 | refs/heads/master | 2020-03-16T16:06:59.623720 | 2018-05-06T19:20:57 | 2018-05-06T19:20:57 | 132,773,066 | 1 | 0 | null | 2018-05-09T14:58:16 | 2018-05-09T14:58:15 | null | UTF-8 | Python | false | false | 7,435 | py | import os
import threading
from castero import helpers
from castero.datafile import DataFile
class Episode:
    """The Episode class.

    This class represents a single episode from a podcast feed.
    """

    def __init__(self, feed, title=None, description=None, link=None,
                 pubdate=None, copyright=None, enclosure=None) -> None:
        """Initializes the object.

        At least one of a title or description must be specified.

        Args:
            feed: the feed that this episode is a part of
            title: (optional) the title of the episode
            description: (optional) the description of the episode
            link: (optional) a link to the episode
            pubdate: (optional) the date the episode was published, as a string
            copyright: (optional) the copyright notice of the episode
            enclosure: (optional) a url to a media file
        """
        assert title is not None or description is not None

        self._feed = feed
        self._title = title
        self._description = description
        self._link = link
        self._pubdate = pubdate
        self._copyright = copyright
        self._enclosure = enclosure

    def __str__(self) -> str:
        """Represent this object as a single-line string.

        Returns:
            string: this episode's title, if it exists, else its description
        """
        representation = self._title if self._title is not None \
            else self._description
        return representation.split('\n')[0]

    def _feed_directory(self) -> str:
        """Gets the path to the downloaded episode's feed directory.

        This method does not ensure whether the directory exists -- it simply
        acts as a single definition of where it _should_ be.

        Returns:
            str: a path to the feed directory
        """
        feed_dirname = helpers.sanitize_path(str(self._feed))
        return os.path.join(DataFile.DOWNLOADED_DIR, feed_dirname)

    def _downloaded_file(self):
        """Finds the downloaded media file for this episode, if any.

        Fix: this search was previously duplicated (with a re-implementation
        of _feed_directory) in both get_playable and downloaded; it now lives
        in one place. As before, when several files match, the last one
        listed wins.

        Returns:
            str: path to the episode's downloaded file, or None
        """
        found = None
        feed_directory = self._feed_directory()
        episode_partial_filename = helpers.sanitize_path(str(self))
        if os.path.exists(feed_directory):
            for File in os.listdir(feed_directory):
                if File.startswith(episode_partial_filename + '.'):
                    found = os.path.join(feed_directory, File)
        return found

    def get_playable(self) -> str:
        """Gets a playable path for this episode.

        This method checks whether the episode is available on the disk,
        giving the path to that file if so. Otherwise, simply return the
        episode's enclosure, which is probably a URL.

        Returns:
            str: a path to a playable file for this episode
        """
        downloaded_file = self._downloaded_file()
        return downloaded_file if downloaded_file is not None \
            else self.enclosure

    def download(self, download_queue, display=None):
        """Downloads this episode to the file system.

        This method currently only supports downloading from an external URL.
        In the future, it may be worthwhile to determine whether the episode's
        source is a local file and simply copy it instead.

        Args:
            download_queue: the download_queue overseeing this download
            display: (optional) the display to write status updates to
        """
        if self._enclosure is None:
            if display is not None:
                display.change_status("Download failed: episode does not have"
                                      " a valid media source")
            return

        feed_directory = self._feed_directory()
        episode_partial_filename = helpers.sanitize_path(str(self))
        # Keep the enclosure's extension, dropping any query string.
        extension = os.path.splitext(self._enclosure)[1].split('?')[0]
        output_path = os.path.join(feed_directory,
                                   episode_partial_filename + str(extension))

        DataFile.ensure_path(output_path)
        if display is not None:
            display.change_status("Starting episode download...")

        # Download on a background thread so the UI stays responsive.
        t = threading.Thread(
            target=DataFile.download_to_file,
            args=[
                self._enclosure, output_path, str(self),
                download_queue, display
            ],
            name="download_%s" % str(self)
        )
        t.start()

    def delete(self, display=None):
        """Deletes the episode file from the file system.

        Args:
            display: (optional) the display to write status updates to
        """
        assert self.downloaded

        episode_partial_filename = helpers.sanitize_path(str(self))
        feed_directory = self._feed_directory()

        if os.path.exists(feed_directory):
            for File in os.listdir(feed_directory):
                if File.startswith(episode_partial_filename + '.'):
                    os.remove(os.path.join(feed_directory, File))
                    if display is not None:
                        display.change_status(
                            "Successfully deleted the downloaded episode"
                        )

            # if there are no more files in the feed directory, delete it
            if len(os.listdir(feed_directory)) == 0:
                os.rmdir(feed_directory)

    @property
    def title(self) -> str:
        """str: the title of the episode"""
        return self._title if self._title is not None \
            else "Title not available."

    @property
    def description(self) -> str:
        """str: the description of the episode"""
        return self._description if self._description is not None \
            else "Description not available."

    @property
    def link(self) -> str:
        """str: the link of/for the episode"""
        return self._link if self._link is not None \
            else "Link not available."

    @property
    def pubdate(self) -> str:
        """str: the publish date of the episode"""
        return self._pubdate if self._pubdate is not None \
            else "Publish date not available."

    @property
    def copyright(self) -> str:
        """str: the copyright of the episode"""
        return self._copyright if self._copyright is not None \
            else "No copyright specified."

    @property
    def enclosure(self) -> str:
        """str: the enclosure of the episode"""
        return self._enclosure if self._enclosure is not None \
            else "Enclosure not available."

    @property
    def downloaded(self) -> bool:
        """bool: whether or not the episode is downloaded"""
        return self._downloaded_file() is not None

    @property
    def downloaded_str(self) -> str:
        """str: a text description of whether the episode is downloaded"""
        if self.downloaded:
            return "Episode downloaded and available for offline playback."
        return "Episode not downloaded."
| [
"jake@faltro.com"
] | jake@faltro.com |
93519bcda9ed48a7c96840b95c632bd619fda9f9 | b01429f27f8d7f4db7e3eba0abbb6be1ea67e2fa | /imageimage1.2/propriete/propriete_vivant_air.py | e31973f542422c9ebc8de6f4de654e9f0b8becc5 | [] | no_license | pastrouveedespeudo/ste-fois-c-la-bonne | 3dce8cdfc6b5523d9651e8ec9a143b7ab7789d21 | 9872c35423870c9854ee0bda120cca0c832c1fc9 | refs/heads/master | 2020-04-20T22:08:34.295196 | 2019-02-17T17:18:36 | 2019-02-17T17:18:36 | 169,129,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | class vivant:
def vivant(self):
self.vivant_air = ["chat",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chien",
"chien",
"chien",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chat",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"requin",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chien",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chat",
"chien",
"chien",
"chien",
"chien",
"chat",
"requin",
"chien",
"chat",
"requin",
"requin",
"requin",
"requin",
"requin",
"chien",
"chat",
"dinosaure",
"chat",
"chien",
"dinosaure",
"chat",
"chien",
"dinosaure" ] | [
"noreply@github.com"
] | pastrouveedespeudo.noreply@github.com |
df181d1dd23af220e91c7c1f1f8ad80dce1f7d23 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/group/behav478.py | 1899e1d5fda53dc78b027212ecc6502b202141a0 | [] | no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | #!/usr/bin/python
import sys
def compute(prey, otherHunter, dist):
temp0 = -1 * prey[0]
if otherHunter[1] != 0:
temp1 = prey[1] / otherHunter[1]
else:
temp1 = otherHunter[1]
temp1 = prey[0] + prey[1]
temp1 = -1 * otherHunter[1]
temp1 = dist - temp0
temp0 = min( otherHunter[1] , prey[0] )
temp1 = max( temp0 , otherHunter[1] )
temp1 = min( prey[1] , otherHunter[0] )
temp1 = max( prey[0] , prey[0] )
if otherHunter[1] != 0:
temp1 = otherHunter[0] / otherHunter[1]
else:
temp1 = otherHunter[1]
if otherHunter[1] != 0:
temp1 = otherHunter[0] % otherHunter[1]
else:
temp1 = otherHunter[1]
temp0 = prey[0] * prey[1]
temp0 = prey[0] - prey[1]
temp2 = prey[1] - temp0
temp1 = max( temp0 , prey[1] )
if temp1 != 0:
temp0 = otherHunter[1] / temp1
else:
temp0 = temp1
if temp2 > temp0 :
temp3 = max( temp1 , prey[0] )
else:
if temp1 > otherHunter[0] :
temp3 = min( otherHunter[1] , temp0 )
else:
if temp1 > otherHunter[1] :
temp3 = temp0 - otherHunter[0]
else:
if temp0 > otherHunter[0] :
if dist > otherHunter[1] :
temp3 = temp1 * temp1
else:
temp3 = min( otherHunter[0] , dist )
else:
temp3 = prey[1] + temp2
return [ temp3 , otherHunter[0] ]
| [
"i7674211@bournemouth.ac.uk"
] | i7674211@bournemouth.ac.uk |
cb015a533d9e178936ea1c750e1174ccc0214944 | 8808906b8562b679540e9fe51f8f034e36e8a977 | /adler/tensorflow/losses.py | 370651b2f1392cd2d7036490a484791831a909b9 | [
"MIT"
] | permissive | adler-j/adler | 2bd0a969f8d31505d99bd4853f57f74d1984dc17 | f5fb62c41d50f270eafdd53e93c1763c99a1d902 | refs/heads/master | 2021-01-20T08:15:39.645701 | 2019-11-28T21:41:18 | 2019-11-28T21:41:18 | 90,125,611 | 8 | 5 | MIT | 2019-11-28T21:41:19 | 2017-05-03T08:22:49 | Python | UTF-8 | Python | false | false | 2,598 | py | import demandimport
with demandimport.enabled():
import tensorflow as tf
import numpy as np
__all__ = ('log10', 'psnr', 'ssim')
def log10(x):
    """Base-10 logarithm of a tensor, computed as ln(x) / ln(10)."""
    natural = tf.log(x)
    ten = tf.constant(10, dtype=natural.dtype)
    return natural / tf.log(ten)
def psnr(x_result, x_true, name='psnr'):
    """Peak signal-to-noise ratio of x_result against the reference x_true."""
    with tf.name_scope(name):
        # Dynamic range is taken from the reference image itself.
        dynamic_range = tf.reduce_max(x_true) - tf.reduce_min(x_true)
        mean_sq_err = tf.reduce_mean((x_result - x_true) ** 2)
        return 20 * log10(dynamic_range) - 10 * log10(mean_sq_err)
def _tf_fspecial_gauss(size, sigma):
    """Function to mimic the 'fspecial' gaussian MATLAB function.

    Returns a normalized (sums to 1) Gaussian kernel as a float32 tensor of
    shape [size, size, 1, 1], ready for use as a conv2d filter.
    """
    # Integer coordinate grids centered on the kernel.
    x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1,
                              -size//2 + 1:size//2 + 1]

    # Add the two trailing singleton dims (in_channels, out_channels)
    # expected by tf.nn.conv2d filters.
    x_data = np.expand_dims(x_data, axis=-1)
    x_data = np.expand_dims(x_data, axis=-1)

    y_data = np.expand_dims(y_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)

    x = tf.constant(x_data, dtype=tf.float32)
    y = tf.constant(y_data, dtype=tf.float32)

    # Unnormalized Gaussian, then normalize so the kernel sums to 1.
    g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
    return g / tf.reduce_sum(g)
def ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5,
         name='ssim'):
    """Structural SIMilarity index.

    Code from:
    https://stackoverflow.com/questions/39051451/ssim-ms-ssim-for-tensorflow

    NOTE(review): L is hard-coded to 1, so inputs are presumably scaled to
    [0, 1]; inputs must be 4-D tensors compatible with tf.nn.conv2d --
    confirm with callers. If cs_map is True a (ssim_map, cs_map) tuple is
    returned; if mean_metric is True the map(s) are reduced to their mean.
    """
    with tf.name_scope(name):
        window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]
        # Standard SSIM stabilization constants.
        K1 = 0.01
        K2 = 0.03
        L = 1  # depth of image (255 in case the image has a differnt scale)
        C1 = (K1*L)**2
        C2 = (K2*L)**2
        # Local (Gaussian-weighted) means.
        mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
        mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
        mu1_sq = mu1*mu1
        mu2_sq = mu2*mu2
        mu1_mu2 = mu1*mu2
        # Local variances and covariance (E[x^2] - E[x]^2 form).
        sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_sq
        sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu2_sq
        sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_mu2
        if cs_map:
            # Return both the full SSIM map and the contrast-structure term.
            value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)),
                    (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
        else:
            value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2))

        if mean_metric:
            value = tf.reduce_mean(value)
        return value
"jonasadl@kth.se"
] | jonasadl@kth.se |
ab02fa783977bd1142c4ca52d2fd181959bacfa1 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5708921029263360_0/Python/ziyan/c.py | c250248466e97f63f5bb90fb5797cc1624f5e7b5 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python
import os
import sys
import collections
def solve(J, P, S, K):
sols = []
jppairs = collections.defaultdict(int)
pspairs = collections.defaultdict(int)
jspairs = collections.defaultdict(int)
for j in range(J):
for p in range(P):
for s in range(S):
if jppairs[(j, p)] < K and pspairs[(p, s)] < K and jspairs[(j, s)] < K:
sols += [(j, p, s)]
jppairs[(j, p)] += 1
pspairs[(p, s)] += 1
jspairs[(j, s)] += 1
return sols
def main():
T = int(sys.stdin.readline().strip())
for t in range(T):
J, P, S, K = map(int, sys.stdin.readline().strip().split())
sols = solve(J, P, S, K)
print 'Case #%d: %d' % (t + 1, len(sols))
for sol in sols:
print '%d %d %d' % (sol[0] + 1, sol[1] + 1, sol[2] + 1)
if __name__ == '__main__':
main()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
808d8073572cc25e44a844f47b654d2ebf298a8b | 13724823af94e5e5351ffa42ca896397f12f1f05 | /install/lamachine/bin/foliamerge | b6f61bf89f1f7cbb27cd35578c4d359457e6c0df | [] | no_license | AymanYac/Neonec-Deep-Classsifier | 21e00cb0c5561f4ac22968f748ada0aa299e0a94 | a7978f434cc09d9e00a7df5d391bae77daf17637 | refs/heads/master | 2022-06-08T12:44:10.203386 | 2018-07-06T15:28:00 | 2018-07-06T15:28:00 | 139,996,406 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/mnt/c/Users/yacay/Downloads/LaMachine-master/install/lamachine/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from foliatools.foliamerge import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffixes from argv[0]
    # so the delegated foliatools entry point sees a clean program name,
    # then exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"root@Razer-Stealth.localdomain"
] | root@Razer-Stealth.localdomain | |
d2983122fb0009d363cf14e6c7be027b5fbdd062 | 54791fd57ecc9a4fe7c5164dfa6eb79c8df48ee1 | /tmpdoc/experiment/python_demos/work/selenium_demo/selenium_execjs.py | 7fa71e1eb47fced1401baba2eaf6c2b033cffe73 | [] | no_license | cherry-wb/quietheart | 8dfc91f88046bd1b40240e2f6121043977ab78b4 | 715ed73c990da2b4634313c93910769a59ce51f4 | refs/heads/master | 2021-01-18T00:04:39.802220 | 2014-08-21T07:39:21 | 2014-08-21T07:39:21 | 23,286,239 | 1 | 3 | null | 2019-03-11T09:32:21 | 2014-08-24T16:37:05 | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/python
from selenium import webdriver
#url = "http://10.126.1.29/wirelesssetup_radiosetup.html"
# Device pages tried in earlier experiments (kept for reference):
#url = "http://10.126.1.29/wirelesssetup_radiosetup.html"
#url = "http://10.126.1.29/advancedsetup_lanipdhcpsettings.html"
#url = "http://10.126.1.29/wirelesssetup_basicsettings.html"
#url = "http://10.126.1.29/wirelesssetup_radiosetup.html"
#url = "http://10.126.1.29/wirelesssetup_multiplessid.html"
url = "http://admin:admin@10.126.1.15/Wireless_Basic.asp"
formName = 'wireless'

firefoxDriver = webdriver.Firefox()
try:
    firefoxDriver.get(url)
    #content = firefoxDriver.execute_script("return document.forms['%s'].outerHTML;" % (formName))
    #content = firefoxDriver.execute_script("return document.forms['%s'].outerHTML" %(formName))
    # Dump the outer HTML of the page's first form via in-browser JS.
    content = firefoxDriver.execute_script("return document.forms[0].outerHTML")
    # Fix: Python-2-only `print content` -> print() (works on 2 and 3).
    print(content)
finally:
    # Fix: always close the browser; previously an exception during
    # navigation or script execution leaked the Firefox process.
    firefoxDriver.quit()
"quietheart@quietheart-ThinkPad-E420.(none)"
] | quietheart@quietheart-ThinkPad-E420.(none) |
33c63f8feeed6999b66b57b6bfade00d45d12180 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_027/ch27_2019_03_03_19_46_19_955437.py | 2e85409e94af22b3d6127f3c8c20842513e7742d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | x = int(input())
y = int(input())
# Scale the two inputs over 10 years (365 days each) and express the
# product in 1440-minute days.
red = (x*y*365*10/1440)
# Fix: the built-in is int(), not Int() (the original raised NameError).
print(int(red))
"you@example.com"
] | you@example.com |
5fd135d961041599ba6517fc3bc51b6192575f70 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/dialogflow/intents/hooks.py | 35fab4c997e2bc806e20eb418ea5a0a03f27c244 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 1,402 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declarative hooks for `gcloud dialogflow intents`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
def TrainingPhrasesType(training_phrase):
    """Wraps a raw phrase string as a Dialogflow trainingPhrases dict."""
    part = {'text': training_phrase}
    return {'type': 'EXAMPLE', 'parts': [part]}
def ResponseToMessage(response):
    """Wraps a response string as a Dialogflow text-message dict."""
    texts = [response]
    return {'text': {'text': texts}}
def AddOtherPropertiesToRequest(unused_instance_ref, args, request):
    """Merges --other-properties values into the request's intent message.

    Round-trips the intent proto through a dict so arbitrary user-supplied
    fields can be overlaid, then rebuilds the message in place on the
    request and returns it.
    """
    intent = encoding.MessageToDict(request.googleCloudDialogflowV2Intent)
    if args.IsSpecified('other_properties'):
        # User-supplied keys override the existing intent fields.
        intent.update(args.other_properties)
    request.googleCloudDialogflowV2Intent = encoding.DictToMessage(
        intent, type(request.googleCloudDialogflowV2Intent))
    return request
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
# Scheduler-experiment parameter set (a bare Python literal -- presumably
# read back with eval/ast.literal_eval by the experiment harness; TODO
# confirm against the reader).
{'cpus': 4,
 'duration': 30,
 'final_util': '3.721167',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '1',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 0,
 'utils': 'uni-medium-3'}
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
081d2606bb85413135f9cf37448d40647dde1cbe | 3199331cede4a22b782f945c6a71150a10c61afc | /20210523LangReview/Python/review04/04-generator/gen02.py | 2624c1c06a7d841915f7e0b8c362406a78e431e6 | [] | no_license | AuroraBoreas/language-review | 6957a3cde2ef1b6b996716addaee077e70351de8 | 2cb0c491db7d179c283dba205b4d124a8b9a52a3 | refs/heads/main | 2023-08-19T23:14:24.981111 | 2021-10-11T12:01:47 | 2021-10-11T12:01:47 | 343,345,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | "#Python is a protocol orientated lang; every top-level function or syntax has a corresponding dunder method implemented;"
import time
class Compute:
    """Iterator demo: yields 0 .. last-1 with a half-second delay per step.

    Illustrates the iterator protocol (__iter__/__next__ dunders).
    """

    def __init__(self, last: int):
        self.last = last
        self.first = 0

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        value = self.first
        self.first = value + 1
        time.sleep(.5)
        if self.first > self.last:
            raise StopIteration()
        return value
if __name__ == "__main__":
    # Demo: prints 0..9, one value every half second.
    for i in Compute(10):
        print(i)
| [
"noreply@github.com"
] | AuroraBoreas.noreply@github.com |
47c0ab6d57f95d8b1d7819eb25b2c4be405b67ef | cc64b1b5deb4530a5bd3eaabd98ebd4daa2deea1 | /Aulas/Exercícios-Mundo3/Aula016/Ex072.py | 6a361ae1465ad181cd99a5831421f1306f1a034c | [
"MIT"
# Lookup table mapping a number 0-20 to its Portuguese name.
# Fix: entry 19 read "dezenove20" in the original, so the program printed a
# garbled name for 19.
t = ("zero", "um", "dois", "três", "quatro", "cinco", "seis", "sete", "oito",
     "nove", "dez", "onze", "doze", "treze", "quatorze", "quinze",
     "dezesseis", "dezessete", "dezoito", "dezenove", "vinte")
while True:
    esc = int(input("Digite um número de 0 a 20:"))
    if 0 <= esc <= 20:
        print(f"Você digitou o valor {t[esc]}.")
    else:
        # Fix: out-of-range input was previously ignored silently.
        print("Valor inválido: digite um número de 0 a 20.")
    esc2 = input("Você quer continuar [s/n]:").strip().upper()
    if esc2 == "N":
        break
print("Obrigado por perder seu tempo conosco.")
"81760467+Sofista23@users.noreply.github.com"
] | 81760467+Sofista23@users.noreply.github.com |
874e34415a4f5d7c2ddb22a3966ca448f742d45b | 2635d6f24df87d0813e9dd8d3853fb9632d39686 | /setup.py | f8c12f6adcb5876d7aa8340adab284698b3abf79 | [
"MIT"
] | permissive | tolulomo/materialsmine | cc921464aefa0f47fc6ac9f85a8bd65a67c0f3bb | 8ac7d942b89492c8750bc5cb95951e2ab9694ae4 | refs/heads/master | 2022-11-18T08:10:51.631100 | 2020-07-15T17:30:13 | 2020-07-15T17:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #!/usr/bin/env python
from distutils.core import setup
# Package metadata for a distutils-based install.
# NOTE(review): distutils is deprecated (removed in Python 3.12) --
# consider migrating to setuptools.
setup(name='Nanomine',
      version='0.1',
      description='Nanomine project configuration file',
      author='rui',
      packages=[
          # 'pymongo'
      ],
      )
| [
"mccusker@gmail.com"
] | mccusker@gmail.com |
3afab3079ec8742ba54e9a0b1a48976d2ad481f3 | a0b7a7104ca701e8b08d590660ee92b325fd17e9 | /jeri/core/models/fields/__init__.py | 72fba11357e8171207a8f80cb4e8eae570e9bd62 | [
"BSD-3-Clause"
] | permissive | fmorgner/jeri | fecd4df05b62ee00a248005f3cbf1c313eb6d35d | 5b33411c0e25375e3e5928fc044581a24c56f3ad | refs/heads/master | 2021-01-01T16:46:52.786518 | 2017-07-22T17:49:18 | 2017-07-22T17:49:18 | 97,918,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from jeri.core.models.fields.value import StringField # NOQA
from jeri.core.models.fields.related import ( # NOQA
OneToOneField,
OneToManyField
)
| [
"felix.morgner@gmail.com"
] | felix.morgner@gmail.com |
6eecbdfe33a0d7bf82903ca4bfd6b8c3a3c79f4f | 5de5ae0adb6fb1e73c2e897fbc13b6abf53c559b | /Applications/Logic_Puzzles/pipe.py | c2c73d8342e179dfbc8a640d20a4512ce7d4a0d0 | [] | no_license | Trietptm-on-Coding-Algorithms/Learning-Z3 | af935450226ee3299e10361f21a567945aa0fd5c | c5ef7faca49aa164556b3c7e9ccfb4709027cf74 | refs/heads/master | 2020-05-13T18:34:38.105308 | 2017-12-23T11:08:43 | 2017-12-23T11:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | from z3 import *
# Solving Puzzle
# Pipe puzzle is a puzzle where we are given a sets of randomly configured pipe
# The goal is to configure the pipes to make a close loop where the water can
# flow only inside the pipe.
# Imagine the field is a matrix with certain row and columns.
# Each cell can be connected with other by a joint, either horizontal joint or
# vertical joint.
# A pipe can be imagined as one or more joints that operate as single body.
# Then, based on how many joints a pipe has, we can create the pipe types.
# The pipe can be rotated to certain degree (0, 90, 180, 270) which result in
# the change of position.
# cell type, angle, (pseudo)graphical representation
# Maps (pipe type, rotation in degrees) -> box-drawing glyph.
# Types: "0" empty cell, "2a" straight, "2b" elbow, "3" tee, "4" cross.
symbols={("0", 0): " ",
         ("2a", 0): "┃",
         ("2a", 90): "━",
         ("2b", 0): "┏",
         ("2b", 90): "┓",
         ("2b",180): "┛",
         ("2b",270): "┗",
         ("3", 0): "┣",
         ("3", 90): "┳",
         ("3", 180): "┫",
         ("3", 270): "┻",
         ("4", 0): "╋"}
def print_model(m):
# print angles:
for r in range(HEIGHT):
for c in range(WIDTH):
t=cells_type[r][c]
angle=int(str(m[A[r][c]]))
sys.stdout.write("%3d " % angle)
print()
# print pipes:
for r in range(HEIGHT):
for c in range(WIDTH):
t=cells_type[r][c]
angle=int(str(m[A[r][c]]))
sys.stdout.write(symbols[(t, angle)]+" ")
print()
print()
s=Solver()
HEIGHT=8
WIDTH=16
# if T/B/R/L is Bool instead of Int, Z3 solver will work faster
T=[[Bool('cell_%d_%d_top' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
B=[[Bool('cell_%d_%d_bottom' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
R=[[Bool('cell_%d_%d_right' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
L=[[Bool('cell_%d_%d_left' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
A=[[Int('cell_%d_%d_angle' % (r, c)) for c in range(WIDTH)] for r in range(HEIGHT)]
# initial configuration
cells_type=[
["0", "0", "2b", "3", "2a", "2a", "2a", "3", "3", "2a", "3", "2b", "2b", "2b", "0", "0"],
["2b", "2b", "3", "2b", "0", "0", "2b", "3", "3", "3", "3", "3", "4", "2b", "0", "0"],
["3", "4", "2b", "0", "0", "0", "3", "2b", "2b", "4", "2b", "3", "4", "2b", "2b", "2b"],
["2b", "4", "3", "2a", "3", "3", "3", "2b", "2b", "3", "3", "3", "2a", "2b", "4", "3"],
["0", "2b", "3", "2b", "3", "4", "2b", "3", "3", "2b", "3", "3", "3", "0", "2a", "2a"],
["0", "0", "2b", "2b", "0", "3", "3", "4", "3", "4", "3", "3", "3", "2b", "3", "3"],
["0", "2b", "3", "2b", "0", "3", "3", "4", "3", "4", "4", "3", "0", "3", "4", "3"],
["0", "2b", "3", "3", "2a", "3", "2b", "2b", "3", "3", "3", "3", "2a", "3", "3", "2b"]]
# We know that if each of half joints is present, corresponding half-joint must be
# also present, and vice-versa. We define this using these constraints.
# shorthand variables for True and False:
t=True
f=False
# "top" of each cell must be equal to "bottom" of the cell above
# "bottom" of each cell must be equal to "top" of the cell below
# "left" of each cell must be equal to "right" of the cell at left
# "right" of each cell must be equal to "left" of the cell at right
for r in range(HEIGHT):
for c in range(WIDTH):
if r!=0:
s.add(T[r][c]==B[r-1][c])
if r!=HEIGHT-1:
s.add(B[r][c]==T[r+1][c])
if c!=0:
s.add(L[r][c]==R[r][c-1])
if c!=WIDTH-1:
s.add(R[r][c]==L[r][c+1])
# "left" of each cell of first column shouldn't have any connection
# so is "right" of each cell of the last column
for r in range(HEIGHT):
s.add(L[r][0]==f)
s.add(R[r][WIDTH-1]==f)
# "top" of each cell of the first row shouldn't have any connection
# so is "bottom" of each cell of the last row
for c in range(WIDTH):
s.add(T[0][c]==f)
s.add(B[HEIGHT-1][c]==f)
for r in range(HEIGHT):
for c in range(WIDTH):
ty=cells_type[r][c]
if ty=="0":
s.add(A[r][c]==f)
s.add(T[r][c]==f, B[r][c]==f, L[r][c]==f, R[r][c]==f)
if ty=="2a":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==f, T[r][c]==t, B[r][c]==t), # ┃
And(A[r][c]==90, L[r][c]==t, R[r][c]==t, T[r][c]==f, B[r][c]==f))) # ━
if ty=="2b":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==t, T[r][c]==f, B[r][c]==t), # ┏
And(A[r][c]==90, L[r][c]==t, R[r][c]==f, T[r][c]==f, B[r][c]==t), # ┓
And(A[r][c]==180, L[r][c]==t, R[r][c]==f, T[r][c]==t, B[r][c]==f), # ┛
And(A[r][c]==270, L[r][c]==f, R[r][c]==t, T[r][c]==t, B[r][c]==f))) # ┗
if ty=="3":
s.add(Or(And(A[r][c]==0, L[r][c]==f, R[r][c]==t, T[r][c]==t, B[r][c]==t), # ┣
And(A[r][c]==90, L[r][c]==t, R[r][c]==t, T[r][c]==f, B[r][c]==t), # ┳
And(A[r][c]==180, L[r][c]==t, R[r][c]==f, T[r][c]==t, B[r][c]==t), # ┫
And(A[r][c]==270, L[r][c]==t, R[r][c]==t, T[r][c]==t, B[r][c]==f))) # ┻
if ty=="4":
s.add(A[r][c]==0)
s.add(T[r][c]==t, B[r][c]==t, L[r][c]==t, R[r][c]==t) # ╉
print(s.check())
print_model (s.model()) | [
"me@xathrya.id"
] | me@xathrya.id |
1d026ecec5a670431b899914e47b7896880ac674 | 5de0c0e76bdde469156d057007a5008a63a0d66b | /openeeg/proto.py | 2fa2ae2d0a6741714a867192b9b835eb0801cace | [] | no_license | mattharkness/sixthdev | 6bcfd1c490efafb114dc5f014c6e5f1d91d56b4d | a7df929147d82d225606c216f69c48d898e19ebe | refs/heads/master | 2023-06-08T05:57:38.928657 | 2021-06-15T16:53:15 | 2021-06-15T16:53:15 | 338,441,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,255 | py | #!/usr/bin/python2.2
#
# openEEG software prototype
# by michal wallace (sabren@manifestation.com)
#
# python prototype of a mind-mirror style biofeedback machine
# Basically, this is just a spectral analysis program.
#
# This version still only graphs fake data, but adds windowing
# to clean up some of the noise. The scale is still wrong, though.
#
# $Id$
## dependencies: #####################################################
try:
import Numeric # http://www.pfdubois.com/numpy/
import MLab, FFT, RandomArray # (parts of numeric)
import pygame # http://www.pygame.org/
from pygame.locals import *
except:
raise SystemExit, "This program requries NumPy and pygame."
# the rest of these come with python:
import whrandom
import time
## graphic routines ##################################################
def makeGradient():
"""
Returns an 163*10 Surface showing mirrored green-yellow-red
gradients with a blue line in between.
"""
colors = []
for i in range(0, 0xff, 0x22):
colors.append((i, 0xff, 0))
colors.append((0xff, 0xff, 0))
for i in range(0xcc, -1, -0x22):
colors.append((0xff, i, 0))
rcolors = colors
lcolors = colors[:]; lcolors.reverse()
center = 80
sprite = pygame.Surface((163, 10))
for x in range(len(colors)):
# left (red to green)
pygame.draw.rect(sprite, lcolors[x],
pygame.Rect(x*5, 1, 4, 8))
# right (green to red)
pygame.draw.rect(sprite, rcolors[x],
pygame.Rect(center+2+(x*5), 1, 4, 8))
pygame.draw.line(sprite, (0xcc,0xcc,0xff), (center, 0), (center, 10))
return sprite
def makeWindow(winsize):
pygame.init()
pygame.display.set_caption("openEEG prototype")
return pygame.display.set_mode(winsize, RESIZABLE, 0)
def keepLooping():
pygame.display.update()
for e in pygame.event.get():
if (e.type == KEYUP and e.key == K_ESCAPE) \
or (e.type == QUIT):
return 0
return 1
## data routines #####################################################
def wave(frequency, sampRate=256.0):
"""
Returns a sampled wave at the given frequency and sample rate.
This routine is generalized from Eric Hagemann's article at:
http://www.onlamp.com/pub/a/python/2001/01/31/numerically.html
"""
return Numeric.sin(2 * Numeric.pi
* (frequency/sampRate)
* Numeric.arange(sampRate))
def fakeSession():
"""
Creates ten seconds of completely fake data.
"""
pureAlpha = 10 # alpha is 8-12hz
pureBeta = 20 # beta is 13-30hz
pureTheta = 6 # theta is 4-8hz
pureDelta = 2 # delta is 0.5-4hz
sec = [None] * 10 # make an empty list
# when animated, this should move right up the line:
sec[0] = wave(pureDelta)
sec[1] = wave(pureTheta)
sec[2] = wave(pureAlpha)
sec[3] = wave(pureBeta)
# and this should move back down in pairs:
sec[4] = wave(pureBeta) + wave(pureAlpha)
sec[5] = wave(pureAlpha) + wave(pureTheta)
sec[6] = wave(pureTheta) + wave(pureDelta)
sec[7] = wave(pureDelta) + wave(pureBeta)
# all four at once:
sec[8] = wave(pureDelta) + wave(pureTheta) \
+ wave(pureAlpha) + wave(pureBeta)
# and then silence:
sec[9] = wave(0)
return Numeric.concatenate(sec)
def makeSpectrogram(slice):
"""
Returns a list of length 32, with the FFT of the slice.
We seem to need 64 samples to do this.
If the sample rate is 256Hz, then we're talking about
1/4th of a second's worth of data here.
"""
assert len(slice)==64, "we want 32 bins, so we need 64 samples"
res = abs(FFT.real_fft(slice))[:-1] # discard 33rd slot (is this okay?)
res = Numeric.floor(res) # round off to integers
assert len(res)==32, len(res)
return res
## main program ######################################################
def main():
#@TODO: make names for all these magic numbers...
screen = makeWindow(winsize=(200, 400))
grad = makeGradient()
black = pygame.Surface((80,10))
black.fill((0,0,0))
# the windowing array quiets down the edges of the sample
# to prevent "clicking" at the edges:
windowing = MLab.blackman(64)
session = fakeSession()
t = 0
center= 81 # same as in creating the graph @TODO: consolidate these
while keepLooping():
# simulate aquiring data for 1/4th of a second (64 samples):
time.sleep(0.25)
data = session[t:t+64] * windowing
graph = makeSpectrogram(data)
t += 64
if t >= len(session):
t = 0
# draw the gradient, then cover part of it up:
for i in range(32):
screen.blit(grad, (20, 20+i*10))
# left is blank for now:
#screen.blit(black,(20 +(0 ), 20+i*10))
# right side shows the data:
screen.blit(black,(20+center+(graph[i]*10), 20+i*10))
if __name__=="__main__":
main()
| [
"sabren"
] | sabren |
31ee594e35458cdcaaa3616917d92259bf6f73d3 | ea1a86f636db98d111360cc2d6988dc449f21ca7 | /backend-code/website/serializers.py | fee383d25ce736bf463db816d659a7dfe387e5e7 | [] | no_license | riaaniru2613/iste.nitk.ac.in-1 | 76434cd2a019b14e29dba138618975d8dd14c6a0 | 573001912bac0c53a7118c35be6358aeb0f96b1d | refs/heads/master | 2023-07-07T11:45:07.357822 | 2021-08-05T16:28:08 | 2021-08-05T16:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from rest_framework import serializers
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
fields = kwargs.pop('fields', None)
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields is not None:
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name) | [
"amodhshenoy@gmail.com"
] | amodhshenoy@gmail.com |
f49bf0a3be14a5038d8868cbf934a3c39958629e | e585c222ecc8fa95b7c47a80cb0efb2be578b01e | /base/views.py | 29d2118ea0ffa9a512d9af9fa9e223dade01b788 | [] | no_license | 49527/miniprogram_backend | e0c13075e6af8eb1ce040c345ec7bbd448ddd58e | 105e8d85c71dfb2c7ecaf64f35c48ac3dedc9a4d | refs/heads/master | 2020-04-09T02:08:02.166013 | 2018-12-11T14:48:00 | 2018-12-11T14:48:00 | 159,929,690 | 0 | 0 | null | 2018-12-01T09:38:22 | 2018-12-01T09:38:22 | null | UTF-8 | Python | false | false | 3,824 | py | import urllib
import json
import logging
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import MethodNotAllowed
from django.http.response import HttpResponseNotAllowed
from django.conf import settings
from django.http.response import HttpResponse, FileResponse
from base.exceptions import WLException
from base.util.serializer_helper import errors_summery
logger = logging.getLogger(__name__)
class WLAPIView(object):
API_VERSION = "0.1"
parser_classes = (JSONParser, )
DEFAULT_VALIDATE_EXC_CODE = 400
ERROR_HTTP_STATUS = False
http_method_names = ['get', 'post', 'options']
def generate_response(self, data, context):
return Response(data={
"response": dict(
{"result": 200},
**data
),
"version": self.API_VERSION,
"context": context
})
def get_request_obj(self, request, method=None):
if method is None:
method = request.method
if method == "POST":
try:
context = request.data.get("context", None)
data = request.data["data"]
return data, context
except KeyError:
raise WLException(code=400, message="Request format incorrect, data field is missing.")
elif method == "GET":
objs = request.GET
if "context" in objs:
context = objs.pop("context")
try:
context = json.loads(urllib.unquote(context))
except ValueError:
context = None
else:
context = None
data = objs
return data, context
else:
raise WLException(code=500, message="Unexpected call of get request object method.")
def validate_serializer(self, serializer, exc_code=None):
if not serializer.is_valid():
message = errors_summery(serializer)
raise WLException(
message=message,
code=exc_code if exc_code is not None else self.DEFAULT_VALIDATE_EXC_CODE
)
def handle_exception(self, exc):
if isinstance(exc, WLException):
reason = exc.message
code = exc.code
if exc.code == 500:
logger.exception("WLException 500", extra={"request": self.request})
else:
logger.warn("WLException: %d, %s" % (code, reason), extra={"request": self.request})
elif isinstance(exc, MethodNotAllowed):
return HttpResponseNotAllowed(self.http_method_names)
else:
if settings.DEBUG:
reason = "%s %s" % (str(exc.__class__), str(exc))
else:
reason = "Internal Error"
code = 500
# Log the detailed exception
logger.exception("Exception not handled", extra={"request": self.request})
if self.ERROR_HTTP_STATUS:
return HttpResponse(content=reason, status=code)
else:
return Response(data={
"response": {
"result": code,
"reason": reason
},
"version": self.API_VERSION,
})
class WLBinaryView(WLAPIView):
ERROR_HTTP_STATUS = True
def get(self, request):
data, context = self.get_request_obj(request)
io_stream, content_type = self.get_io_stream(data, context)
return FileResponse(io_stream, content_type=content_type)
def get_io_stream(self, data, context):
"""
:param data:
:param context:
:return: BinaryIO, content_type
"""
raise NotImplementedError
| [
"fhy14@mails.tsinghua.edu.cn"
] | fhy14@mails.tsinghua.edu.cn |
0b1b38916d41392f1d08f3a10dbb7bce96a9e49a | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20171227/example_httplib2/01get.py | 8a8bd52dd952e4265c41b351d1c6da01239cdbc7 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 218 | py | import httplib2
import urllib.parse as parselib
http = httplib2.Http()
qs = parselib.urlencode({"name": "foo"})
response, body = http.request(f"http://localhost:44444/?{qs}", method="GET")
print(body.decode("utf-8"))
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
30a08c0dd0df890fdfc29c1163cc085d343e74f9 | 63c261c8bfd7c15f6cdb4a08ea2354a6cd2b7761 | /acaizerograu/acaizerograu/outros/migrations/0015_acaienergy_img.py | 2facb02213763ea87b8e37caec314dce25edb154 | [] | no_license | filhosdaputa/AcaiZero | 93295498d95bcc13d020f2255e6b87a12cff04bf | 99a775f823d98a0b7b10e685936f1c12ccd1a70a | refs/heads/master | 2022-10-29T05:31:10.512990 | 2017-08-11T13:49:06 | 2017-08-11T13:49:06 | 149,019,853 | 0 | 1 | null | 2022-10-18T00:41:16 | 2018-09-16T17:38:48 | JavaScript | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 22:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('outros', '0014_acaicreme_img'),
]
operations = [
migrations.AddField(
model_name='acaienergy',
name='img',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
]
| [
"igor-peres@hotmail.com"
] | igor-peres@hotmail.com |
27a385f5bed81772f708b3340dd406c08d200b27 | 6732dce33ccc8d3912c7dd9bb5a029988586a649 | /tests/all_tests_cached.py | 0515e76f07896484a441f62a9a98df0cd0eb011e | [
"Apache-2.0"
] | permissive | hamada2029/gdata-python3 | 8a0d3cb53b707b7ad2f826a486df254c813e7463 | c1028f6567b480908b90848523bebaf78e6b49f7 | refs/heads/master | 2021-01-22T12:53:28.196826 | 2014-11-30T07:05:30 | 2014-11-30T07:05:30 | 46,613,040 | 1 | 0 | null | 2015-11-21T11:44:20 | 2015-11-21T11:44:19 | null | UTF-8 | Python | false | false | 1,088 | py | #!/usr/bin/python3
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
conf.options.set_value('runlive', 'true')
conf.options.set_value('savecache', 'true')
conf.options.set_value('clearcache', 'false')
def suite():
return unittest.TestSuite((atom_tests.core_test.suite(),))
if __name__ == '__main__':
unittest.TextTestRunner().run(all_tests.suite())
| [
"jvarshney20@gmail.com"
] | jvarshney20@gmail.com |
ed1d23fc1f6ecda72389cdaea307ea28a1e07b23 | 83b242997a1560214285fd38ab4d39a0b1210ddc | /opencv/SimpleBlobDetector.py | 3b471d90063328e07509532a210cbe45856f5a4b | [] | no_license | ivartz/vid2fft | 0a25d853e178b43fd0a5f765934887963f5c37f9 | 1b6ec82de04f86819ab4c1056d4f9d9bde1ed9c8 | refs/heads/master | 2020-08-07T21:44:28.745553 | 2019-10-08T09:18:41 | 2019-10-08T09:18:41 | 213,594,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,154 | py | #/******************************************************************************
#
# Copyright (c) 2018 Antillia.com TOSHIYUKI ARAI. ALL RIGHTS RESERVED.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************/
# SimpleBlobDetector.py
# encodig: utf-8
import sys
import os
import cv2
import traceback
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
#
sys.path.append('../')
from SOL4Py.ZApplicationView import *
from SOL4Py.ZLabeledComboBox import ZLabeledComboBox
from SOL4Py.ZLabeledSlider import ZLabeledSlider
from SOL4Py.opencv.ZOpenCVImageView import ZOpenCVImageView
from SOL4Py.ZVerticalPane import ZVerticalPane
class MainView(ZApplicationView):
# Inner classes
#--------------------------------------------
class SourceImageView(ZOpenCVImageView):
def __init__(self, parent):
ZOpenCVImageView.__init__(self, parent)
def load(self, filename):
self.load_opencv_image(filename)
self.update()
class DetectedImageView(ZOpenCVImageView):
def __init__(self, parent):
ZOpenCVImageView.__init__(self, parent)
def load(self, filename):
source_image = self.load_opencv_image(filename)
self.gray_image = cv2.cvtColor(source_image, cv2.COLOR_RGB2GRAY)
def detect(self, minDist, minArea, maxArea):
source_image = self.get_opencv_image()
params = cv2.SimpleBlobDetector_Params()
params.thresholdStep = 10.0
params.minThreshold = 50.0
params.maxThreshold = 220.0
params.filterByArea = True
params.minArea = minArea
params.maxArea = maxArea
params.filterByColor = True
params.blobColor = 0
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.8
params.filterByInertia = True
params.minInertiaRatio = 0.1
params.minRepeatability = 2
params.minDistBetweenBlobs= 5.0
params.minDistBetweenBlobs= float(minDist)
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(self.gray_image);
out_image = cv2.drawKeypoints(source_image, keypoints,
None, (0, 0, 255),
cv2.DrawMatchesFlags_DRAW_RICH_KEYPOINTS )
self.set_opencv_image(out_image)
self.update()
#--------------------------------------------
# MainView Constructor
def __init__(self, title, x, y, width, height):
super(MainView, self).__init__(title, x, y, width, height)
filename = "../images/cat.jpg"
# 1 Create first imageview.
self.source_image_view = self.SourceImageView(self)
# 2 Create second imageview.
self.detectd_image_view = self.DetectedImageView(self)
# 3 Load the file
self.load_file(filename)
# 4 Add two image views to a main_layout of this main view.
self.add(self.source_image_view)
self.add(self.detectd_image_view)
self.show()
def add_control_pane(self, fixed_width=220):
# Control pane widget
self.vpane = ZVerticalPane(self, fixed_width)
self.minDist = 9;
self.minArea = 15;
self.maxArea = 131
self.minDistance_slider = ZLabeledSlider(self.vpane, "MinDistanceBetweenBlob", take_odd =False,
minimum=5, maximum=100, value=self.minDist, fixed_width=200)
self.minDistance_slider.add_value_changed_callback(self.minDistance_value_changed)
self.minArea_slider = ZLabeledSlider(self.vpane, "MinArea", take_odd =False,
minimum=1, maximum=100, value=self.minArea, fixed_width=200)
self.minArea_slider.add_value_changed_callback(self.minArea_value_changed)
self.maxArea_slider = ZLabeledSlider(self.vpane, "MaxArea", take_odd =False,
minimum=100, maximum=200, value=self.maxArea, fixed_width=200)
self.maxArea_slider.add_value_changed_callback(self.maxArea_value_changed)
self.vpane.add(self.minDistance_slider)
self.vpane.add(self.minArea_slider)
self.vpane.add(self.maxArea_slider)
self.set_right_dock(self.vpane)
def file_open(self):
options = QFileDialog.Options()
filename, _ = QFileDialog.getOpenFileName(self,"FileOpenDialog", "",
"All Files (*);;Image Files (*.png;*jpg;*.jpeg)", options=options)
if filename:
self.load_file(filename)
def load_file(self, filename):
self.source_image_view.load(filename)
self.detectd_image_view.load(filename)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
self.set_filenamed_title(filename)
def minDistance_value_changed(self, value):
self.minDist= int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
def minArea_value_changed(self, value):
self.minArea = int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
def maxArea_value_changed(self, value):
self.maxArea = int(value)
self.detectd_image_view.detect(self.minDist, self.minArea, self.maxArea)
#*************************************************
#
if main(__name__):
try:
app_name = os.path.basename(sys.argv[0])
applet = QApplication(sys.argv)
main_view = MainView(app_name, 40, 40, 900, 380)
main_view.show ()
applet.exec_()
except:
traceback.print_exc()
pass
| [
"djloek@gmail.com"
] | djloek@gmail.com |
c0b3d26047b039b6f39ae57cad8047f7af89eb6c | 9356f0b10133ed0671cd5414de81cadc97e0097d | /stravalib/tests/functional/test_client_write.py | 41d66e97f4b86e2f9b467b01faca0e83f12fb383 | [
"Apache-2.0"
] | permissive | peter-kolenic/stravalib | 850800ce716243a8498d2f6c4a9078bb29737dee | 571adc063179d0ef1519a468fcf2cfd9852b9874 | refs/heads/master | 2021-01-18T17:19:28.938813 | 2015-05-23T21:30:54 | 2015-05-23T21:30:54 | 36,108,269 | 1 | 1 | null | 2015-05-23T05:27:57 | 2015-05-23T05:27:56 | null | UTF-8 | Python | false | false | 3,326 | py | from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from io import BytesIO
from stravalib import model, exc, attributes, unithelper as uh
from stravalib.client import Client
from stravalib.tests.functional import FunctionalTestBase
class ClientWriteTest(FunctionalTestBase):
def test_create_activity(self):
"""
Test Client.create_activity simple case.
"""
now = datetime.now().replace(microsecond=0)
a = self.client.create_activity("test_create_activity#simple",
activity_type=model.Activity.RIDE,
start_date_local=now,
elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
distance=uh.miles(15.2))
print a
self.assertIsInstance(a, model.Activity)
self.assertEquals("test_create_activity#simple", a.name)
self.assertEquals(now, a.start_date_local)
self.assertEquals(round(float(uh.miles(15.2)), 2), round(float(uh.miles(a.distance)), 2))
self.assertEquals(timedelta(hours=3, minutes=4, seconds=5), a.elapsed_time)
def test_update_activity(self):
"""
Test Client.update_activity simple case.
"""
now = datetime.now().replace(microsecond=0)
a = self.client.create_activity("test_update_activity#create",
activity_type=model.Activity.RIDE,
start_date_local=now,
elapsed_time=timedelta(hours=3, minutes=4, seconds=5),
distance=uh.miles(15.2))
self.assertIsInstance(a, model.Activity)
self.assertEquals("test_update_activity#create", a.name)
update1 = self.client.update_activity(a.id, name="test_update_activivty#update")
self.assertEquals("test_update_activivty#update", update1.name)
self.assertFalse(update1.private)
self.assertFalse(update1.trainer)
self.assertFalse(update1.commute)
update2 = self.client.update_activity(a.id, private=True)
self.assertTrue(update2.private)
update3 = self.client.update_activity(a.id, trainer=True)
self.assertTrue(update3.private)
self.assertTrue(update3.trainer)
def test_upload_activity(self):
"""
Test uploading an activity.
NOTE: This requires clearing out the uploaded activities from configured
writable Strava acct.
"""
with open(os.path.join(RESOURCES_DIR, 'sample.tcx')) as fp:
uploader = self.client.upload_activity(fp, data_type='tcx')
self.assertTrue(uploader.is_processing)
a = uploader.wait()
self.assertTrue(uploader.is_complete)
self.assertIsInstance(a, model.Activity)
self.assertEquals("02/21/2009 Leiden, ZH, The Netherlands", a.name)
# And we'll get an error if we try the same file again
with self.assertRaises(exc.ActivityUploadFailed):
self.client.upload_activity(fp, data_type='tcx')
| [
"hans@xmpl.org"
] | hans@xmpl.org |
14d592ef6f3a932f1bb749f91f9ff77b9741938e | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/tests/models/splinter/test_modeling_splinter.py | f064611b6a9e985045b57a4c73450cd202e878c0 | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 20,353 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Splinter model. """
import copy
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel
from transformers.models.splinter.modeling_splinter import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST
class SplinterModelTester:
def __init__(
self,
parent,
batch_size=13,
num_questions=3,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
question_token_id=1,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_questions = num_questions
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.question_token_id = question_token_id
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, 1] = self.question_token_id
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
start_positions = None
end_positions = None
question_positions = None
if self.use_labels:
start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels)
config = SplinterConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
question_token_id=self.question_token_id,
)
return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_question_answering(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions[:, 0],
end_positions=end_positions[:, 0],
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_pretraining(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SplinterModelTest(ModelTesterMixin, unittest.TestCase):
    """Standard modeling tests for Splinter (base model, QA and pretraining heads)."""

    all_model_classes = (
        (
            SplinterModel,
            SplinterForQuestionAnswering,
            SplinterForPreTraining,
        )
        if is_torch_available()
        else ()
    )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Deep-copy `inputs_dict` and, when `return_labels` is set, add
        zero-valued label tensors shaped for the given head."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if issubclass(model_class, SplinterForPreTraining):
                # pretraining labels carry one entry per question token
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size,
                    self.model_tester.num_questions,
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size,
                    self.model_tester.num_questions,
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["question_positions"] = torch.zeros(
                    self.model_tester.batch_size,
                    self.model_tester.num_questions,
                    dtype=torch.long,
                    device=torch_device,
                )
            elif issubclass(model_class, SplinterForQuestionAnswering):
                # the QA head takes a single position per example
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = SplinterModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # renamed loop variable from `type` to avoid shadowing the builtin
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_inputs_embeds(self):
        """Forward each model from `inputs_embeds` instead of `input_ids`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                if isinstance(model, SplinterForPreTraining):
                    with self.assertRaises(TypeError):
                        # question_positions must not be None.
                        model(**inputs)[0]
                else:
                    model(**inputs)[0]

    @slow
    def test_model_from_pretrained(self):
        for model_name in SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SplinterModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs.
    # When the batch is distributed to multiple devices, each replica could get different values for the maximal number
    # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different
    # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output.
    @require_torch_multi_gpu
    def test_multi_gpu_data_parallel_forward(self):
        from torch import nn

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # some params shouldn't be scattered by nn.DataParallel
        # so just remove them if they are present.
        blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
        for k in blacklist_non_batched_params:
            inputs_dict.pop(k, None)

        # move input tensors to cuda:0  (comment typo fix: was "cuda:O")
        for k, v in inputs_dict.items():
            if torch.is_tensor(v):
                inputs_dict[k] = v.to(0)

        for model_class in self.all_model_classes:
            # Skip this case since it will fail sometimes, as described above.
            if model_class == SplinterForPreTraining:
                continue

            model = model_class(config=config)
            model.to(0)
            model.eval()

            # Wrap model in nn.DataParallel
            model = nn.DataParallel(model)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class SplinterModelIntegrationTest(unittest.TestCase):
    """Slow integration tests: run the pretrained tau/splinter-base-qass
    checkpoint on fixed token-id inputs and check known-good outputs."""

    @slow
    def test_splinter_question_answering(self):
        """QA head predicts the span 'the United Kingdom' (tokens 10..12)."""
        model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")
        # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]"
        # Output should be the span "the United Kingdom"
        input_ids = torch.tensor(
            [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
        )
        output = model(input_ids)
        expected_shape = torch.Size((1, 16))
        self.assertEqual(output.start_logits.shape, expected_shape)
        self.assertEqual(output.end_logits.shape, expected_shape)
        self.assertEqual(torch.argmax(output.start_logits), 10)
        self.assertEqual(torch.argmax(output.end_logits), 12)

    @slow
    def test_splinter_pretraining(self):
        """Pretraining head answers both [QUESTION] tokens in one pass."""
        model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
        # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
        # Output should be the spans "Brad" and "the United Kingdom"
        input_ids = torch.tensor(
            [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
        )
        # indices of the two [QUESTION] (id 104) tokens in input_ids
        question_positions = torch.tensor([[1, 5]], dtype=torch.long)
        output = model(input_ids, question_positions=question_positions)
        # logits have one row per question: (batch, num_questions, seq_len)
        expected_shape = torch.Size((1, 2, 16))
        self.assertEqual(output.start_logits.shape, expected_shape)
        self.assertEqual(output.end_logits.shape, expected_shape)
        self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7)
        self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7)
        self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10)
        self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12)

    @slow
    def test_splinter_pretraining_loss_requires_question_positions(self):
        """Passing labels without question_positions must raise TypeError."""
        model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
        # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
        # Output should be the spans "Brad" and "the United Kingdom"
        input_ids = torch.tensor(
            [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
        )
        start_positions = torch.tensor([[7, 10]], dtype=torch.long)
        # NOTE(review): end_positions is 1-D here while start_positions is
        # 2-D; confirm whether [[7, 12]] was intended.
        end_positions = torch.tensor([7, 12], dtype=torch.long)
        with self.assertRaises(TypeError):
            model(
                input_ids,
                start_positions=start_positions,
                end_positions=end_positions,
            )

    @slow
    def test_splinter_pretraining_loss(self):
        """Loss on a duplicated correctly-labeled batch matches the reference value."""
        model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
        # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
        # Output should be the spans "Brad" and "the United Kingdom"
        input_ids = torch.tensor(
            [
                [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
                [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
            ]
        )
        start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long)
        end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long)
        question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long)
        output = model(
            input_ids,
            start_positions=start_positions,
            end_positions=end_positions,
            question_positions=question_positions,
        )
        self.assertAlmostEqual(output.loss.item(), 0.0024, 4)

    @slow
    def test_splinter_pretraining_loss_with_padding(self):
        """Zero-padded question/label entries must not change the loss."""
        model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
        # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
        # Output should be the spans "Brad" and "the United Kingdom"
        input_ids = torch.tensor(
            [
                [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
            ]
        )
        start_positions = torch.tensor([[7, 10]], dtype=torch.long)
        # NOTE(review): end_positions tensors here are 1-D while the padded
        # start/question tensors are 2-D; confirm whether [[...]] was intended.
        end_positions = torch.tensor([7, 12], dtype=torch.long)
        question_positions = torch.tensor([[1, 5]], dtype=torch.long)
        start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long)
        end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long)
        question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long)
        output = model(
            input_ids,
            start_positions=start_positions,
            end_positions=end_positions,
            question_positions=question_positions,
        )
        output_with_padding = model(
            input_ids,
            start_positions=start_positions_with_padding,
            end_positions=end_positions_with_padding,
            question_positions=question_positions_with_padding,
        )
        self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4)
        # Note that the original code uses 0 to denote padded question tokens
        # and their start and end positions. As the pad_token_id of the model's
        # config is used for the losse's ignore_index in SplinterForPreTraining,
        # we add this test to ensure anybody making changes to the default
        # value of the config, will be aware of the implication.
        self.assertEqual(model.config.pad_token_id, 0)

    @slow
    def test_splinter_pretraining_prepare_question_positions(self):
        """Omitting question_positions must infer them and give identical logits."""
        model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
        # each row places the [QUESTION] token (id 104) at different offsets;
        # rows with fewer questions are padded with position 0
        input_ids = torch.tensor(
            [
                [101, 104, 1, 2, 104, 3, 4, 102],
                [101, 1, 104, 2, 104, 3, 104, 102],
                [101, 1, 2, 104, 104, 3, 4, 102],
                [101, 1, 2, 3, 4, 5, 104, 102],
            ]
        )
        question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long)
        output_without_positions = model(input_ids)
        output_with_positions = model(input_ids, question_positions=question_positions)
        self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all())
        self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
1b46a272a3f67f353f53056f13e3223b617f355c | 303a4d41da8f2cd2000630ff30424d2875490e67 | /190329multitimetest/gendat.py | 7704c283a733f1501c221aafe5d58fcc19b0e6d5 | [] | no_license | noobermin/sharks | beb1d3d6a593e8d62f3d7416697d4de1fe9558b1 | af87113781eb67af45a9c2f79b73b1512ae0a1fa | refs/heads/master | 2022-05-10T11:55:17.200591 | 2021-09-30T14:27:22 | 2021-09-30T14:27:22 | 19,997,024 | 0 | 2 | null | 2016-05-20T19:27:49 | 2014-05-20T20:49:16 | Common Lisp | UTF-8 | Python | false | false | 5,587 | py | #!/usr/bin/env python
'''
Generate a dat file.
'''
from io import BytesIO; #we python3 now
import re;
import numpy as np;
from pys import test,parse_numtuple,sd,take,mk_getkw;
def mt(t, m=1e-4):
    """Scale every entry of tuple `t` by `m` (default converts microns to cm).

    Replaces the original lambda assignment (PEP 8 E731) with a def.
    """
    return tuple(i * m for i in t)


c = 299792458  # speed of light, m/s
c_cgs = c * 100  # speed of light, cm/s
e0 = 8.8541878176e-12  # vacuum permittivity, F/m
# Default options shared by the dat-file generators below.  Lengths are in
# units of 'ux' (presumably microns, converted to cm -- TODO confirm);
# densities presumably in cm^-3.
datdefaults = {
    'expf': 1.5,  # exponential preplasma scale length (in ux units; see genf)
    'tlim': (0,27.5, 0,0,0.0 ,0.0,0.0),  # grid extent; NOTE(review): 7 entries, code indexes only [0..5]
    'n_s' : 1e23,  # solid (peak) density
    'n_min' : 1e18,  # minimum density; not referenced in the visible code
    'long_margin' : [2.5, 5.0],  # margins along the long axis; not referenced here
    'sdim': (17.5,27.5, 0.0,0.0, 0.0,0.0),  # extent of the solid region
    'type' : 'singlescale',  # profile type; only 'singlescale' implemented (see genf)
    'ux' : 1e-4,  # unit scale factor applied by mt()
    'dat_xres' : 100,  # sample points along x (y/z default to the same)
    'datfmt' : '%.8e',  # numeric format passed to np.savetxt
};
def gendats(ds, **kw):
    """Render one dat file per override dict in `ds`, merged over `kw`."""
    rendered = []
    for overrides in ds:
        rendered.append(gendat(**sd(kw, overrides)))
    return rendered
def gendat(**kw):
    """Serialize a 1D/2D/3D density profile into the plain-text dat format.

    Dimensionality is inferred from the keywords:
      * 'f_1D' / 'data1D'              -> 1D (two columns: x, density)
      * 'f_2D' / 'data2D' (+ 'tlim')   -> 2D (shape line, axes, then rows)
      * 'f_3D' / 'data3D' (+ 'tlim')   -> 3D (shape line, axes, then z slices)

    'f_*' entries are callables evaluated on a grid built from 'tlim' and the
    dat_*res options; 'data*' entries supply axes and data directly.

    Returns:
        The serialized file contents as bytes.

    Raises:
        ValueError: if the dimensionality cannot be determined.
    """
    getkw = mk_getkw(kw, datdefaults)
    xres = getkw('dat_xres')
    # y and z resolutions default to the x resolution
    yres = zres = xres
    if test(kw, 'dat_yres'):
        yres = kw['dat_yres']
    if test(kw, 'dat_zres'):
        zres = kw['dat_zres']
    unit = getkw('ux')
    tlim = mt(getkw('tlim'), m=unit)
    fmt = getkw('datfmt')
    if test(kw, 'f_1D') or test(kw, 'data1D'):
        dim = 1
    elif (test(kw, 'f_2D') or test(kw, 'data2D')) and test(kw, 'tlim'):
        dim = 2
    elif (test(kw, 'f_3D') or test(kw, 'data3D')) and test(kw, 'tlim'):
        dim = 3
    else:
        raise ValueError("Cannot reckon data dimensionality")
    if dim == 1:
        if test(kw, 'f_1D'):
            x = np.linspace(tlim[0], tlim[1], xres)
            d = getkw('f_1D')(x)
        elif test(kw, 'data1D'):
            x, d = getkw('data1D')
        s = BytesIO()
        np.savetxt(s, np.array([x, d]).T, fmt=fmt)
        return s.getvalue()
    elif dim == 2:
        if test(kw, 'f_2D'):
            x = np.linspace(tlim[0], tlim[1], xres)
            # degenerate y extent: fall back to the z extent for the 2nd axis
            if np.isclose(tlim[2], tlim[3]):
                y = np.linspace(tlim[4], tlim[5], yres)
            else:
                y = np.linspace(tlim[2], tlim[3], yres)
            X, Y = np.meshgrid(x, y, indexing='ij')
            d = getkw('f_2D')(X, Y)
        elif test(kw, 'data2D'):
            x, y, d = getkw('data2D')
        s = BytesIO()
        np.savetxt(s, np.array(list(d.shape)).reshape(1, -1), fmt='%i')
        np.savetxt(s, np.array(x).reshape(1, -1), fmt=fmt)
        np.savetxt(s, np.array(y).reshape(1, -1), fmt=fmt)
        np.savetxt(s, np.array(d).T, fmt=fmt)
        return s.getvalue()
    else:
        s = BytesIO()
        if test(kw, 'f_3D'):
            X, Y, Z = np.mgrid[
                tlim[0]:tlim[1]:xres * 1j,
                tlim[2]:tlim[3]:yres * 1j,
                tlim[4]:tlim[5]:zres * 1j]
            d = getkw('f_3D')(X, Y, Z)
            np.savetxt(s, np.array(list(d.shape)).reshape(1, -1), fmt='%i')
            np.savetxt(s, X[:, 0, 0].reshape(1, -1), fmt=fmt)
            np.savetxt(s, Y[0, :, 0].reshape(1, -1), fmt=fmt)
            np.savetxt(s, Z[0, 0, :].reshape(1, -1), fmt=fmt)
            # free the (potentially large) coordinate grids before writing
            del X, Y, Z
        elif test(kw, 'data3D'):
            x, y, z, d = getkw('data3D')
            np.savetxt(s, np.array(list(d.shape)).reshape(1, -1), fmt='%i')
            np.savetxt(s, np.array(x).reshape(1, -1), fmt=fmt)
            np.savetxt(s, np.array(y).reshape(1, -1), fmt=fmt)
            np.savetxt(s, np.array(z).reshape(1, -1), fmt=fmt)
        # BUG FIX: the z-slice writing loop used to live inside the 'data3D'
        # branch only, so the 'f_3D' path wrote the header and axes but never
        # the data itself.  Write the slices for both branches.
        d = np.asarray(d)
        for i in range(d.shape[-1]):
            np.savetxt(s, d[:, :, i].T, fmt=fmt)
        return s.getvalue()
def mktwoscales(solid, sdim, xdim, L_front, L_back,
                tlim=None,
                front_floor=0.0,
                back_floor=0.0):
    """Build a 1D two-scale density profile function.

    The profile is `solid` inside [sdim[0], sdim[1]], decays exponentially
    with scale length L_front in front of the solid and L_back behind it,
    and is clamped to front_floor / back_floor outside the exponential
    regions (within tlim, which defaults to xdim).

    Returns:
        outf(x): vectorized function mapping a numpy array of positions to
        densities.

    Fix: removed the unused `good` mask that was computed on every call.
    """
    if tlim is None:
        tlim = xdim
    # lengths of the exponential ramps; with a nonzero floor the ramp stops
    # where it would drop below the floor
    ppf_len = abs(sdim[0] - tlim[0])
    if front_floor > 0.0:
        ppf_len = min(np.log(solid / front_floor) * L_front, ppf_len)
    ppb_len = abs(sdim[1] - tlim[1])
    if back_floor > 0.0:
        ppb_len = min(np.log(solid / back_floor) * L_back, ppb_len)

    def outf(x):
        out = np.zeros_like(x)
        # later assignments intentionally overwrite earlier ones: floors
        # first, then the solid, then the exponential ramps on top
        out[np.logical_and(sdim[0] >= x, x >= tlim[0])] = front_floor
        out[np.logical_and(sdim[1] <= x, x <= tlim[1])] = back_floor
        solids = np.logical_and(sdim[0] <= x, x <= sdim[1])
        out[solids] = solid
        fronts = np.logical_and(sdim[0] - ppf_len <= x, x <= sdim[0])
        out[fronts] = solid * np.exp(-np.abs(x - sdim[0]) / L_front)[fronts]
        backs = np.logical_and(sdim[1] <= x, x <= sdim[1] + ppb_len)
        out[backs] = solid * np.exp(-np.abs(x - sdim[1]) / L_back)[backs]
        return out
    return outf
def mkdecay(solid, sdim, xdim, l):
    """Single-scale density profile: `solid` inside sdim, exponential decay
    with scale length `l` elsewhere inside xdim, zero outside xdim.

    Returns a numpy-vectorized function of position.
    """
    def profile(x):
        if x <= xdim[0] or x >= xdim[1]:
            return 0.0
        if sdim[0] <= x <= sdim[1]:
            return solid
        # decay measured from the front face of the solid
        return np.exp(-np.abs(x - sdim[0]) / l) * solid
    return np.vectorize(profile)
def tlim_mvorig(tlim):
    """Shift a (xmin, xmax, ymin, ymax, zmin, zmax, ...) extent so each of
    the three axes starts at zero, preserving the spans.

    Uses indexing (not unpacking) because tlim may carry extra entries.
    """
    xspan = tlim[1] - tlim[0]
    yspan = tlim[3] - tlim[2]
    zspan = tlim[5] - tlim[4]
    return (0, xspan, 0, yspan, 0, zspan)
def genf(**kw):
    """Build the density function f(x) implied by the keyword options.

    Only the 'singlescale' profile type is implemented; anything else
    raises NotImplementedError.
    """
    getkw = mk_getkw(kw, datdefaults)
    if getkw('type') != 'singlescale':
        raise NotImplementedError("Coming soon!")
    unit = getkw('ux')
    scaled_tlim = mt(getkw('tlim'), m=unit)
    return mkdecay(
        getkw('n_s'),
        mt(getkw('sdim'), m=unit),
        (scaled_tlim[0], scaled_tlim[1]),
        getkw('expf') * unit)
# Defaults for genonescale: datdefaults plus the slab geometry
# ('solid_len' units of solid at the far end of an 'xlen' long box).
onescale_defaults = sd(
    datdefaults,
    solid_len=10,
    xlen=27.5,
);
def genonescale(**kw):
    """Generate a dat file for a single solid slab at the back of the box."""
    getkw = mk_getkw(kw, onescale_defaults)
    slab_len = getkw("solid_len")
    box_len = getkw("xlen")
    # place the slab flush against the far (x = xlen) end of the box
    opts = sd(
        kw,
        tlim=(0.0, box_len, 0.0, 0.0, 0.0, 0.0),
        sdim=(box_len - slab_len, box_len, 0.0, 0.0, 0.0, 0.0))
    opts['f_1D'] = genf(**opts)
    return gendat(**opts)
| [
"ngirmang.1@osu.edu"
] | ngirmang.1@osu.edu |
324e053537ed14e06f80510fe149a26724df36b1 | 5c254373f6725107931b68704436c2dbcd39d877 | /ute/probabilistic_utils/mallow.py | a9193339cded704e6b8f18ef329bbb1af5c8466e | [
"MIT"
] | permissive | JunLi-Galios/unsup_temp_embed_alternating | 22330346094720ecba2e5af305febe586566b92f | 1b054fd82aadcfe1aa219be17beb77c89efd974e | refs/heads/master | 2023-03-21T04:06:16.044321 | 2021-03-20T06:06:06 | 2021-03-20T06:06:06 | 322,737,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | #!/usr/bin/env python
"""Implementation of the Generalized Mallow Model. It's used for modeling
temporal relations within video collection of one complex activity. """
__author__ = 'Anna Kukleva'
__date__ = 'August 2018'
import numpy as np
class Mallow(object):
    """The Generalized Mallows Model over orderings of K subactions.

    Orderings are represented by their inversion-count vectors; a per-position
    dispersion parameter rho controls how strongly a sampled ordering sticks
    to the canonical one.
    """

    def __init__(self, K, rho_0=1.0, nu_0=0.1):
        """
        Args:
            K: number of subactions in current complex activity
            rho_0: prior dispersion parameter
            nu_0: prior strength (pseudo-count) of the dispersion prior
        """
        self._canon_ordering = None
        # number of subactions
        self._K = K
        # position currently being (re)sampled; updated by set_sample_params
        self.k = 0
        self.rho = [1e-8] * (K - 1)
        self.rho_0 = rho_0
        self._nu_0 = nu_0
        self._dispersion = np.zeros((self._K, 1))
        self._v_j_0 = {}
        self._init_v_j_0()
        self._v_j_sample = 0
        self._nu_sample = 0

    def _init_v_j_0(self):
        # prior expected inversion counts v_j_0 for each position under rho_0
        for k in range(self._K):
            v_j_0 = 1. / (np.exp(self.rho_0) - 1) - \
                    (self._K - k + 1) / (np.exp((self._K - k + 1) * self.rho_0) - 1)
            self._v_j_0[k] = v_j_0

    def set_sample_params(self, sum_inv_vals, k, N):
        """Set the posterior parameters used when sampling the dispersion.

        Args:
            sum_inv_vals: summation over all videos in collection for certain
                position in inverse count vectors
            k: current position for computations
            N: number of videos in collection
        """
        # BUG FIX: this method previously assigned self._k, which nothing
        # reads, so logpdf() always evaluated the normalization factor at the
        # stale self.k == 0 set in __init__.  Store the position where it is
        # actually read.
        self.k = k
        self._k = k  # kept for backward compatibility with external readers
        self._nu_sample = self._nu_0 + N
        self._v_j_sample = (sum_inv_vals + self._v_j_0[k] * self._nu_0)  # / (self._nu_0 + N)

    def logpdf(self, ro_j):
        """Unnormalized log-posterior of dispersion value ro_j at position self.k."""
        norm_factor = np.log(self._normalization_factor(self.k, ro_j))
        result = -ro_j * self._v_j_sample - norm_factor * self._nu_sample
        return np.array(result)

    def _normalization_factor(self, k, rho_k):
        # partition function of the GMM term for position k
        power = (self._K - k + 1) * rho_k
        numerator = 1. - np.exp(-power)
        denominator = 1. - np.exp(-rho_k)
        return numerator / denominator

    def single_term_prob(self, count, k):
        """Log-probability of observing inversion count `count` at position k."""
        result = -(self.rho[k] * count) - \
                 np.log(self._normalization_factor(k, self.rho[k]))
        return result

    @staticmethod
    def inversion_counts(ordering):
        """Compute the (K-1)-length inverse count vector from an ordering."""
        ordering = np.array(ordering)
        inversion_counts_v = []
        for idx, val in enumerate(ordering):
            # position of action `idx` in the ordering
            idx_end = int(np.where(ordering == idx)[0])
            # how many later actions precede it
            inversion_counts_v.append(np.sum(ordering[:idx_end] > idx))
        return inversion_counts_v[:-1]

    def ordering(self, inverse_count):
        """Compute ordering from inverse count vector."""
        ordering = np.ones(self._K, dtype=int) * -1
        for action, val in enumerate(inverse_count):
            # place `action` after skipping `val` still-empty slots
            for idx, established in enumerate(ordering):
                if established > -1:
                    continue
                if val == 0:
                    ordering[idx] = action
                    break
                if established == -1:
                    val -= 1
        # last action fills the remaining slot
        ordering[np.where(ordering == -1)] = self._K - 1
        return ordering
| [
"kuklevaanna@gmail.com"
] | kuklevaanna@gmail.com |
3f6f20932447ab92f92ee5991e43992a14450eca | 8baec0fc6e2e2e4b46e7880df9dbaa313c01272f | /data/cn_few_fusion_dataset.py | f4be2acb0d640f67343116793852b0c2840a0172 | [
"BSD-2-Clause"
] | permissive | hologerry/BicycleGAN | 6ce4884fdaf8d4c5231dae537b3f0f552856add9 | 64671c38058744d49e988980770d11b72466c59b | refs/heads/master | 2021-06-26T07:33:16.941169 | 2019-08-20T12:38:44 | 2019-08-20T12:38:44 | 149,060,743 | 0 | 0 | NOASSERTION | 2019-03-13T05:07:19 | 2018-09-17T02:56:34 | Python | UTF-8 | Python | false | false | 4,109 | py | import os
import random
from PIL import Image, ImageFilter
from data.base_dataset import BaseDataset, transform_few_with_label
from data.image_folder import make_dataset
class CnFewFusionDataset(BaseDataset):
    """Few-shot glyph dataset.

    Each image on disk is three equal-width crops side by side:
    reference (A) | gray shape (B) | gradient/color (C).  For every target
    glyph, `opt.nencode` exemplars of characters from the few-shot pool are
    loaded from the 'style' split, together with Gaussian-blurred variants
    of their shape and color crops.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # no dataset-specific command-line options
        return parser

    def rreplace(self, s, old, new, occurrence):
        """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
        li = s.rsplit(old, occurrence)
        return new.join(li)

    def initialize(self, opt):
        """Collect sample paths and fix the pool of few-shot training chars."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_ABC = os.path.join(opt.dataroot, opt.phase)
        self.ABC_paths = sorted(make_dataset(self.dir_ABC))
        # self.chars = list(range(500)) # only use 500 of 639 to train, and the remain 139 as test set
        # guarantee consistent for test
        # so just shuffle 500 once
        # fixed, pre-shuffled character ids (first 100 of the gb639 set)
        self.shuffled_gb639list = [172, 370, 222, 37, 220, 317, 333, 494, 468, 25,
                                   440, 208, 488, 177, 167, 104, 430, 383, 422, 174,
                                   441, 475, 473, 72, 9, 389, 132, 412, 24, 288,
                                   453, 372, 181, 322, 115, 34, 345, 243, 188, 118,
                                   142, 197, 429, 358, 223, 121, 20, 241, 178, 238,
                                   272, 182, 384, 295, 490, 98, 96, 476, 226, 129,
                                   305, 28, 207, 351, 193, 378, 390, 353, 452, 240,
                                   477, 214, 306, 373, 63, 248, 323, 109, 21, 381,
                                   393, 263, 111, 92, 231, 114, 218, 69, 482, 252,
                                   257, 300, 283, 420, 62, 154, 146, 478, 89, 419]
        assert(opt.few_size <= len(self.shuffled_gb639list))
        # the first `few_size` ids form the few-shot character pool
        self.chars = self.shuffled_gb639list[:opt.few_size]

    def __getitem__(self, index):
        """Load one target triple plus `nencode` randomly chosen style exemplars."""
        ABC_path = self.ABC_paths[index]
        ABC = Image.open(ABC_path).convert('RGB')
        # split the strip into three equal-width crops
        w3, h = ABC.size
        w = int(w3 / 3)
        A = ABC.crop((0, 0, w, h))
        B = ABC.crop((w, 0, w+w, h))
        C = ABC.crop((w+w, 0, w+w+w, h))
        Bases = []
        Shapes = []
        Colors = []
        Style_paths = []
        blur_Shapes = []
        blur_Colors = []
        # the character id is encoded at the end of the filename: ..._<char>.png
        target_char = int(ABC_path.split('_')[-1].split('.')[0])
        ABC_path_c = ABC_path
        # label marks whether the target char belongs to the few-shot pool
        label = 0.0
        if target_char in self.chars:
            label = 1.0
        # for shapes
        random.shuffle(self.chars)
        chars_random = self.chars[:self.opt.nencode]
        for char in chars_random:
            # derive the style exemplar path by swapping the char id and split dir
            s_path = self.rreplace(ABC_path_c, str(target_char), str(char), 1) # /path/to/img/XXXX_XX_XXX.png
            s_path = s_path.replace(self.opt.phase, 'style')
            Style_paths.append(s_path)
            Bases.append(Image.open(s_path).convert('RGB').crop((0, 0, w, h)))
            Shapes.append(Image.open(s_path).convert('RGB').crop((w, 0, w+w, h)))
            Colors.append(Image.open(s_path).convert('RGB').crop((w+w, 0, w+w+w, h)))
            # blurred variants use a random Gaussian radius in [2, 4)
            blur_Shapes.append(
                Image.open(s_path).convert('RGB').crop((w, 0, w+w, h)).filter(
                    ImageFilter.GaussianBlur(radius=(random.random()*2+2)))
            )
            blur_Colors.append(
                Image.open(s_path).convert('RGB').crop((w+w, 0, w+w+w, h)).filter(
                    ImageFilter.GaussianBlur(radius=(random.random()*2+2)))
            )
        A, B, B_G, C, C_G, C_l, label, Bases, Shapes, Colors, blur_Shapes, blur_Colors = \
            transform_few_with_label(self.opt, A, B, C, label, Bases, Shapes, Colors, blur_Shapes, blur_Colors)
        # A is the reference, B is the gray shape, C is the gradient
        return {'A': A, 'B': B, 'B_G': B_G, 'C': C, 'C_G': C_G, 'C_l': C_l, 'label': label,
                'Bases': Bases, 'Shapes': Shapes, 'Colors': Colors,
                'blur_Shapes': blur_Shapes, 'blur_Colors': blur_Colors,
                'ABC_path': ABC_path, 'Style_paths': Style_paths,
                }

    def __len__(self):
        """Number of samples in the current split."""
        return len(self.ABC_paths)

    def name(self):
        return 'CnFewFusionDataset'
| [
"hologerry@gmail.com"
] | hologerry@gmail.com |
6a833dc13c4576d7d6ac68aa2ac28032e4b16eb8 | edbf8601ae771031ad8ab27b19c2bf450ca7df76 | /45-Jump-Game-II/JumpGameII.py3 | 68cb781b045a49898b021dd462bc34abdeadfb91 | [] | no_license | gxwangdi/Leetcode | ec619fba272a29ebf8b8c7f0038aefd747ccf44a | 29c4c703d18c6ff2e16b9f912210399be427c1e8 | refs/heads/master | 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 | Java | UTF-8 | Python | false | false | 581 | py3 | class Solution:
def jump(self, nums: List[int]) -> int:
if nums == None or len(nums) == 0 :
return -1
size = len(nums)
if size == 1 :
return 0
dp = [sys.maxsize]*size
dp[0] = 0
cur = 1
for i in range(size) :
far = i + nums[i]
value = dp[i] + 1
if far >= size -1:
return value
if far < cur :
continue
while cur <= far:
dp[cur] = value
cur+=1
return dp[-1]
| [
"gxwangdi@gmail.com"
] | gxwangdi@gmail.com |
234d3754983682a50c503af825e2d5e008b2e442 | 1268030197a27bf2ef5e3f5ab8df38993457fed5 | /rasa_core/rasa_core/featurizers.py | 78a3d1964c43b0ef3b094a3a44e733acd8e8a96d | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | parimalpate123/rasa_slack_chatbot | 439abd9a541d6314b46c6fb303c0275803fc9357 | 206aacab62f12be9df9f009f65736caed3e8edac | refs/heads/master | 2020-04-17T14:13:49.917604 | 2019-05-07T11:08:07 | 2019-05-07T11:08:07 | 166,649,129 | 0 | 1 | null | 2019-01-29T11:09:07 | 2019-01-20T10:32:59 | Python | UTF-8 | Python | false | false | 25,203 | py | import io
import jsonpickle
import logging
import numpy as np
import os
from tqdm import tqdm
from typing import Tuple, List, Optional, Dict, Text, Any
from rasa_core import utils
from rasa_core.actions.action import ACTION_LISTEN_NAME
from rasa_core.domain import PREV_PREFIX, Domain
from rasa_core.events import ActionExecuted
from rasa_core.trackers import DialogueStateTracker
from rasa_core.training.data import DialogueTrainingData
logger = logging.getLogger(__name__)
class SingleStateFeaturizer(object):
    """Base class for mechanisms that turn a single dialogue state into a
    machine-readable feature vector.

    Subclasses decide how the conversation state is transformed into the
    vector a classifier consumes."""

    def __init__(self):
        """Declare instance variables filled in by prepare_from_domain."""
        self.user_feature_len = None
        self.slot_feature_len = None

    def prepare_from_domain(self, domain: Domain) -> None:
        """Hook for subclasses to initialize themselves from the domain."""
        pass

    def encode(self, state: Dict[Text, float]) -> np.ndarray:
        """Turn one state dict into a feature vector; must be overridden."""
        raise NotImplementedError("SingleStateFeaturizer must have "
                                  "the capacity to "
                                  "encode states to a feature vector")

    @staticmethod
    def action_as_one_hot(action: Text, domain: Domain) -> np.ndarray:
        """One-hot encode an action name; an all -1 vector marks padding."""
        if action is None:
            return np.ones(domain.num_actions, dtype=int) * -1
        one_hot = np.zeros(domain.num_actions, dtype=int)
        one_hot[domain.index_for_action(action)] = 1
        return one_hot

    def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
        """Hook: build a matrix with one encoded row per domain action."""
        pass
class BinarySingleStateFeaturizer(SingleStateFeaturizer):
    """Assumes all features are binary.

    All features should be either on or off, denoting them with 1 or 0."""

    def __init__(self):
        """Declares instant variables."""
        super(BinarySingleStateFeaturizer, self).__init__()
        self.num_features = None
        self.input_state_map = None

    def prepare_from_domain(self, domain: Domain) -> None:
        """Cache the feature count and state-name -> index map from the domain."""
        self.num_features = domain.num_states
        self.input_state_map = domain.input_state_map
        self.user_feature_len = (len(domain.intent_states) +
                                 len(domain.entity_states))
        self.slot_feature_len = len(domain.slot_states)

    def encode(self, state: Dict[Text, float]) -> np.ndarray:
        """Encode a state dict as a (mostly) binary feature vector.

        Each known state name sets its slot to the given probability; vectors
        containing only integer values are returned as int32, otherwise as
        floats.  A None state (padding) is encoded as all -1.
        """
        if not self.num_features:
            raise Exception("BinarySingleStateFeaturizer "
                            "was not prepared "
                            "before encoding.")

        # padding vectors are specified by None states
        if state is None or None in state:
            return np.ones(self.num_features, dtype=np.int32) * -1

        # accumulate as floats; downcast afterwards if every value was an int
        encoded = np.zeros(self.num_features, dtype=np.float)
        all_ints = True
        for state_name, value in state.items():
            if state_name not in self.input_state_map:
                logger.debug(
                    "Feature '{}' (value: '{}') could not be found in "
                    "feature map. Make sure you added all intents and "
                    "entities to the domain".format(state_name, value))
                continue
            encoded[self.input_state_map[state_name]] = value
            all_ints = all_ints and utils.is_int(value)

        if all_ints:
            # this is an optimization - saves us a bit of memory
            return encoded.astype(np.int32)
        return encoded

    def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
        """Encode every action as a one-hot row: the identity matrix."""
        return np.eye(domain.num_actions)
class LabelTokenizerSingleStateFeaturizer(SingleStateFeaturizer):
    """SingleStateFeaturizer that splits user intents and
    bot action names into tokens and uses these tokens to
    create bag-of-words feature vectors.

    The feature vector layout is: [user-token counts | slot flags |
    bot-token counts] (offsets computed from the vocabulary sizes).

    Args:
        split_symbol: The symbol that separates words in
            intets and action names.
        use_shared_vocab: The flag that specifies if to create
            the same vocabulary for user intents and bot actions.
    """

    def __init__(self,
                 use_shared_vocab: bool = False,
                 split_symbol: Text = '_') -> None:
        """inits vocabulary for label bag of words representation"""
        super(LabelTokenizerSingleStateFeaturizer, self).__init__()
        self.use_shared_vocab = use_shared_vocab
        self.split_symbol = split_symbol
        # all filled in later by prepare_from_domain
        self.num_features = None
        self.user_labels = []
        self.slot_labels = []
        self.bot_labels = []
        self.bot_vocab = None
        self.user_vocab = None

    @staticmethod
    def _create_label_token_dict(labels, split_symbol='_'):
        """Splits labels into tokens by using provided symbol.
        Creates the lookup dictionary for this tokens.
        Values in this dict are used for featurization."""
        distinct_tokens = set([token
                               for label in labels
                               for token in label.split(split_symbol)])
        # sorted so that token -> index assignment is deterministic
        return {token: idx
                for idx, token in enumerate(sorted(distinct_tokens))}

    def prepare_from_domain(self, domain: Domain) -> None:
        """Creates internal vocabularies for user intents
        and bot actions to use for featurization"""
        self.user_labels = domain.intent_states + domain.entity_states
        self.slot_labels = domain.slot_states
        self.bot_labels = domain.action_names

        if self.use_shared_vocab:
            # one joint token vocabulary used for both user and bot labels
            self.bot_vocab = self._create_label_token_dict(self.bot_labels +
                                                           self.user_labels,
                                                           self.split_symbol)
            self.user_vocab = self.bot_vocab
        else:
            self.bot_vocab = self._create_label_token_dict(self.bot_labels,
                                                           self.split_symbol)
            self.user_vocab = self._create_label_token_dict(self.user_labels,
                                                            self.split_symbol)

        # vector layout: [user tokens | slots | bot tokens]
        self.num_features = (len(self.user_vocab) +
                             len(self.slot_labels) +
                             len(self.bot_vocab))

        self.user_feature_len = len(self.user_vocab)
        self.slot_feature_len = len(self.slot_labels)

    def encode(self, state: Dict[Text, float]) -> np.ndarray:
        """Encode a state as a bag-of-label-tokens vector.

        User tokens are only counted right after a listen action; slot and
        previous-action features are always counted.  None states (padding)
        encode as all -1.
        """
        if not self.num_features:
            raise Exception("LabelTokenizerSingleStateFeaturizer "
                            "was not prepared before encoding.")

        if state is None or None in state:
            return np.ones(self.num_features, dtype=np.int32) * -1

        # we are going to use floats and convert to int later if possible
        used_features = np.zeros(self.num_features, dtype=np.float)
        using_only_ints = True
        for state_name, prob in state.items():
            using_only_ints = using_only_ints and utils.is_int(prob)
            if state_name in self.user_labels:
                if PREV_PREFIX + ACTION_LISTEN_NAME in state:
                    # else we predict next action from bot action and memory
                    for t in state_name.split(self.split_symbol):
                        used_features[self.user_vocab[t]] += prob

            elif state_name in self.slot_labels:
                # slot features live after the user-token section
                offset = len(self.user_vocab)
                idx = self.slot_labels.index(state_name)
                used_features[offset + idx] += prob

            elif state_name[len(PREV_PREFIX):] in self.bot_labels:
                # previous bot action: strip the prefix and count its tokens
                action_name = state_name[len(PREV_PREFIX):]
                for t in action_name.split(self.split_symbol):
                    offset = len(self.user_vocab) + len(self.slot_labels)
                    idx = self.bot_vocab[t]
                    used_features[offset + idx] += prob

            else:
                logger.warning("Feature '{}' could not be found in "
                               "feature map.".format(state_name))

        if using_only_ints:
            # this is an optimization - saves us a bit of memory
            return used_features.astype(np.int32)
        else:
            return used_features

    def create_encoded_all_actions(self, domain: Domain) -> np.ndarray:
        """Create matrix with all actions from domain
        encoded in rows as bag of words."""
        encoded_all_actions = np.zeros((domain.num_actions,
                                        len(self.bot_vocab)),
                                       dtype=int)
        for idx, name in enumerate(domain.action_names):
            for t in name.split(self.split_symbol):
                encoded_all_actions[idx, self.bot_vocab[t]] = 1
        return encoded_all_actions
class TrackerFeaturizer(object):
    """Base class for featurizers that operate on whole trackers
    (sequences of dialogue states) rather than single states."""

    def __init__(self,
                 state_featurizer: Optional[SingleStateFeaturizer] = None,
                 use_intent_probabilities: bool = False) -> None:
        # fall back to the (abstract) base featurizer when none is supplied
        self.state_featurizer = state_featurizer or SingleStateFeaturizer()
        # when False, only the most confident intent is kept per state
        self.use_intent_probabilities = use_intent_probabilities
def _create_states(self,
tracker: DialogueStateTracker,
domain: Domain,
is_binary_training: bool = False
) -> List[Dict[Text, float]]:
"""Create states: a list of dictionaries.
If use_intent_probabilities is False (default behaviour),
pick the most probable intent out of all provided ones and
set its probability to 1.0, while all the others to 0.0."""
states = tracker.past_states(domain)
# during training we encounter only 1 or 0
if not self.use_intent_probabilities and not is_binary_training:
bin_states = []
for state in states:
# copy state dict to preserve internal order of keys
bin_state = dict(state)
best_intent = None
best_intent_prob = -1.0
for state_name, prob in state:
if state_name.startswith('intent_'):
if prob > best_intent_prob:
# finding the maximum confidence intent
if best_intent is not None:
# delete previous best intent
del bin_state[best_intent]
best_intent = state_name
best_intent_prob = prob
else:
# delete other intents
del bin_state[state_name]
if best_intent is not None:
# set the confidence of best intent to 1.0
bin_state[best_intent] = 1.0
bin_states.append(bin_state)
return bin_states
else:
return [dict(state) for state in states]
def _pad_states(self, states: List[Any]) -> List[Any]:
return states
def _featurize_states(
self,
trackers_as_states: List[List[Dict[Text, float]]]
) -> Tuple[np.ndarray, List[int]]:
"""Create X"""
features = []
true_lengths = []
for tracker_states in trackers_as_states:
dialogue_len = len(tracker_states)
# len(trackers_as_states) = 1 means
# it is called during prediction or we have
# only one story, so no padding is needed
if len(trackers_as_states) > 1:
tracker_states = self._pad_states(tracker_states)
story_features = [self.state_featurizer.encode(state)
for state in tracker_states]
features.append(story_features)
true_lengths.append(dialogue_len)
# noinspection PyPep8Naming
X = np.array(features)
return X, true_lengths
def _featurize_labels(
self,
trackers_as_actions: List[List[Text]],
domain: Domain
) -> np.ndarray:
"""Create y"""
labels = []
for tracker_actions in trackers_as_actions:
if len(trackers_as_actions) > 1:
tracker_actions = self._pad_states(tracker_actions)
story_labels = [self.state_featurizer.action_as_one_hot(action,
domain)
for action in tracker_actions]
labels.append(story_labels)
# if it is MaxHistoryFeaturizer, squeeze out time axis
y = np.array(labels).squeeze()
return y
def training_states_and_actions(
self,
trackers: List[DialogueStateTracker],
domain: Domain
) -> Tuple[List[List[Dict]], List[List[Text]]]:
"""Transforms list of trackers to lists of states and actions"""
raise NotImplementedError("Featurizer must have the capacity to "
"encode trackers to feature vectors")
def featurize_trackers(self,
trackers: List[DialogueStateTracker],
domain: Domain
) -> DialogueTrainingData:
"""Create training data"""
self.state_featurizer.prepare_from_domain(domain)
(trackers_as_states,
trackers_as_actions) = self.training_states_and_actions(trackers,
domain)
# noinspection PyPep8Naming
X, true_lengths = self._featurize_states(trackers_as_states)
y = self._featurize_labels(trackers_as_actions, domain)
return DialogueTrainingData(X, y, true_lengths)
def prediction_states(self,
trackers: List[DialogueStateTracker],
domain: Domain
) -> List[List[Dict[Text, float]]]:
"""Transforms list of trackers to lists of states for prediction"""
raise NotImplementedError("Featurizer must have the capacity to "
"create feature vector")
# noinspection PyPep8Naming
def create_X(self,
trackers: List[DialogueStateTracker],
domain: Domain
) -> np.ndarray:
"""Create X for prediction"""
trackers_as_states = self.prediction_states(trackers, domain)
X, _ = self._featurize_states(trackers_as_states)
return X
def persist(self, path):
featurizer_file = os.path.join(path, "featurizer.json")
utils.create_dir_for_file(featurizer_file)
with io.open(featurizer_file, 'w', encoding="utf-8") as f:
# noinspection PyTypeChecker
f.write(str(jsonpickle.encode(self)))
@staticmethod
def load(path):
featurizer_file = os.path.join(path, "featurizer.json")
if os.path.isfile(featurizer_file):
return jsonpickle.decode(utils.read_file(featurizer_file))
else:
logger.error("Couldn't load featurizer for policy. "
"File '{}' doesn't exist.".format(featurizer_file))
return None
class FullDialogueTrackerFeaturizer(TrackerFeaturizer):
    """Tracker featurizer that takes the trackers
    and creates full dialogue training data for
    time distributed rnn.
    Training data is padded up to the length of the longest
    dialogue with -1"""

    def __init__(self,
                 state_featurizer: SingleStateFeaturizer,
                 use_intent_probabilities: bool = False) -> None:
        super(FullDialogueTrackerFeaturizer, self).__init__(
            state_featurizer, use_intent_probabilities
        )
        # Length of the longest action sequence seen during training; set by
        # training_states_and_actions() and used by _pad_states().
        self.max_len = None

    @staticmethod
    def _calculate_max_len(trackers_as_actions):
        # Longest action sequence, or None when there are no trackers at all.
        if trackers_as_actions:
            return max([len(states) for states in trackers_as_actions])
        else:
            return None

    def _pad_states(self, states: List[Any]) -> List[Any]:
        """Pads states up to max_len"""
        # NOTE: `+=` extends the caller's list in place (and returns it).
        if len(states) < self.max_len:
            states += [None] * (self.max_len - len(states))
        return states

    def training_states_and_actions(
            self,
            trackers: List[DialogueStateTracker],
            domain: Domain
    ) -> Tuple[List[List[Dict]], List[List[Text]]]:
        # Produces one (states, actions) pair per tracker, covering the whole
        # dialogue, and records the maximum dialogue length for padding.
        trackers_as_states = []
        trackers_as_actions = []
        logger.debug("Creating states and action examples from "
                     "collected trackers (by {}({}))..."
                     "".format(type(self).__name__,
                               type(self.state_featurizer).__name__))
        pbar = tqdm(trackers,
                    desc="Processed trackers",
                    disable=(not logger.isEnabledFor(logging.DEBUG)))
        for tracker in pbar:
            states = self._create_states(tracker, domain,
                                         is_binary_training=True)
            delete_first_state = False
            actions = []
            for event in tracker.applied_events():
                if isinstance(event, ActionExecuted):
                    if not event.unpredictable:
                        # only actions which can be
                        # predicted at a stories start
                        actions.append(event.action_name)
                    else:
                        # unpredictable actions can be
                        # only the first in the story
                        if delete_first_state:
                            raise Exception("Found two unpredictable "
                                            "actions in one story."
                                            "Check your story files.")
                        else:
                            delete_first_state = True
            if delete_first_state:
                # Drop the state preceding the unpredictable first action.
                states = states[1:]
            # The last state has no following action to predict, so drop it.
            trackers_as_states.append(states[:-1])
            trackers_as_actions.append(actions)
        self.max_len = self._calculate_max_len(trackers_as_actions)
        logger.debug("The longest dialogue has {} actions."
                     "".format(self.max_len))
        return trackers_as_states, trackers_as_actions

    def prediction_states(self,
                          trackers: List[DialogueStateTracker],
                          domain: Domain
                          ) -> List[List[Dict[Text, float]]]:
        # For prediction the full (unbinarized) state sequence is used.
        trackers_as_states = [self._create_states(tracker, domain)
                              for tracker in trackers]
        return trackers_as_states
class MaxHistoryTrackerFeaturizer(TrackerFeaturizer):
    """Tracker featurizer that takes the trackers,
    slices them into max_history batches and
    creates training data for rnn that uses last output
    for prediction.
    Training data is padded up to the max_history with -1"""

    # Default window length when the caller does not pass max_history.
    MAX_HISTORY_DEFAULT = 5

    def __init__(self,
                 state_featurizer: Optional[SingleStateFeaturizer] = None,
                 max_history: int = None,
                 remove_duplicates: bool = True,
                 use_intent_probabilities: bool = False) -> None:
        super(MaxHistoryTrackerFeaturizer, self).__init__(
            state_featurizer, use_intent_probabilities
        )
        self.max_history = max_history or self.MAX_HISTORY_DEFAULT
        # When True, examples with identical featurizations are kept once.
        self.remove_duplicates = remove_duplicates

    @staticmethod
    def slice_state_history(
            states: List[Dict[Text, float]],
            slice_length: int
    ) -> List[Optional[Dict[Text, float]]]:
        """Slices states from the trackers history.
        If the slice is at the array borders, padding will be added to ensure
        the slice length."""
        # Always returns exactly `slice_length` entries, left-padded with None.
        slice_end = len(states)
        slice_start = max(0, slice_end - slice_length)
        padding = [None] * max(0, slice_length - slice_end)
        # noinspection PyTypeChecker
        state_features = padding + states[slice_start:]
        return state_features

    @staticmethod
    def _hash_example(states, action):
        # Hashable fingerprint of a (state window, action) training example,
        # used for duplicate removal. None padding entries stay as None.
        frozen_states = tuple((s if s is None
                               else frozenset(s.items())
                               for s in states))
        frozen_actions = (action,)
        return hash((frozen_states, frozen_actions))

    def training_states_and_actions(
            self,
            trackers: List[DialogueStateTracker],
            domain: Domain
    ) -> Tuple[List[List[Dict]], List[List[Text]]]:
        # One training example per predictable action: the max_history-long
        # state window before the action, labelled with that action.
        trackers_as_states = []
        trackers_as_actions = []
        # from multiple states that create equal featurizations
        # we only need to keep one.
        hashed_examples = set()
        logger.debug("Creating states and action examples from "
                     "collected trackers (by {}({}))..."
                     "".format(type(self).__name__,
                               type(self.state_featurizer).__name__))
        pbar = tqdm(trackers, desc="Processed trackers",
                    disable=(not logger.isEnabledFor(logging.DEBUG)))
        for tracker in pbar:
            states = self._create_states(tracker, domain, True)
            idx = 0
            for event in tracker.applied_events():
                if isinstance(event, ActionExecuted):
                    if not event.unpredictable:
                        # only actions which can be
                        # predicted at a stories start
                        sliced_states = self.slice_state_history(
                            states[:idx + 1], self.max_history)
                        if self.remove_duplicates:
                            hashed = self._hash_example(sliced_states,
                                                        event.action_name)
                            # only continue with tracker_states that created a
                            # hashed_featurization we haven't observed
                            if hashed not in hashed_examples:
                                hashed_examples.add(hashed)
                                trackers_as_states.append(sliced_states)
                                trackers_as_actions.append([event.action_name])
                        else:
                            trackers_as_states.append(sliced_states)
                            trackers_as_actions.append([event.action_name])
                        pbar.set_postfix({"# actions": "{:d}".format(
                            len(trackers_as_actions))})
                    # Advance past this action's state regardless of whether
                    # an example was emitted.
                    idx += 1
        logger.debug("Created {} action examples."
                     "".format(len(trackers_as_actions)))
        return trackers_as_states, trackers_as_actions

    def prediction_states(self,
                          trackers: List[DialogueStateTracker],
                          domain: Domain
                          ) -> List[List[Dict[Text, float]]]:
        # For prediction only the trailing max_history window is relevant.
        trackers_as_states = [self._create_states(tracker, domain)
                              for tracker in trackers]
        trackers_as_states = [self.slice_state_history(states,
                                                       self.max_history)
                              for states in trackers_as_states]
        return trackers_as_states
| [
"noreply@github.com"
] | parimalpate123.noreply@github.com |
25aadc99c54d46377158797eb238e1e889e95e9b | d9d6250eb862e4b4cace91f5d7ab82bc70ea689c | /src/comment/migrations/0001_initial.py | 4b471ce54e4a68c5da6989acac2be0b2de8ce46f | [] | no_license | belal-bh/CLIC_PUST | f6ae867115899733722d356b1f27a1bc78eee89f | 59c251e621ac2f6460bd4faa31aad5e569a060c2 | refs/heads/master | 2022-04-08T13:05:06.795597 | 2020-03-15T10:12:45 | 2020-03-15T10:12:45 | 212,201,928 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | # Generated by Django 2.2.1 on 2019-10-10 07:10
import account.helpers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Comment`` model.

    Comments form a thread via the self-referential ``parent`` FK and can
    attach to any model through the generic ``content_type``/``object_id``
    pair. Do not edit field order: it is part of the migration state.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                # Optional image; height/width are auto-populated by Django
                # into the two IntegerFields below.
                ('image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to=account.helpers.UploadTo('image'), width_field='width_field')),
                ('height_field', models.IntegerField(default=0)),
                ('width_field', models.IntegerField(default=0)),
                ('object_id', models.PositiveIntegerField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                # Parent comment for threaded replies; null for top-level.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comment.Comment')),
                # PROTECT: deleting a user with comments is blocked.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"bh.pro.pust@gmail.com"
] | bh.pro.pust@gmail.com |
01aeafb98ed6d93725ba3ab260a74eaa6daeeb51 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20dd5e82-5cc5-11e4-af55-00155d01fe08.py | a599a33e0e06904fc5484a89cf9782cb80531146 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | #!/usr/bin/python
################################################################################
# 20dd5e82-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance check for the SeProfileSingleProcessPrivilege user-rights
    assignment: only BUILTIN\\Administrators may hold it."""

    def __init__(self):
        self.output = []           # human-readable lines from the last check
        self.is_compliant = False  # result of the last check() call
        self.uuid = "20dd5e82-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Evaluate compliance via *cli* and return True/False.

        *cli* must provide ``get_secedit_account(privilege_name)`` returning
        the list of accounts currently granted the privilege.
        """
        # Initialize Compliance
        self.is_compliant = True

        # Get Accounts
        usernames = cli.get_secedit_account('SeProfileSingleProcessPrivilege')

        # Output Lines
        self.output = [("SeProfileSingleProcessPrivilege=")] + usernames

        # Recommended accounts. Raw string: "\A" is an invalid escape
        # sequence in a normal literal (DeprecationWarning in Python 3).
        rec_usernames = (r"BUILTIN\Administrators",)
        allowed = {name.lower() for name in rec_usernames}

        for user in usernames:
            # Exact case-insensitive match. The previous substring test
            # (`user.lower() not in rec_usernames.lower()` on a plain string)
            # wrongly accepted partial names such as a bare "Administrators".
            if user.lower() not in allowed:
                self.is_compliant = False

        return self.is_compliant
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
abce8b6224be5ad4780574b9df6386674fd23647 | 227ecf8b7967cfcf3bb0822d268941c04a05bd20 | /matrix_comp_approx_colored.py | dda878aef7ff1809716989e29e163264cbf6539a | [] | no_license | johnjasa/derivative_comparisons | 1a8f3dba62dd9e081537cb6ecf4a1df93192893b | d50a1f86042841b37804fbb3abbc600f3870cce5 | refs/heads/master | 2021-05-18T17:54:42.906729 | 2020-04-06T17:45:13 | 2020-04-06T17:45:13 | 251,347,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | import numpy as np
import openmdao.api as om
class MatrixComp(om.ExplicitComponent):
    """Banded test component: each output is the sum of 4th powers over a
    sliding window of a randomly shifted, tiled copy of the input."""

    def initialize(self):
        self.options.declare('num_inputs', default=2)
        self.options.declare('num_outputs', default=5)
        self.options.declare('bandwidth', default=2)
        self.options.declare('random_seed', default=314)

    def setup(self):
        n_in = self.options['num_inputs']
        n_out = self.options['num_outputs']

        self.add_input('x', shape=n_in)
        self.add_output('y', shape=n_out)

        self.declare_partials('y', 'x', method='fd')
        self.declare_coloring('*', method='cs', show_summary=True)

        # Fixed per-instance offset so compute() is deterministic.
        np.random.seed(self.options['random_seed'])
        self.random_array = np.random.random_sample(n_in)

    def compute(self, inputs, outputs):
        n_in = self.options['num_inputs']
        n_out = self.options['num_outputs']
        bw = self.options['bandwidth']

        shifted = inputs['x'] + self.random_array
        # Tile enough copies that every window of width `bw` fits.
        reps = int(np.ceil(n_out / n_in) + bw)
        tiled = np.tile(shifted, reps)

        for row in range(n_out):
            outputs['y'][row] = np.sum(tiled[row:row + bw] ** 4)
| [
"johnjasa11@gmail.com"
] | johnjasa11@gmail.com |
f66590ed24326e5a66bd05a44b6fe1bd619b3f61 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1227.py | 942da9b5fe104de59a0e5faf6af34254b248e801 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms

from Configuration.StandardSequences.Eras import eras

# Define the cmsRun process for NanoAOD production on 2018 data.
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

# -1 means: process every event in the input files.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/1784FCF9-7DFD-AA45-AEA1-5EBCEDE11A59.root'),
    secondaryFileNames = cms.untracked.vstring()
)

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest1227.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)

# Additional output definition

# Other statements
# Conditions database tag for the Sep2018 re-reco of 2018 data.
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')

# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)

# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)

# customisation of the process.

# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData

#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)

# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC

#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)

# End of customisation functions

# Customisation from command line

# Disable implicit multi-threading in ROOT for reproducible processing.
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))

# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
5023035cb29590f585108c7aee78dc4373800804 | c6053ad14e9a9161128ab43ced5604d801ba616d | /Lemon/Python_Base/Lesson10_object_20181117/homework_02.py | c3bfb59c5135b0b7432c470d7a36aa6518d3cc6c | [] | no_license | HesterXu/Home | 0f6bdace39f15e8be26031f88248f2febf33954d | ef8fa0becb687b7b6f73a7167bdde562b8c539be | refs/heads/master | 2020-04-04T00:56:35.183580 | 2018-12-25T02:48:51 | 2018-12-25T02:49:05 | 155,662,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # -*- coding: utf-8 -*-
# @Time : 2018/11/17/13:35
# @Author : Hester Xu
# Email : xuruizhu@yeah.net
# @File : homework_02.py
# @Software : PyCharm
'''
2:定义一个学生类。
1)有下面的类属性: 1 姓名 2 年龄 3 成绩(语文,数学,英语)[每课成绩的类型为整数] ,均放在初始化函数里面。
2)类方法:
a)获取学生的姓名:get_name() 返回类型:str
b)获取学生的年龄:get_age() 返回类型:int
c) 返回3门科目中最高的分数。get_course() 返回类型:int
写好类以后,可以定义2个同学测试下: zm = Student('zhangming',20,[69,88,100]) 返回结果: zhangming 20 100
'''
class Student:
    """A student with a name, an age, and a list of subject scores."""

    def __init__(self, name, age, score):
        # `score` holds the integer marks for the three subjects
        # (Chinese, math, English).
        self.name = name
        self.age = age
        self.score = score

    def get_name(self):
        """Return the student's name as a str."""
        return self.name

    def get_age(self):
        """Return the student's age as an int."""
        return self.age

    def get_course(self):
        """Return the highest of the subject scores as an int."""
        return max(self.score)
# Quick demo: build one student and print name, age, and best score.
zm = Student('zhangming', 20, [69, 88, 100])
for shown in (zm.get_name(), zm.get_age(), zm.get_course()):
    print(shown)
| [
"xuruizhu@yeah.net"
] | xuruizhu@yeah.net |
f114eaac45b345590a3ee78311ae9c41599eb2fc | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/resources/v20210101/deployment_at_tenant_scope.py | 4eb4d31f0b896cf765e4478fcd0b755ba3109a72 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 10,262 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DeploymentAtTenantScopeArgs', 'DeploymentAtTenantScope']
@pulumi.input_type
class DeploymentAtTenantScopeArgs:
    # Auto-generated by the Pulumi SDK Generator. The @pulumi.input_type
    # decorator wires the pulumi.set/pulumi.get storage used below.
    def __init__(__self__, *,
                 properties: pulumi.Input['DeploymentPropertiesArgs'],
                 deployment_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a DeploymentAtTenantScope resource.
        :param pulumi.Input['DeploymentPropertiesArgs'] properties: The deployment properties.
        :param pulumi.Input[str] deployment_name: The name of the deployment.
        :param pulumi.Input[str] location: The location to store the deployment data.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Deployment tags
        """
        pulumi.set(__self__, "properties", properties)
        # Optional arguments are only stored when explicitly provided.
        if deployment_name is not None:
            pulumi.set(__self__, "deployment_name", deployment_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Input['DeploymentPropertiesArgs']:
        """
        The deployment properties.
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: pulumi.Input['DeploymentPropertiesArgs']):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="deploymentName")
    def deployment_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the deployment.
        """
        return pulumi.get(self, "deployment_name")

    @deployment_name.setter
    def deployment_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deployment_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location to store the deployment data.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Deployment tags
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class DeploymentAtTenantScope(pulumi.CustomResource):
    """Deployment information.

    Auto-generated by the Pulumi SDK Generator for the Azure
    ``resources/v20210101`` API; represents an ARM deployment at tenant
    scope.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 deployment_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Deployment information.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] deployment_name: The name of the deployment.
        :param pulumi.Input[str] location: The location to store the deployment data.
        :param pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']] properties: The deployment properties.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Deployment tags
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DeploymentAtTenantScopeArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Deployment information.

        :param str resource_name: The name of the resource.
        :param DeploymentAtTenantScopeArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the args-object and keyword-argument overloads.
        resource_args, opts = _utilities.get_resource_args_opts(DeploymentAtTenantScopeArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       deployment_name: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        # Shared constructor body used by both public overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate inputs and build the
            # property bag; output-only fields start as None.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DeploymentAtTenantScopeArgs.__new__(DeploymentAtTenantScopeArgs)

            __props__.__dict__["deployment_name"] = deployment_name
            __props__.__dict__["location"] = location
            if properties is None and not opts.urn:
                raise TypeError("Missing required property 'properties'")
            __props__.__dict__["properties"] = properties
            __props__.__dict__["tags"] = tags
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity across provider renames and other
        # API versions of this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:resources/v20210101:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20190701:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190701:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20190801:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190801:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20191001:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20191001:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20200601:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20200601:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20200801:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20200801:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20201001:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20201001:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-native:resources/v20210401:DeploymentAtTenantScope"), pulumi.Alias(type_="azure-nextgen:resources/v20210401:DeploymentAtTenantScope")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(DeploymentAtTenantScope, __self__).__init__(
            'azure-native:resources/v20210101:DeploymentAtTenantScope',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DeploymentAtTenantScope':
        """
        Get an existing DeploymentAtTenantScope resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All fields are None here; the engine populates them from the
        # looked-up resource state.
        __props__ = DeploymentAtTenantScopeArgs.__new__(DeploymentAtTenantScopeArgs)

        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["properties"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return DeploymentAtTenantScope(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        the location of the deployment.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the deployment.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.DeploymentPropertiesExtendedResponse']:
        """
        Deployment properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Deployment tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the deployment.
        """
        return pulumi.get(self, "type")
"noreply@github.com"
] | morrell.noreply@github.com |
b9d52469bee1df5cd39bc77ab102e80e5ecfd4e5 | 1365c4f43d597e613c137a5bce3230fcbf07a5c0 | /pinkblue/urls.py | 3be566acf6e37499a17f77e842ad1e899a6402be | [] | no_license | Pradam/pinkblue | 8dfc9c17d3797ea55918712932df72e70d14e0a8 | 5e9cd085e8d77fd659ff3a27d73af843f8363c01 | refs/heads/master | 2020-04-23T10:04:20.994037 | 2019-02-18T04:03:53 | 2019-02-18T04:03:53 | 171,091,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """pinkblue URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# URL routing table: the Django admin site plus the inventory app's URLs
# mounted under the /stock/ prefix.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('stock/', include('inventory.urls'))
]
| [
"pradamabhilash@gmail.com"
] | pradamabhilash@gmail.com |
6ed3e6a009cf9820d10c5b2bcec7966bc71920da | 9f2c8c6b9c7caac464193fa9a995dc7244f3aac5 | /Exercicios Curso Em Video Mundo 2/ex038.py | bc7fb3f83e250eb62ce07ca8dab2bccf6cde09df | [
"MIT"
] | permissive | JorgeTranin/Python_Curso_Em_Video | a5c1a119e30aa08663d5b3e3d86625fb852ccbe8 | be74c9301aafc055bdf883be649cb8b7716617e3 | refs/heads/master | 2021-06-13T23:29:36.184378 | 2020-04-10T00:49:25 | 2020-04-10T00:49:25 | 254,464,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | n1 = int(input('Digite um numero! '))
n2 = int(input('Digite outro numero! '))
# Handle the tie first, then decide which of the two values is larger.
if n1 == n2:
    print('Os dois valores são iguais.')
elif n1 > n2:
    print('Entre {} e {} O primeiro valor é maior'.format(n1, n2))
else:
    print('Entre {} e {} O segundo valor é maior.'.format(n1, n2))
| [
"antoniotraninjorge@gmail.com"
] | antoniotraninjorge@gmail.com |
fc50e5a2055a8a78a3042ca9d49a37270c2e9c4b | 108034973f9046a7603d5fe3f26c59b20a7e68da | /lab/lab13/tests/schedule.py | 4247e34134bdaacf49dedc64c9d011381688e8f3 | [] | no_license | paulhzq/cs61a | b1b1387cefbaaf1823c02d535891db7d085f3b04 | 9eee13df9ad113591dc55d106561951cea34abc5 | refs/heads/master | 2020-05-23T08:16:14.193086 | 2017-01-15T02:06:18 | 2017-01-15T02:06:18 | 70,255,875 | 8 | 8 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | test = {
'name': 'schedule',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
sqlite> SELECT * FROM schedule;
SFO, SLC, PDX|176
SFO, LAX, PDX|186
SFO, PDX|192
""",
'hidden': False,
'locked': False
}
],
'ordered': True,
'scored': True,
'setup': r"""
sqlite> .read lab13.sql
""",
'teardown': '',
'type': 'sqlite'
}
]
}
| [
"paul_hzq@hotmail.com"
] | paul_hzq@hotmail.com |
676e220636adf6125be74d69a020cc4d43e83248 | 556417a05b437c111290287df47a39f15fb28f4b | /apps/payement/forms.py | bc9b95551bc0609de4ac2cd8b711096f021e1781 | [] | no_license | root92/test-erp | 74626f7b0ce423e9451dd0cc9371ed644a9b8af9 | ef108353b5a886822574bded7f2f0b323c483c37 | refs/heads/master | 2020-04-21T20:53:04.401368 | 2018-01-30T16:10:15 | 2018-01-30T16:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | from django import forms
from .models import Payement, Fees
class PayementForm(forms.ModelForm):
    # ModelForm for recording a payment: which fee it covers, which
    # student pays it, and the amount paid.  Labels are French UI text.
    class Meta:
        model = Payement
        fields =['fees', 'student', 'amount']
        labels = {
            'student': 'Elève',
            'fees': 'frais',
            'amount': 'Montant'
        }
        widgets = {
            # Bootstrap-styled widgets shared across the admin screens.
            'student': forms.Select(attrs={'class': 'form-control form-element' }),
            'fees': forms.Select(attrs={'class': 'form-control form-element' }),
            'amount': forms.TextInput(attrs={'class': 'form-control form-element' }),
        }
class FeeForm(forms.ModelForm):
    # ModelForm for creating/editing a fee type (label, amount, description).
    class Meta:
        model = Fees
        fields = ['label', 'fee_value', 'fee_description']
        labels = {
            'label': 'Libellé',
            'fee_value': 'Montant',
            'fee_description': 'Description',
        }
        widgets = {
            'label': forms.TextInput(attrs={'class': 'form-control form-element' }),
            'fee_value': forms.TextInput(attrs={'class': 'form-control form-element' }),
            # NOTE(review): 'required' here is inside attrs, so it is emitted as an
            # HTML attribute rather than marking the form field optional — confirm intent.
            'fee_description':forms.Textarea(attrs={'class': 'form-control admis-process-comment',
                                                    'required':False})
        }
| [
"souleymanemoudou@gmail.com"
] | souleymanemoudou@gmail.com |
ccdf9c1f393e3f9a5c95bca4392d2f9bdfb53e88 | 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | /src/building/DistributedBuildingMgrAI.py | 3d4b808591e89497e5a2bc776e3e1ecfbec26d08 | [] | no_license | toontown-restoration-project/toontown | c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f | refs/heads/master | 2022-12-23T19:46:16.697036 | 2020-10-02T20:17:09 | 2020-10-02T20:17:09 | 300,672,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,481 | py | """ DistributedBuildingMgrAI module: contains the DistributedBuildingMgrAI
class, the server side handler of all buildings in a neighborhood."""
# AI code should not import ShowBaseGlobal because it creates a graphics window
# Use AIBaseGlobal instead
# from ShowBaseGlobal import *
import os
from direct.task.Task import Task
import pickle
from otp.ai.AIBaseGlobal import *
from . import DistributedBuildingAI
from . import HQBuildingAI
from . import GagshopBuildingAI
from . import PetshopBuildingAI
from toontown.building.KartShopBuildingAI import KartShopBuildingAI
from toontown.building import DistributedAnimBuildingAI
#import DistributedDoorAI
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import ZoneUtil
import time
import random
class DistributedBuildingMgrAI:
    """
    DistributedBuildingMgrAI class: a server side object, keeps track of
    all buildings within a single neighborhood (street), handles
    converting them from good to bad, and hands out information about
    buildings to whoever asks.

    Landmark data is saved to an AI Server local ``.buildings`` file.

    *How landmark building info gets loaded:
        load list from dna;
        look for backup .buildings file;
        if present:
            load from backup buildings file;
        else:
            load .buildings file;
        create the building AI dictionary

    *Saving building data:
        move buildings file to backup file;
        write new buildings file;
        remove backup buildings file;
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBuildingMgrAI')
    serverDatafolder = simbase.config.GetString('server-data-folder', "")

    def __init__(self, air, branchID, dnaStore, trophyMgr):
        """
        branchID: The street number. Such as 2200.
        """
        self.branchID = branchID
        self.canonicalBranchID = ZoneUtil.getCanonicalZoneId(branchID)
        assert(self.debugPrint("__init__(air, branchID, dnaStore, trophyMgr)"))
        self.air = air
        self.__buildings = {}
        self.dnaStore = dnaStore
        self.trophyMgr = trophyMgr
        self.shard = str(air.districtId)
        self.backupExtension = '.bu'
        self.findAllLandmarkBuildings()
        self.doLaterTask = None

    def cleanup(self):
        """Stop the delayed-save timer and clean up every managed building."""
        taskMgr.remove(str(self.branchID)+'_delayed_save-timer')
        for building in list(self.__buildings.values()):
            building.cleanup()
        self.__buildings = {}

    def isValidBlockNumber(self, blockNumber):
        """Return true if that block refers to a real block."""
        assert(self.debugPrint("isValidBlockNumber(blockNumber="+str(blockNumber)+")"))
        return blockNumber in self.__buildings

    def delayedSaveTask(self, task):
        # Task callback: flush building state to disk once the timer fires.
        assert(self.debugPrint("delayedSaveTask()"))
        self.save()
        self.doLaterTask = None
        return Task.done

    def isSuitBlock(self, blockNumber):
        """Return true if that block is a suit block/building."""
        assert(self.debugPrint("isSuitBlock(blockNumber="+str(blockNumber)+")"))
        assert(blockNumber in self.__buildings)
        return self.__buildings[blockNumber].isSuitBlock()

    def getSuitBlocks(self):
        """Return the block numbers of all buildings currently suit-held."""
        assert(self.debugPrint("getSuitBlocks()"))
        blocks = []
        for i in list(self.__buildings.values()):
            if i.isSuitBlock():
                blocks.append(i.getBlock()[0])
        return blocks

    def getEstablishedSuitBlocks(self):
        """Return the block numbers of suit buildings that are fully established."""
        assert(self.debugPrint("getEstablishedSuitBlocks()"))
        blocks = []
        for i in list(self.__buildings.values()):
            if i.isEstablishedSuitBlock():
                blocks.append(i.getBlock()[0])
        return blocks

    def getToonBlocks(self):
        """Return block numbers of toon-held buildings (HQs excluded)."""
        assert(self.debugPrint("getToonBlocks()"))
        blocks = []
        for i in list(self.__buildings.values()):
            # HQ buildings are never considered toon blocks.
            if isinstance(i, HQBuildingAI.HQBuildingAI):
                continue
            if not i.isSuitBlock():
                blocks.append(i.getBlock()[0])
        return blocks

    def getBuildings(self):
        # All building AI objects managed by this street.
        return list(self.__buildings.values())

    def getFrontDoorPoint(self, blockNumber):
        """Get any associated path point for the specified building,
        useful for suits to know where to go when exiting from a
        building."""
        assert(self.debugPrint("getFrontDoorPoint(blockNumber="+str(blockNumber)+")"))
        assert(blockNumber in self.__buildings)
        return self.__buildings[blockNumber].getFrontDoorPoint()

    def getBuildingTrack(self, blockNumber):
        """Get the suit track (e.g. 'c', 'l', 'm', 's') associated with
        the specified building's block."""
        assert(self.debugPrint("getBuildingTrack(blockNumber="+str(blockNumber)+")"))
        assert(blockNumber in self.__buildings)
        return self.__buildings[blockNumber].track

    def getBuilding(self, blockNumber):
        """Return the building AI object for the given block number."""
        assert(self.debugPrint("getBuilding(%s)" % (str(blockNumber),)))
        assert(blockNumber in self.__buildings)
        return self.__buildings[blockNumber]

    def setFrontDoorPoint(self, blockNumber, point):
        """Set the path point suits use when exiting the specified building."""
        assert(self.debugPrint("setFrontDoorPoint(blockNumber="+str(blockNumber)
                               +", point="+str(point)+")"))
        assert(blockNumber in self.__buildings)
        return self.__buildings[blockNumber].setFrontDoorPoint(point)

    def getDNABlockLists(self):
        """Partition the DNA store's block numbers by building type.

        Returns a 6-tuple of lists: (regular, hq, gagshop, petshop,
        kartshop, animated-building) block numbers.
        """
        blocks = []
        hqBlocks = []
        gagshopBlocks = []
        petshopBlocks = []
        kartshopBlocks = []
        animBldgBlocks = []
        for i in range(self.dnaStore.getNumBlockNumbers()):
            blockNumber = self.dnaStore.getBlockNumberAt(i)
            buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
            if (buildingType == 'hq'):
                hqBlocks.append(blockNumber)
            elif (buildingType == 'gagshop'):
                gagshopBlocks.append(blockNumber)
            elif (buildingType == 'petshop'):
                petshopBlocks.append(blockNumber)
            elif (buildingType == 'kartshop'):
                kartshopBlocks.append(blockNumber)
            elif (buildingType == 'animbldg'):
                animBldgBlocks.append(blockNumber)
            else:
                blocks.append(blockNumber)
        return blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks, animBldgBlocks

    def findAllLandmarkBuildings(self):
        """Create the distributed building objects for every landmark
        block on this street, restoring saved suit/toon state where a
        save file exists."""
        assert(self.debugPrint("findAllLandmarkBuildings()"))
        # Load the saved buildings:
        buildings = self.load()
        # Create the distributed buildings:
        blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks, animBldgBlocks = self.getDNABlockLists()
        for block in blocks:
            # Used saved data, if appropriate:
            self.newBuilding(block, buildings.get(block, None))
        for block in animBldgBlocks:
            # Used saved data, if appropriate:
            self.newAnimBuilding(block, buildings.get(block, None))
        for block in hqBlocks:
            self.newHQBuilding(block)
        for block in gagshopBlocks:
            self.newGagshopBuilding(block)
        if simbase.wantPets:
            for block in petshopBlocks:
                self.newPetshopBuilding(block)
        if simbase.wantKarts:
            for block in kartshopBlocks:
                self.newKartShopBuilding(block)

    def newBuilding(self, blockNumber, blockData=None):
        """Create a new building and keep track of it.

        blockData, when given, is a previously pickled state dict used to
        restore the building's track/difficulty/floors and suit state.
        """
        assert(self.debugPrint("newBuilding(blockNumber="+str(blockNumber)
                               +", blockData="+str(blockData)+")"))
        assert(blockNumber not in self.__buildings)
        building = DistributedBuildingAI.DistributedBuildingAI(
            self.air, blockNumber, self.branchID, self.trophyMgr)
        building.generateWithRequired(self.branchID)
        if blockData:
            building.track = blockData.get("track", "c")
            building.difficulty = int(blockData.get("difficulty", 1))
            building.numFloors = int(blockData.get("numFloors", 1))
            # Clamp to the supported floor range.
            building.numFloors = max(1, min(5, building.numFloors))
            if not ZoneUtil.isWelcomeValley(building.zoneId):
                building.updateSavedBy(blockData.get("savedBy"))
            else:
                self.notify.warning('we had a cog building in welcome valley %d' % building.zoneId)
            building.becameSuitTime = blockData.get("becameSuitTime", time.time())
            # Double check the state because we have seen the building
            # saved out with other states (like waitForVictor).  If we
            # get one of these weird states, just make it a toon bldg.
            if blockData["state"] == "suit":
                building.setState("suit")
            elif blockData['state'] == 'cogdo':
                if simbase.air.wantCogdominiums:
                    building.setState("cogdo")
                else:
                    building.setState("toon")
            else:
                building.setState("toon")
        else:
            building.setState("toon")
        self.__buildings[blockNumber] = building
        return building

    def newAnimBuilding(self, blockNumber, blockData=None):
        """Create a new animated building and keep track of it."""
        assert(self.debugPrint("newAnimBuilding(blockNumber="+str(blockNumber)
                               +", blockData="+str(blockData)+")"))
        assert(blockNumber not in self.__buildings)
        building = DistributedAnimBuildingAI.DistributedAnimBuildingAI(
            self.air, blockNumber, self.branchID, self.trophyMgr)
        building.generateWithRequired(self.branchID)
        if blockData:
            building.track = blockData.get("track", "c")
            building.difficulty = int(blockData.get("difficulty", 1))
            building.numFloors = int(blockData.get("numFloors", 1))
            if not ZoneUtil.isWelcomeValley(building.zoneId):
                building.updateSavedBy(blockData.get("savedBy"))
            else:
                self.notify.warning('we had a cog building in welcome valley %d' % building.zoneId)
            building.becameSuitTime = blockData.get("becameSuitTime", time.time())
            # Same defensive state handling as newBuilding.
            if blockData["state"] == "suit":
                building.setState("suit")
            else:
                building.setState("toon")
        else:
            building.setState("toon")
        self.__buildings[blockNumber] = building
        return building

    def newHQBuilding(self, blockNumber):
        """Create a new HQ building and keep track of it."""
        assert(blockNumber not in self.__buildings)
        dnaStore = self.air.dnaStoreMap[self.canonicalBranchID]
        exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
        exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchID)
        interiorZoneId = (self.branchID - self.branchID % 100) + 500 + blockNumber
        assert(self.debugPrint("newHQBuilding(blockNumber=%s exteriorZoneId=%s interiorZoneId=%s" %
                               (blockNumber, exteriorZoneId, interiorZoneId)))
        building = HQBuildingAI.HQBuildingAI(self.air, exteriorZoneId, interiorZoneId, blockNumber)
        self.__buildings[blockNumber] = building
        return building

    def newGagshopBuilding(self, blockNumber):
        """Create a new Gagshop building and keep track of it."""
        assert(self.debugPrint("newGagshopBuilding(blockNumber="+str(blockNumber)+")"))
        assert(blockNumber not in self.__buildings)
        dnaStore = self.air.dnaStoreMap[self.canonicalBranchID]
        exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
        exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchID)
        interiorZoneId = (self.branchID - self.branchID % 100) + 500 + blockNumber
        building = GagshopBuildingAI.GagshopBuildingAI(self.air, exteriorZoneId, interiorZoneId, blockNumber)
        self.__buildings[blockNumber] = building
        return building

    def newPetshopBuilding(self, blockNumber):
        """Create a new Petshop building and keep track of it."""
        assert(self.debugPrint("newPetshopBuilding(blockNumber="+str(blockNumber)+")"))
        assert(blockNumber not in self.__buildings)
        dnaStore = self.air.dnaStoreMap[self.canonicalBranchID]
        exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
        exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchID)
        interiorZoneId = (self.branchID - self.branchID % 100) + 500 + blockNumber
        building = PetshopBuildingAI.PetshopBuildingAI(self.air, exteriorZoneId, interiorZoneId, blockNumber)
        self.__buildings[blockNumber] = building
        return building

    def newKartShopBuilding(self, blockNumber):
        """
        Purpose: The newKartShopBuilding Method creates a new KartShop
                 building and keeps track of it.
        Params:  blockNumber - block that the shop is on.
        Return:  the new KartShopBuildingAI
        """
        assert(self.debugPrint("newKartShopBuilding(blockNumber=" + str(blockNumber) + ")"))
        assert(blockNumber not in self.__buildings)
        dnaStore = self.air.dnaStoreMap[self.canonicalBranchID]
        # Retrieve the Exterior and Interior ZoneIds
        exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
        exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchID)
        interiorZoneId = (self.branchID - self.branchID % 100) + 500 + blockNumber
        building = KartShopBuildingAI(self.air, exteriorZoneId, interiorZoneId, blockNumber)
        self.__buildings[blockNumber] = building
        return building

    def getFileName(self):
        """Figure out the path to the saved state."""
        f = "%s%s_%d.buildings" % (self.serverDatafolder, self.shard, self.branchID)
        assert(self.debugPrint("getFileName() returning \""+str(f)+"\""))
        return f

    def saveTo(self, file, block=None):
        """Save data to the specified (binary-mode) file object."""
        assert(self.debugPrint("saveTo(file="+str(file)+", block="+str(block)+")"))
        if block:
            # Save just this one block to the file:
            pickleData = block.getPickleData()
            pickle.dump(pickleData, file)
        else:
            # Save them all:
            for i in list(self.__buildings.values()):
                # HQs do not need to be saved
                if isinstance(i, HQBuildingAI.HQBuildingAI):
                    continue
                pickleData = i.getPickleData()
                pickle.dump(pickleData, file)

    def fastSave(self, block):
        """Append a single block's data to a delta file (currently disabled)."""
        return
        # This code has not been tested or connected.  If the normal save takes
        # too long on the AI server, this fastSave should be considered.
        assert(0)
        assert(self.debugPrint("fastSave(block="+str(block)+")"))
        try:
            fileName = self.getFileName() + '.delta'
            working = fileName + '.temp'
            # Change the name to flag the work in progress:
            if os.path.exists(working):
                os.remove(working)
            os.rename(fileName, working)
            # pickle requires a binary-mode handle under Python 3.
            with open(working, 'ab') as f:
                self.saveTo(f, block)
            # Change the name to flag the work complete:
            os.rename(working, fileName)
        except IOError as e:
            self.notify.error(str(e))
            # Even if it's just the rename that failed, we don't want to
            # clobber the prior file.

    def save(self):
        """Save all building data to the default location."""
        assert(self.debugPrint("save()"))
        try:
            fileName = self.getFileName()
            backup = fileName + self.backupExtension
            # Move current file as the backup file:
            if os.path.exists(fileName):
                os.rename(fileName, backup)
            # pickle.dump requires a binary-mode handle under Python 3
            # (the old 'w' text mode raised TypeError on the first dump).
            with open(fileName, 'wb') as f:
                self.saveTo(f)
            if os.path.exists(backup):
                os.remove(backup)
        except EnvironmentError as e:
            self.notify.warning(str(e))
            # Even if it's just the rename that failed, we don't want to
            # clobber the prior file.

    def loadFrom(self, file):
        """Load pickled block dicts from the given (binary-mode) file object.

        Returns a dict mapping block number -> pickled state dict.
        """
        assert(self.debugPrint("loadFrom(file="+str(file)+")"))
        blocks = {}
        try:
            # Records were dumped back-to-back; read until EOF.
            while True:
                pickleData = pickle.load(file)
                blocks[int(pickleData['block'])] = pickleData
        except EOFError:
            pass
        return blocks

    def load(self):
        """Load building data from the default location.

        Prefers the backup (.bu) file if one exists, since its presence
        means the last save was interrupted mid-write.
        """
        assert(self.debugPrint("load()"))
        fileName = self.getFileName()
        try:
            # Try to open the backup file (binary mode for pickle):
            f = open(fileName + self.backupExtension, 'rb')
            # Remove the (assumed) broken file:
            if os.path.exists(fileName):
                os.remove(fileName)
        except IOError:
            # OK, there's no backup file, good.
            try:
                # Open the real file:
                f = open(fileName, 'rb')
            except IOError:
                # OK, there's no file. Start new list:
                return {}
        f.seek(0)
        blocks = self.loadFrom(f)
        f.close()
        return blocks

    if __debug__:
        def debugPrint(self, message):
            """For debugging: prefix the branch id onto a debug message."""
            return self.notify.debug(
                str(self.__dict__.get('branchID', '?'))+' '+message)
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
9b4423958aa920b68ecdc3b7b0b67fddf60b8c27 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc101-abc150/abc142/a.py | 5e40cee36eb2e3dbbc38c7b5b5e18aa6317544d4 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
def main():
    from math import ceil
    n = int(input())
    # Of the integers 1..n, ceil(n / 2) are odd; print the probability
    # of drawing an odd one uniformly at random.
    odd_count = ceil(n / 2)
    print(odd_count / n)


if __name__ == '__main__':
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
6808d2b19dcde91927041394b1afc5ea14c5e750 | a1a43879a2da109d9fe8d9a75f4fda73f0d7166b | /api/tests_v2/compare.py | 867fb572fcc46f017e8682e5674ec51fc82d49ca | [] | no_license | PaddlePaddle/benchmark | a3ed62841598d079529c7440367385fc883835aa | f0e0a303e9af29abb2e86e8918c102b152a37883 | refs/heads/master | 2023-09-01T13:11:09.892877 | 2023-08-21T09:32:49 | 2023-08-21T09:32:49 | 173,032,424 | 78 | 352 | null | 2023-09-14T05:13:08 | 2019-02-28T03:14:16 | Python | UTF-8 | Python | false | false | 1,874 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class CompareConfig(APIConfig):
    """Benchmark config for elementwise comparison ops."""

    def __init__(self):
        super(CompareConfig, self).__init__('compare')
        # Default op under test; the others are selected via api_list.
        self.api_name = 'less_than'
        # Paddle comparison op -> equivalent TensorFlow op.
        self.api_list = dict(
            less_than='less',
            less_equal='less_equal',
            not_equal='not_equal',
            greater_than='greater',
            greater_equal='greater_equal',
            equal='equal')
class PDCompare(PaddleAPIBenchmarkBase):
    # Paddle side of the comparison-op benchmark.
    def build_program(self, config):
        # Declare the two input tensors from the config's shapes/dtypes.
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        y = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        # Dispatch to whichever comparison op config.api_name selects.
        result = self.layers(config.api_name, x=x, y=y)
        self.feed_vars = [x, y]
        self.fetch_vars = [result]
class TFCompare(TensorflowAPIBenchmarkBase):
    # TensorFlow side of the comparison-op benchmark; mirrors PDCompare.
    def build_graph(self, config):
        # Declare the two input tensors from the config's shapes/dtypes.
        x = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        y = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        # config.api_name is mapped to the TF op via CompareConfig.api_list.
        result = self.layers(config.api_name, x=x, y=y)
        self.feed_list = [x, y]
        self.fetch_list = [result]
if __name__ == '__main__':
    # Run the benchmark harness comparing the Paddle and TF implementations.
    test_main(PDCompare(), TFCompare(), config=CompareConfig())
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
5ff1f136a4a394975d0d1989cb5cf7d296f32655 | 3bf0bdebf785063ce1a721d4a83750ba0b5033df | /src/sentry/web/frontend/remove_project.py | 985d0a3a4168278f42470b58ee4dbe6b15abec9a | [
"BSD-2-Clause"
] | permissive | TaurusTiger/sentry | cf932d3fbac81673157ef5f483bbb3daf6a664f3 | dca33172b70d0cf79a56f751543eea364ce92ee6 | refs/heads/master | 2021-01-21T19:13:43.098303 | 2015-10-10T00:41:24 | 2015-10-10T00:41:24 | 43,991,907 | 1 | 0 | null | 2015-10-10T03:19:34 | 2015-10-10T03:19:33 | null | UTF-8 | Python | false | false | 1,884 | py | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry.api import client
from sentry.models import OrganizationMemberType
from sentry.permissions import can_remove_project
from sentry.web.frontend.base import ProjectView
class RemoveProjectForm(forms.Form):
    # Intentionally empty: the form exists only so the view can run the
    # standard form validation machinery on the confirmation POST.
    pass
class RemoveProjectView(ProjectView):
    """Confirmation page that schedules a project for deletion."""
    # Only organization owners may remove projects, and they must re-auth (sudo).
    required_access = OrganizationMemberType.OWNER
    sudo_required = True

    def get_form(self, request):
        # Bind the (empty) confirmation form on POST, unbound otherwise.
        if request.method == 'POST':
            return RemoveProjectForm(request.POST)
        return RemoveProjectForm()

    def get(self, request, organization, team, project):
        # Render the confirmation page, or bounce users lacking permission.
        if not can_remove_project(request.user, project):
            return HttpResponseRedirect(reverse('sentry'))

        form = self.get_form(request)

        context = {
            'form': form,
        }

        return self.respond('sentry/projects/remove.html', context)

    def post(self, request, organization, team, project):
        # On confirmation, delete via the internal API (sudo) and redirect home.
        if not can_remove_project(request.user, project):
            return HttpResponseRedirect(reverse('sentry'))

        form = self.get_form(request)

        if form.is_valid():
            client.delete('/projects/{}/{}/'.format(organization.slug, project.slug),
                          request.user, is_sudo=True)

            messages.add_message(
                request, messages.SUCCESS,
                _(u'The project %r was scheduled for deletion.') % (project.name.encode('utf-8'),))

            return HttpResponseRedirect(reverse('sentry-organization-home', args=[team.organization.slug]))

        # Invalid form: re-render the confirmation page.
        context = {
            'form': form,
        }

        return self.respond('sentry/projects/remove.html', context)
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
66317284cc07a9785b1fa7a0ff525d864ac27676 | e51b99514bd9b12c7cde4128549aa0206e0391f3 | /24 swapPairs.py | c571fe8d4ecf91d4d33a5163b3d27c4323825f6d | [] | no_license | ABenxj/leetcode | 5f65d2a90f79a32c8d9387bb6c4a655061d004cd | f2c162654a83c51495ebd161f42a1d0b69caf72d | refs/heads/main | 2023-05-14T11:55:28.180609 | 2021-06-08T01:11:54 | 2021-06-08T01:11:54 | 347,963,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/env pyhton
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 , Inc. All Rights Reserved
#
"""
Authors: jufei
Date: 2021/4/7 4:19 PM
"""
# Definition for singly-linked list.
class ListNode:
    """A node of a singly linked list: a value plus a pointer to the next node."""

    def __init__(self, x):
        # New nodes start detached (next is None, i.e. tail of a list).
        self.val, self.next = x, None
class Solution:
    def swapPairs(self, head: "ListNode") -> "ListNode":
        """Swap every two adjacent nodes of a singly linked list in place.

        Walks the list pairwise and re-links each pair without allocating
        a dummy node.  Odd-length lists keep their final node in place;
        empty and single-node lists are returned unchanged.

        :param head: head node of the list (or None for an empty list)
        :return: head of the re-linked list
        """
        if not head or not head.next:
            return head
        # After the first swap, the second node becomes the new head.
        new_head = head.next
        prev = None  # tail of the previous, already-swapped pair
        first = head
        while first and first.next:
            second = first.next
            # Re-link: prev -> second -> first -> (rest of list)
            first.next = second.next
            second.next = first
            if prev:
                prev.next = second
            prev = first
            first = first.next
        return new_head
| [
"jufei@wecash.net"
] | jufei@wecash.net |
90501e32e6ea9c14c125b254dcf091e8d125b049 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/coghq/CashbotMintLavaRoomFoyer_Battle00.py | 060df18a0bba15f595366b19d1077ab11dca586c | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | # 2013.08.22 22:18:15 Pacific Daylight Time
# Embedded file name: toontown.coghq.CashbotMintLavaRoomFoyer_Battle00
from toontown.coghq.SpecImports import *
# Entity table for this mint room, keyed by entity id.  Ids 0/1000/1001 are
# the standard zone / level-manager / edit-manager entities; 10000+ are the
# room's content (battle blocker, crate models, and organizing nodepaths).
GlobalEntities = {
    1000: {
        'type': 'levelMgr',
        'name': 'LevelMgr',
        'comment': '',
        'parentEntId': 0,
        'cogLevel': 0,
        'farPlaneDistance': 1500,
        'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
        'wantDoors': 1,
    },
    1001: {
        'type': 'editMgr',
        'name': 'EditMgr',
        'parentEntId': 0,
        'insertEntity': None,
        'removeEntity': None,
        'requestNewEntity': None,
        'requestSave': None,
    },
    0: {
        'type': 'zone',
        'name': 'UberZone',
        'comment': '',
        'parentEntId': 0,
        'scale': 1,
        'description': '',
        'visibility': [],
    },
    # Battle trigger for the cog encounter in this room.
    10004: {
        'type': 'battleBlocker',
        'name': '<unnamed>',
        'comment': '',
        'parentEntId': 0,
        'pos': Point3(23.908908844, 0.0, 0.0),
        'hpr': Vec3(0.0, 0.0, 0.0),
        'scale': Vec3(1.0, 1.0, 1.0),
        'cellId': 0,
        'radius': 10,
    },
    # Crate models (decorative props).
    10002: {
        'type': 'model',
        'name': 'crates',
        'comment': '',
        'parentEntId': 10001,
        'pos': Point3(17.3283443451, 20.1608715057, 0.0),
        'hpr': Vec3(0.0, 0.0, 0.0),
        'scale': Vec3(1.0, 1.0, 1.0),
        'collisionsOnly': 0,
        'flattenType': 'light',
        'loadType': 'loadModelCopy',
        'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam',
    },
    10003: {
        'type': 'model',
        'name': '<unnamed>',
        'comment': '',
        'parentEntId': 10001,
        'pos': Point3(-14.04317379, 20.9443073273, 0.0),
        'hpr': Vec3(0.0, 0.0, 0.0),
        'scale': Vec3(1.0, 1.0, 1.0),
        'collisionsOnly': 0,
        'flattenType': 'light',
        'loadType': 'loadModelCopy',
        'modelPath': 'phase_10/models/cashbotHQ/crates_E.bam',
    },
    10006: {
        'type': 'model',
        'name': '<unnamed>',
        'comment': '',
        'parentEntId': 10003,
        'pos': Point3(-3.16324114799, -0.608929097652, 5.57751512527),
        'hpr': Vec3(0.0, 0.0, 0.0),
        'scale': Vec3(1.0, 1.0, 1.0),
        'collisionsOnly': 0,
        'flattenType': 'light',
        'loadType': 'loadModelCopy',
        'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam',
    },
    # Organizing nodepaths for cogs and props.
    10000: {
        'type': 'nodepath',
        'name': 'cogs',
        'comment': '',
        'parentEntId': 0,
        'pos': Point3(0.0, 0.0, 0.0),
        'hpr': Point3(0.0, 0.0, 0.0),
        'scale': 1,
    },
    10001: {
        'type': 'nodepath',
        'name': 'props',
        'comment': '',
        'parentEntId': 0,
        'pos': Point3(0.0, 0.0, 0.0),
        'hpr': Vec3(0.0, 0.0, 0.0),
        'scale': 1,
    },
    10005: {
        'type': 'nodepath',
        'name': '<unnamed>',
        'comment': '',
        'parentEntId': 10000,
        'pos': Point3(0.0, 0.0, 0.0),
        'hpr': Point3(-90.0, 0.0, 0.0),
        'scale': 1,
    },
}

# No scenario-specific overrides for this room.
Scenario0 = {}

levelSpec = {
    'globalEntities': GlobalEntities,
    'scenarios': [Scenario0],
}
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\CashbotMintLavaRoomFoyer_Battle00.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:18:15 Pacific Daylight Time
| [
"anonymoustoontown@gmail.com"
] | anonymoustoontown@gmail.com |
5b01280a33dbeeca6cee9f2a38e5def7526cefc2 | 3b53aa80a584416a9c8e0de4efb8ef682012bf9e | /0x11-python-network_1/10-my_github.py | 11ea426758188bcc5229f9716f56b1d970c29f2a | [] | no_license | Diegokernel/holbertonschool-higher_level_programming | c273c140b1761046f1a7db80a135d87115c34a9b | 7ebd07e947d6c9a9173699d117741eae38dfcdbe | refs/heads/master | 2020-05-18T01:31:17.582237 | 2019-10-04T04:13:23 | 2019-10-04T04:13:23 | 184,092,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/python3
"""takes your Github credentials (username and password) and uses the Github API to display your id"""
import requests
import sys
if __name__ == "__main__":
    # Credentials come from the command line: ./10-my_github.py <username> <password>
    endpoint = "https://api.github.com/user"
    credentials = (sys.argv[1], sys.argv[2])
    response = requests.get(endpoint, auth=credentials)
    # .get() yields None (printed as "None") when authentication fails.
    print(response.json().get("id"))
| [
"777@holbertonschool.com"
] | 777@holbertonschool.com |
bd0f4f29e65e2be6d51c4e9d8be129c9ac840a5b | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Withings/Measure/GetActivityMetrics.py | 9847037ed05b8219cb3ec705519d9d2a852c6162 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetActivityMetrics
# Retrieves activity metrics for the specified user.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetActivityMetrics(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetActivityMetrics Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Withings/Measure/GetActivityMetrics')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return GetActivityMetricsInputSet()

    def _make_result_set(self, result, path):
        # Internal factory for the typed result wrapper.
        return GetActivityMetricsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal factory for the typed execution handle.
        return GetActivityMetricsChoreographyExecution(session, exec_id, path)
class GetActivityMetricsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetActivityMetrics
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter stores a named input via the base class's _set_input.
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        InputSet._set_input(self, 'AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        InputSet._set_input(self, 'AccessToken', value)

    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Withings.)
        """
        InputSet._set_input(self, 'ConsumerKey', value)

    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Withings.)
        """
        InputSet._set_input(self, 'ConsumerSecret', value)

    def set_Date(self, value):
        """
        Set the value of the Date input for this Choreo. ((required, date) The date for the log in YYYY-MM-DD format.)
        """
        InputSet._set_input(self, 'Date', value)

    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((required, string) The ID of the user to retrieve activity metrics for.)
        """
        InputSet._set_input(self, 'UserID', value)
class GetActivityMetricsResultSet(ResultSet):
    """
    Result container for the GetActivityMetrics Choreo.

    Wraps the raw execution output and exposes accessors for the named
    outputs produced by the Choreo.
    """
    def getJSONFromString(self, str):
        # Parse a JSON document from text. NOTE: the parameter name
        # shadows the builtin ``str``; kept for SDK interface parity.
        return json.loads(str)

    def get_Response(self):
        """Return the "Response" output ((json) the response from Withings), or None when absent."""
        return self._output.get('Response', None)
class GetActivityMetricsChoreographyExecution(ChoreographyExecution):
    """Tracks a running GetActivityMetrics Choreo execution."""

    def _make_result_set(self, response, path):
        # Build the Choreo-specific ResultSet from the raw response.
        return GetActivityMetricsResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
95f26936b10e68352c2da05ab0c55e794949d63f | 1624fd1db522c3d8b7533418cec09793ca6f80a3 | /setup.py | 2dc7d322bcf4973fbaedb0117b1d89744453ce88 | [
"MIT"
] | permissive | yuwin/UnbalancedDataset | 7c3444f1f3b82a0c0b941c514096c39a330eb4e7 | e97ea2f23e9c06d44c6cbc14145db87f104f61a7 | refs/heads/master | 2021-01-18T13:04:18.082366 | 2016-06-27T23:51:38 | 2016-06-27T23:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | #! /usr/bin/env python
"""Toolbox for imbalanced dataset in machine learning."""
import sys
import os
import codecs
from setuptools import setup, find_packages
def load_version():
    """Execute ``imblearn/version.py`` and return its namespace dict.

    The module is exec'd into a fresh dictionary so every name it
    defines (version string, dependency metadata, helper functions) is
    available to the caller via plain key lookups.
    """
    namespace = {}
    version_path = os.path.join('imblearn', 'version.py')
    # utf-8-sig tolerates a BOM at the start of the file.
    with codecs.open(version_path, encoding='utf-8-sig') as handle:
        source = handle.read()
    exec(source, namespace)
    return namespace
def is_installing():
    """Return the truthy set of install-style commands present on the
    command line (supports e.g. "python setup.py build install")."""
    return {'install', 'develop'}.intersection(sys.argv)
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

descr = """Toolbox for imbalanced dataset in machine learning."""

# Execute imblearn/version.py once; its namespace supplies the version
# string and the dependency metadata consumed below.
_VERSION_GLOBALS = load_version()
DISTNAME = 'imbalanced-learn'
DESCRIPTION = 'Toolbox for imbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = 'Fernando Nogueira, Guillaume Lemaitre'
MAINTAINER_EMAIL = 'fmfnogueira@gmail.com, g.lemaitre58@gmail.com'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
VERSION = _VERSION_GLOBALS['__version__']

if __name__ == "__main__":
    if is_installing():
        # Verify hard dependencies before attempting the install itself.
        module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
        module_check_fn(is_imbalanced_dataset_installing=True)

    # Runtime (non-install-time) requirements in "pkg>=min" pip format.
    install_requires = \
        ['%s>=%s' % (mod, meta['min_version'])
         for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
         if not meta['required_at_installation']]

    setup(name=DISTNAME,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          long_description=LONG_DESCRIPTION,
          zip_safe=False,  # the package can run out of an .egg file
          classifiers=[
              'Intended Audience :: Science/Research',
              'Intended Audience :: Developers',
              'License :: OSI Approved',
              'Programming Language :: C',
              'Programming Language :: Python',
              'Topic :: Software Development',
              'Topic :: Scientific/Engineering',
              'Operating System :: Microsoft :: Windows',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS',
              'Programming Language :: Python :: 2',
              'Programming Language :: Python :: 2.6',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
          ],
          packages=find_packages(),
          install_requires=install_requires,)
| [
"glemaitre@visor.udg.edu"
] | glemaitre@visor.udg.edu |
76ebde0afed83ac4627c0e5b5ade1bb9588d1735 | 47f4e3aabb6dcb0f9a48c8a5634eac1523b71b2c | /edit_being/qyaddons/ct_pos_ticket/__manifest__.py | 75d13c59ac772e7fda752e19009424c2c23dd1b7 | [] | no_license | marvin981973/odoo-2 | 485b7815b639da17400f38ab2200fb6956486451 | f45a562b1bd962697f096e7f7bc57b131b3e11f3 | refs/heads/master | 2020-06-26T06:22:16.520775 | 2018-03-11T13:26:04 | 2018-03-11T13:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
{
    # Odoo module manifest for a POS receipt customisation.
    'name': 'POS小票',
    'summary': '修改打印小票格式',
    'description': """
修改POS内部连接小票打印机打印出来的内容格式
    """,
    'category': 'other',
    'version': '1.0',
    'author': '今晨科技|企通软件',
    'website': 'http://www.168nz.cn/',
    # Core modules plus the Point of Sale module being customised.
    'depends': ['base', 'web', 'point_of_sale'],
    # Backend view/template definitions loaded on install.
    'data': [
        'views/template.xml',
    ],
    # QWeb templates bundled into the POS web client.
    'qweb': [
        'static/src/xml/*.xml',
    ],
    'installable': True,
    'application': True,
}
"guwenfengvip@163.com"
] | guwenfengvip@163.com |
6501cb660574bc51eb7bcf609abd69325d478992 | a96ce59aa2c6c40388b08f9586aec3ee57482048 | /backend/proud_pond_26824/wsgi.py | 23df67ba826db77b421127be98d558ad0ef1f3cc | [] | no_license | crowdbotics-apps/proud-pond-26824 | a32c51aa6b7876b0b541646c163c9557cdd586ad | d9a0b2daae92d0da785f88589fc9c00b7d710542 | refs/heads/master | 2023-04-23T06:06:31.081083 | 2021-05-17T21:40:47 | 2021-05-17T21:40:47 | 368,328,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for proud_pond_26824 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proud_pond_26824.settings')

# Module-level WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
30e40d8e872dd61da615410d1d1d9f51cb8e0986 | 29fb2eb3b9bb21b529e814da53518fab2958693a | /bayesian_treatment/10_table_Electron_table_Comparison.py | a2d7ec3151e13a8c90fa98b2d96e424c973e65e7 | [] | no_license | Vital-Fernandez/thesis_pipeline | acca734b1a2ce11b0bee5bd41fab534022ea295e | 1253e2ed94e0f502a16cae6b88f84b633d0f16c2 | refs/heads/master | 2022-05-31T10:15:47.241645 | 2021-05-18T17:43:44 | 2021-05-18T17:43:44 | 90,319,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,892 | py | from dazer_methods import Dazer
from lib.CodeTools.sigfig import round_sig
from uncertainties import unumpy
from collections import OrderedDict
from pylatex import Package, NoEscape
from numpy import isnan
from pandas import isnull
import pandas as pd
import numpy as np
import uncertainties as un
from uncertainties.umath import pow as umath_pow, log10 as umath_log10, exp as umath_exp, isnan as un_isnan
def colorChooser(ObsRatio, TheRatio):
    """Grade how close an observed line ratio is to its theoretical value.

    Returns a LaTeX colour name: 'ForestGreen' when the observed value is
    within 5% of the theoretical one, 'YellowOrange' within 10%, and
    'BrickRed' otherwise.
    """
    if TheRatio * 0.95 < ObsRatio < TheRatio * 1.05:
        return 'ForestGreen'
    if TheRatio * 0.90 < ObsRatio < TheRatio * 1.10:
        return 'YellowOrange'
    return 'BrickRed'
#Load observational data
bayes_catalogue_df_address = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_BayesianResults.txt'
bayes_catalogue_df = pd.read_csv(bayes_catalogue_df_address, delim_whitespace=True, header=0, index_col=0)

#Define data to load
# Import library object
dz = Dazer()
dz.load_elements()

# Load observational data
catalogue_dict = dz.import_catalogue()
catalogue_df = dz.load_excel_DF('/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx')
AbundancesFileExtension = '_' + catalogue_dict['Datatype'] + '_linesLog_emission_2nd.txt'
dz.quick_indexing(catalogue_df)

# Reddening properties
R_v = 3.4
red_curve = 'G03_average'
cHbeta_type = 'cHbeta_emis'

# Define data to load
ext_data = '_emis2nd'
ext_data_bayes = ''
pdf_address = '/home/vital/Dropbox/Astrophysics/Thesis/tables/objProperties_Preamble'

# Headers: physical properties read from the catalogue (with the
# measurement-extension suffix appended) plus the Bayesian columns.
properties_list = ['neSII', 'TeSIII', 'TeOIII']
properties_list = map((lambda x: x + ext_data), properties_list)
properties_list_bayes = ['neSII', 'TeSIII']
headers_format = ['HII Galaxy', r'$\frac{[OIII]\lambda5007\AA}{[OIII]\lambda4959\AA}$', r'$\frac{[SIII]\lambda9531\AA}{[SIII]\lambda9069\AA}$']
headers_format += [r'$n_{e}[SII](cm^{-3})$', r'$T_{e}[SIII](K)$', r'$T_{e}[OIII](K)$']
headers_format += ['$n_{e}(cm^{-3})$', r'$T_{low}(K)$', r'$T_{high}(K)$']

# Set the pdf format
dz.create_pdfDoc(pdf_address, pdf_type='table')
dz.pdf_insert_table(headers_format)

for objName in catalogue_df.loc[dz.idx_include].index:

    ouput_folder = '{}{}/'.format(catalogue_dict['Obj_Folder'], objName)
    lineslog_address = '{objfolder}{codeName}{lineslog_extension}'.format(objfolder=ouput_folder, codeName=objName, lineslog_extension=AbundancesFileExtension)

    # Load lines frame
    lineslog_frame = dz.load_lineslog_frame(lineslog_address)

    # Perform the reddening correction
    cHbeta = catalogue_df.loc[objName, cHbeta_type]
    dz.deredden_lines(lineslog_frame, reddening_curve=red_curve, cHbeta=cHbeta, R_v=R_v)

    # Sulfur ratios: colour-code the observed/theoretical agreement
    if set(lineslog_frame.index) >= set(['S3_9069A', 'S3_9531A']):
        s3_ratio = lineslog_frame.loc['S3_9531A'].line_Int / lineslog_frame.loc['S3_9069A'].line_Int
        s3_color = colorChooser(s3_ratio.nominal_value, dz.S3_ratio)
        s3_entry = r'\textcolor{' + s3_color + '}{' + dz.format_for_table(s3_ratio, rounddig=3) + '}'
    else:
        s3_entry = '-'

    # Oxygen ratios
    if set(lineslog_frame.index) >= set(['O3_4959A', 'O3_5007A']):
        O3_ratio = lineslog_frame.loc['O3_5007A'].line_Int / lineslog_frame.loc['O3_4959A'].line_Int
        O3_color = colorChooser(O3_ratio.nominal_value, dz.O3_5000_ratio)
        O3_entry = r'\textcolor{' + O3_color + '}{' + dz.format_for_table(O3_ratio, rounddig=3) + '}'
    else:
        O3_entry = '-'

    # Fill the table: superscript flags which temperature diagnostics
    # were measured directly vs. derived from the empirical relation.
    if (catalogue_df.loc[objName].T_low == 'TeSIII') and (catalogue_df.loc[objName].T_high == 'TeOIII'):
        exponent = ''
    elif (catalogue_df.loc[objName].T_low != 'TeSIII'):
        exponent = 'O'
    else:
        exponent = 'S'

    # Add the Bayesian data
    bayesCodeName = '{}'.format(bayes_catalogue_df.loc[objName].quick_index)
    bayes_values = []
    print '------', bayesCodeName, objName
    if bayesCodeName not in ['SHOC588', 'SHOC592', 'SHOC036', 'SHOC575', 'SHOC579', 'SHOC220']:
        objData = bayes_catalogue_df.loc[objName]
        for param in properties_list_bayes:
            param_value = objData[param]
            param_err = objData[param + '_err']
            param_un = un.ufloat(param_value, param_err)
            if np.isnan(param_un.nominal_value):
                param_un = np.nan
            bayes_values.append(param_un)
        # Third column: high-ionisation temperature estimated from the
        # last low-ionisation value via a linear relation.
        # NOTE(review): indentation reconstructed -- these two lines are
        # placed after the loop so bayes_values matches the 3 headers.
        param_un = (1.0807 * param_un / 10000.0 - 0.0846) * 10000.0
        bayes_values.append(param_un)
    else:
        bayes_values = ['-', '-', '-']

    entry_name = '{codename}$^{{{elements}}}$'.format(codename=catalogue_df.loc[objName].quick_index, elements=exponent)
    # NOTE(review): T_low_entry/T_high_entry are assigned but never used below.
    T_low_entry = r'$T_{e}[SIII]$' if catalogue_df.loc[objName].T_low == 'TeSIII' else r'$T_{e}[SIII] eq.16$'
    T_high_entry = r'$T_{e}[OIII]$' if catalogue_df.loc[objName].T_high == 'TeOIII' else r'$T_{e}[OIII] eq.16$'

    row = [entry_name] + [O3_entry] + [s3_entry] + list(catalogue_df.loc[objName, properties_list].values) + bayes_values
    dz.addTableRow(row, last_row=False if catalogue_df.index[-1] != objName else True, rounddig=3)

dz.generate_pdf(clean_tex=False)
# dz.generate_pdf(output_address=pdf_address)

print 'Table generated'
# from dazer_methods import Dazer
# from uncertainties import unumpy
# from collections import OrderedDict
# from pylatex import Package, NoEscape
# from numpy import isnan
# from pandas import isnull
# import pandas as pd
# import numpy as np
# import uncertainties as un
# from uncertainties.umath import pow as umath_pow, log10 as umath_log10, exp as umath_exp, isnan as un_isnan
#
# dz = Dazer()
#
# #Load observational data
# bayes_catalogue_df_address = '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_BayesianResults.txt'
# bayes_catalogue_df = pd.read_csv(bayes_catalogue_df_address, delim_whitespace=True, header=0, index_col=0)
#
# #Define data to load
# ext_data = ''
# pdf_address = '/home/vital/Dropbox/Astrophysics/Thesis/tables/bayes_AbundancesTable'
#
# #Headers
# headers_dic = OrderedDict()
# headers_dic['HeI_HI'] = r'$\nicefrac{He}{H}$'
# headers_dic['Ymass_O'] = r'$Y_{\left(\nicefrac{O}{H}\right)}$'
# headers_dic['Ymass_S'] = r'$Y_{\left(\nicefrac{S}{H}\right)}$'
# headers_dic['OI_HI'] = r'$12 + log\left(\nicefrac{O}{H}\right)$'
# headers_dic['NI_HI'] = r'$12 + log\left(\nicefrac{N}{H}\right)$'
# headers_dic['SI_HI'] = r'$12 + log\left(\nicefrac{S}{H}\right)$'
#
# properties_list = map(( lambda x: x + ext_data), headers_dic.keys())
# headers_format = ['HII Galaxy'] + headers_dic.values()
#
# # Create a new list for the different entries
# metals_list = properties_list[:]
#
# del metals_list[metals_list.index('HeI_HI' + ext_data)]
# del metals_list[metals_list.index('Ymass_O' + ext_data)]
# del metals_list[metals_list.index('Ymass_S' + ext_data)]
#
# #Set the pdf format
# dz.pdf_insert_table(headers_format)
#
# print properties_list
#
# for objName in bayes_catalogue_df.index:
#
# entry_name = '{}'.format(bayes_catalogue_df.loc[objName].quick_index)
#
# if entry_name not in ['SHOC588', 'SHOC592', 'SHOC036', 'SHOC575', 'SHOC579', 'SHOC220']:
#
# objData = bayes_catalogue_df.loc[objName]
# row = [entry_name]
#
# for param in properties_list:
# param_value = objData[param]
# param_err = objData[param + '_err']
# param_un = un.ufloat(param_value, param_err)
#
# if param not in ['HeI_HI', 'Ymass_O', 'Ymass_S']:
# param_un = 12 + umath_log10(param_un)
#
# if np.isnan(param_un.nominal_value):
# param_un = np.nan
#
# row.append(param_un)
#
# dz.addTableRow(row, last_row = False if bayes_catalogue_df.index[-1] != objName else True, rounddig=3, rounddig_er=1)
#
# dz.generate_pdf()
# #dz.generate_pdf(output_address=pdf_address)
| [
"vital.fernandez@gmail.com"
] | vital.fernandez@gmail.com |
d543b03fd232f81b04d4ea29f1993ad04ba26c94 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/automation/v20180115/outputs.py | 0a8b07f580f75db3fc25b8e64b9658b630192036 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'DscConfigurationAssociationPropertyResponse',
]
@pulumi.output_type
class DscConfigurationAssociationPropertyResponse(dict):
    """
    The Dsc configuration property associated with the entity.

    Generated Pulumi output type: behaves as a dict whose wire keys are
    mapped onto snake_case Python attributes.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        The Dsc configuration property associated with the entity.
        :param str name: Gets or sets the name of the Dsc configuration.
        """
        # Only store the key when a value was supplied, keeping the
        # underlying dict sparse for serialization.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Gets or sets the name of the Dsc configuration.
        """
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
62b22fda6d4ef03350bbf3914df64d4c0dc25f95 | 03d68ceacf35455d5cd692411940400bcf7d8541 | /tools/coded/ipconvert.py | 8c7976dcd974585fe6525b7feb923f28afa0f24c | [] | no_license | j4ckzh0u/ctf-tools-1 | 569822fe102e54084ff26916760205598ab9db3f | 119a5b4b73a032d49740ab371055e9f2400cb79a | refs/heads/master | 2021-05-24T12:49:44.102597 | 2020-03-31T06:48:27 | 2020-03-31T06:48:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | #coding=utf-8
#version 1.2
import sys
def tab_to_8(binip):
    """Left-pad a binary octet string with zeros to exactly 8 characters.

    Raises Exception when the input is already longer than 8 bits.
    """
    if len(binip) > 8:
        raise Exception('lenth error')
    return binip.zfill(8)
def dot_to_bin(ip):
    """Convert dotted-decimal IPv4 text into a 32-character binary string.

    Returns False when the input does not contain exactly four
    dot-separated fields; each octet is rendered as 8 binary digits.
    """
    text = str(ip)
    if text.count('.') != 3:
        return False
    fields = text.split('.')
    return ''.join(tab_to_8(bin(int(field, base=10))[2:]) for field in fields)
def int_to_dot(ip):
    """Render a non-negative integer as dotted-decimal IPv4 text.

    Returns False when the value needs more than 32 bits.
    """
    bits = bin(ip)[2:]
    if len(bits) > 32:
        return False
    bits = bits.zfill(32)
    return '.'.join(str(int(bits[8 * k:8 * (k + 1)], base=2)) for k in range(4))
def dot_to_oct(dot_ip):
    """Render dotted-decimal IPv4 text in octal, prefixed with a single '0'.

    Returns False when there are not exactly four dot-separated fields.
    """
    fields = dot_ip.split('.')
    if len(fields) != 4:
        return False
    return '0' + '.'.join(oct(int(field))[2:] for field in fields)
def main(ip):
    """Parse one user-supplied IP and print it in all notations.

    Accepts dotted-decimal, plain decimal, '0b...' binary or '0x...'
    hexadecimal text; typing 'exit()' terminates the program. Prints
    'ip format error' for values that cannot represent an IPv4 address.
    """
    template = 'dot: {}\nbin: {}\nhex: {}\nint: {}\noct: {}'
    if ip == 'exit()':
        exit()
    if ip[:2] == '0b' or ip[:2] == '0x' or ip.find('.') == -1:
        # Numeric input: binary, hexadecimal or plain decimal.
        if ip[:2] == '0b':
            value = int(ip, base=2)
        elif ip[:2] == '0x':
            value = int(ip, base=16)
        else:
            value = int(ip)
        dot_ip = int_to_dot(value)
        if dot_ip == False:
            print('ip format error')
            return
        bin_ip = dot_to_bin(dot_ip)
        ip = value
    else:
        # Dotted-decimal input.
        bin_ip = dot_to_bin(ip)
        if bin_ip == False:
            print('ip format error')
            return
        dot_ip = ip
        ip = int(bin_ip, base=2)
    # Emit every representation of the same address.
    print(template.format(dot_ip, bin_ip, hex(int(bin_ip, base=2))[2:], ip, dot_to_oct(dot_ip)))
# One-shot mode: "ipconvert.py <ip>" converts a single address and exits.
if len(sys.argv) == 2:
    ip = sys.argv[1]
    print()
    main(ip)
    exit()

print('ps:输入二进制ip需要以0b开头,十六进制以0x开头')

if __name__ == "__main__":
    # Interactive mode: keep converting until the user types exit().
    while True:
        ip = input('input ip:')
        main(ip)
"yun1067530461@gmail.com"
] | yun1067530461@gmail.com |
6d82dde142112a41c6c2e0432c936797e40d7fb7 | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/104.py | d15b1378939a65e4139d4810208f43daccfa2bcb | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of levels in the binary tree rooted at
        ``root``, using an iterative level-order (BFS) traversal."""
        if not root:
            return 0
        level = collections.deque([root])
        depth = 0
        while level:
            depth += 1
            # Drain exactly the current level, queueing its children.
            for _ in range(len(level)):
                node = level.popleft()
                if node.left:
                    level.append(node.left)
                if node.right:
                    level.append(node.right)
        return depth
| [
"seoyeon@nowbusking.com"
] | seoyeon@nowbusking.com |
79d951e2625eb26324e7cfc5ffbd419f507c265c | e7aaccb209ed344f719907fb1995d1c109771084 | /pipeline/make_daily_timeseries.py | f27fee47485b9d40280e539d573cce1dbe116ce0 | [] | no_license | ua-snap/seaice_noaa_indicators | 27ab4313b110be48666075310b7d5d6d4037b88a | 174353a2dd9bf2fef681cc52dce501a44ad1db59 | refs/heads/master | 2022-07-10T03:31:22.183114 | 2019-09-21T17:31:08 | 2019-09-21T17:31:08 | 132,519,899 | 2 | 0 | null | 2022-06-21T22:48:07 | 2018-05-07T21:47:47 | Python | UTF-8 | Python | false | false | 11,987 | py | # # # # # # # # # # # # # # # # # # # #
# # make a full daily array with and
# # interpolate missing dates linearly
# # 2D spatial / 1D profile hann smoothed
# #
# # Author: Michael Lindgren (malindgren@alaska.edu)
# # # # # # # # # # # # # # # # # # # #
def nan_helper(y):
    """Return (mask, index_fn) for locating NaNs in a 1-D array.

    ``mask`` is a boolean array flagging NaN positions; ``index_fn``
    converts such a boolean mask into integer indices, ready to feed
    ``np.interp``.

    Example (linear interpolation of NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])

    (idiom from https://stackoverflow.com/questions/6518811)
    """
    mask = np.isnan(y)
    to_indices = lambda flags: flags.nonzero()[0]
    return mask, to_indices
def interp_1d_along_axis(y):
    """Fill NaNs in a 1-D timeslice in place by linear interpolation
    over the element index, returning the (mutated) array."""
    positions = np.arange(y.shape[0])
    missing = np.isnan(y)
    y[missing] = np.interp(positions[missing], positions[~missing], y[~missing])
    return y
def interpolate(x):
    """Linearly fill NaNs in a 1-D array over the element index.

    Returns a new filled array, or None (implicitly, matching the
    original guard) when every element is NaN.
    """
    if np.isnan(x).all():
        return None
    positions = np.arange(len(x))
    valid = np.logical_not(np.isnan(x))
    return np.interp(positions, positions[valid], x[valid])
def make_datetimes(timestr):
    """Parse a 'YYYYMMDD' string into a datetime.datetime at midnight."""
    year, month, day = int(timestr[:4]), int(timestr[4:6]), int(timestr[6:])
    return dt.datetime(year, month, day)
def open_raster(fn):
    """Read band 1 of a rasterio-readable file into a numpy array."""
    with rasterio.open(fn) as src:
        band1 = src.read(1)
    return band1
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
    '''
    take a raster file as input and return the centroid coords for each
    of the grid cells as a pair of numpy 2d arrays (longitude, latitude)

    User must give either:
        fn = path to the rasterio readable raster
    OR
        meta & numpy ndarray (usually obtained by rasterio.open(fn).read( 1 ))
        where:
        meta = a rasterio style metadata dictionary ( rasterio.open(fn).meta )
        numpy_array = 2d numpy array representing a raster described by the meta
        input_crs = rasterio style proj4 dict, example: { 'init':'epsg:3338' }
        to_latlong = boolean. If True all coordinates will be returned as EPSG:4326
                     If False all coordinates will be returned in input_crs
    returns:
        meshgrid of longitudes and latitudes

    borrowed from here: https://gis.stackexchange.com/a/129857
    '''
    import rasterio
    import numpy as np
    from affine import Affine
    from pyproj import Proj, transform

    if fn:
        # Read raster
        with rasterio.open( fn ) as r:
            T0 = r.transform  # upper-left pixel corner affine transform
            p1 = Proj( r.crs )
            A = r.read( 1 )  # pixel values
    elif (meta is not None) & (numpy_array is not None):
        A = numpy_array
        if input_crs != None:
            p1 = Proj( input_crs )
            T0 = meta[ 'transform' ]
        else:
            p1 = None
            T0 = meta[ 'transform' ]
    else:
        # NOTE(review): the exception object is constructed but never
        # raised, so execution falls through -- confirm intent.
        BaseException( 'check inputs' )

    # All rows and columns
    cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
    # Get affine transform for pixel centres
    T1 = T0 * Affine.translation( 0.5, 0.5 )
    # Function to convert pixel row/column index (from 0) to easting/northing at centre
    rc2en = lambda r, c: ( c, r ) * T1
    # All eastings and northings -- this is much faster than np.apply_along_axis
    eastings, northings = np.vectorize(rc2en, otypes=[np.float, np.float])(rows, cols)

    if to_latlong == False:
        return eastings, northings
    elif (to_latlong == True) & (input_crs != None):
        # Project all longitudes, latitudes
        longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
        return longs, lats
    else:
        # NOTE(review): not raised either; this branch returns None.
        BaseException( 'cant reproject to latlong without an input_crs' )
def make_xarray_dset(arr, times, rasterio_meta_dict):
    """Wrap a (time, y, x) stack in an xarray.Dataset named 'sic'.

    The projected x/y coordinate vectors are derived from the rasterio
    metadata; projection attributes are recorded on the dataset.
    """
    meta = rasterio_meta_dict
    xc, yc = coordinates(meta=meta, numpy_array=arr[1, ...])
    attrs = {'proj4string': 'EPSG:3411',
             'proj_name': 'NSIDC North Pole Stereographic',
             'affine_transform': str(list(meta['transform']))}
    coords = {'xc': ('xc', xc[0, ]),
              'yc': ('yc', yc[:, 0]),
              'time': times}
    return xr.Dataset({'sic': (['time', 'yc', 'xc'], arr)}, coords=coords, attrs=attrs)
def mean_filter_2D(arr, footprint):
    """Mean-filter a 2-D array over ``footprint`` while preserving the
    -9999 mask and NaN nodata cells.

    NaNs are ignored by the window average (np.nanmean). NOTE: mutates
    ``arr`` in place (mask cells are set to NaN), exactly like the
    original implementation -- callers are expected to pass copies.
    """
    from scipy.ndimage import generic_filter
    mask_idx = np.where(arr == -9999)
    nodata_idx = np.where(np.isnan(arr) == True)
    arr[mask_idx] = np.nan  # make mask nodata for the window average
    smoothed = generic_filter(arr, np.nanmean, footprint=footprint, origin=0)
    smoothed[mask_idx] = -9999  # restore mask
    smoothed[nodata_idx] = np.nan  # restore nodata
    return smoothed
def run_meanfilter(pair):
    """Multiprocessing shim: lets Pool.map call mean_filter_2D with a
    single (array, footprint) argument pair."""
    return mean_filter_2D(*pair)
def hanning_smooth(x):
    """3-point [0.25, 0.5, 0.25] smoothing pass over a 1-D series,
    mimicking the smoothing agreed in meetings with Mark/Hajo."""
    from scipy import signal
    kernel = np.array([0.25, 0.5, 0.25])
    return signal.convolve(x, kernel, mode='same') / kernel.sum()
def stack_rasters(files, ncpus=32):
    """Read band 1 of every file in parallel and stack the results into
    a 3-D (time, rows, cols) numpy array."""
    workers = mp.Pool(ncpus)
    stacked = np.array(workers.map(open_raster, files))
    workers.close()
    workers.join()
    return stacked
# # # MULTIPROCESSING APPROACHES TO GENERIC FILTER BUT DONT WORK DUE TO SOME OpenBLAS ISSUE.
# def spatial_smooth( arr, footprint, ncpus=32 ):
# arr_list = [a.copy() for a in arr] # unpack 3d (time,rows,cols) array to 2d list
# f = partial( mean_filter_2D, footprint=footprint )
# pool = mp.Pool( ncpus )
# out_arr = pool.map( f, arr_list )
# pool.close()
# pool.join()
# return np.array(out_arr)
# def spatial_smooth( arr, size=3, ncpus=32 ):
# f = partial( mean_filter_2D, size=size )
# pool = mp.Pool( ncpus )
# out_arr = pool.map( f, [a for a in arr] )
# pool.close()
# pool.join()
# return np.array(out_arr)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def make_output_dirs(dirname):
    """Ensure ``dirname`` exists (creating parents as needed) and return it.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create pair, removing the race where another process
    creates the directory between the ``os.path.exists`` test and
    ``os.makedirs`` (which would previously raise FileExistsError).
    """
    os.makedirs(dirname, exist_ok=True)
    return dirname
if __name__ == '__main__':
    import os, rasterio
    import datetime as dt
    import pandas as pd
    import numpy as np
    import xarray as xr
    from functools import partial
    import multiprocessing as mp
    import argparse
    from scipy.ndimage import generic_filter

    # parse some args
    parser = argparse.ArgumentParser( description='stack the hourly outputs from raw WRF outputs to NetCDF files of hourlies broken up by year.' )
    parser.add_argument( "-b", "--base_path", action='store', dest='base_path', type=str, help="input hourly directory containing the NSIDC_0051 data converted to GTiff" )
    parser.add_argument( "-n", "--ncpus", action='store', dest='ncpus', type=int, help="number of cpus to use" )

    # unpack args
    args = parser.parse_args()
    base_path = args.base_path
    ncpus = args.ncpus

    # # # # TESTING
    # base_path = '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051'
    # ncpus = 32
    # # # # # #

    # list all data: dates are parsed from the filename stem 'xxx_YYYYMMDD'
    input_path = os.path.join( base_path, 'prepped', 'north' )
    files = sorted([ os.path.join(r, fn) for r, s, files in os.walk(input_path) for fn in files if fn.endswith('.tif') ])
    data_times = [ make_datetimes( os.path.basename(fn).split('.')[0].split('_')[1] ) for fn in files ]

    # date-fu for filenames and slicing
    begin = data_times[0]
    end = data_times[-1]
    begin_str = begin.strftime('%Y-%m-%d')
    end_str = end.strftime('%Y-%m-%d')

    # stack the irregularly spaced data to a netcdf
    with rasterio.open( files[0] ) as template:
        meta = template.meta.copy()
        height, width = template.shape

    arr = stack_rasters( files, ncpus=ncpus )
    ds = make_xarray_dset( arr.copy(), pd.DatetimeIndex(data_times), meta )
    da = ds['sic'].copy()

    # interpolate to daily: asfreq inserts NaN slices for missing days
    da_interp = da.resample(time='1D').asfreq()

    # get a masks layer from the raw files. These are all values > 250
    # ------------ ------------ ------------ ------------ ------------
    # 251 Circular mask used in the Arctic to cover the irregularly-shaped data
    #     gap around the pole (caused by the orbit inclination and instrument swath)
    # 252 Unused
    # 253 Coastlines
    # 254 Superimposed land mask
    # 255 Missing data
    # make a mask of the known nodata values when we start...
    mask = (arr[0] > 250) & (arr[0] < 300)

    # set masks to nodata
    dat = da_interp.values.copy()

    # make the nodata mask np.nan for computations
    out_masked = []
    for i in dat:
        i[mask] = np.nan
        out_masked = out_masked + [i]

    # put the cleaned up data back into the stacked NetCDF, then fill the
    # inserted daily gaps linearly along the time axis
    da_interp.data = np.array(out_masked)
    da_interp.data = np.apply_along_axis(interpolate, axis=0, arr=da_interp).round(4)

    # spatially smooth the 2-D daily slices of data using a mean generic
    # filter (without any aggregation)
    print('spatial smooth')
    footprint_type = 'queens'
    footprint_lu = {'rooks': np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]),
                    'queens': np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])}
    footprint = footprint_lu[ footprint_type ]

    # run using multiprocessing -- YMMV this is a tad flaky at times.
    args = [(i.copy(), footprint) for i in da_interp.values]
    pool = mp.Pool(10)
    out = pool.map(run_meanfilter, args)
    pool.close()
    pool.join()

    def _maskit(x, mask):
        '''masking function: stamp -9999 wherever the mask is set'''
        x[mask == True] = -9999
        return x

    # mask the spatial smoothed outputs with the mask at each 2D slice.
    smoothed = np.array([_maskit(i, mask) for i in out]).copy()

    print('hanning smooth')
    n = 3  # perform 3 iterative smooths on the same series
    for i in range(n):
        smoothed = np.apply_along_axis( hanning_smooth, arr=smoothed, axis=0 )

    # make sure no values < 0, set to 0
    smoothed[np.where((smoothed < 0) & (~np.isnan(smoothed)))] = 0
    # make sure no values > 1, set to 1
    smoothed[np.where((smoothed > 1) & (~np.isnan(smoothed)))] = 1

    # mask it again to make sure the nodata and land are properly masked following hanning.
    smoothed = np.array([_maskit(i, mask) for i in smoothed]).copy()

    # # make whatever np.nan's are left -9999's
    # # this appears to occur only around a small mask around the landmask
    # smoothed[np.isnan(smoothed)] = -9999

    # write this out as a GeoTiff (one band per day)
    out_fn = os.path.join( base_path, 'smoothed', 'GTiff', 'nsidc_0051_sic_nasateam_{}-{}_north_smoothed.tif'.format(str(begin.year), str(end.year)) )
    _ = make_output_dirs( os.path.dirname(out_fn) )
    meta.update(count=smoothed.shape[0], compress='lzw')
    with rasterio.open( out_fn, 'w', **meta ) as out:
        out.write( smoothed.astype(np.float32) )

    # write it out as a NetCDF
    out_ds = da_interp.copy(deep=True)
    out_ds.values = smoothed.astype(np.float32)
    out_ds = out_ds.to_dataset( name='sic' )
    out_ds.attrs = ds.attrs

    # output encoding
    encoding = out_ds.sic.encoding.copy()
    encoding.update({ 'zlib': True, 'comp': 5, 'contiguous': False, 'dtype': 'float32' })
    out_ds.sic.encoding = encoding

    out_fn = os.path.join( base_path, 'smoothed', 'NetCDF', 'nsidc_0051_sic_nasateam_{}-{}_north_smoothed.nc'.format(str(begin.year), str(end.year)) )
    _ = make_output_dirs( os.path.dirname(out_fn) )
    out_ds.to_netcdf( out_fn, format='NETCDF4' )
| [
"lindgren.mike@gmail.com"
] | lindgren.mike@gmail.com |
0e2b20cc7003718d91f5888ba076de4eff653767 | b76c08a4c33245a737fa0e139d212bb424017cd1 | /src/cybersource/tests/test_models.py | 0de6ae3f4b7ba8af4a49ab21716ba81bce88f55b | [
"ISC"
] | permissive | thelabnyc/django-oscar-cybersource | 5b09845121ef1c074335c01e86c649c36e4e51e4 | 95b33362adf8ba0217ac73c6f816b544c9faa18d | refs/heads/master | 2023-03-15T15:25:55.388795 | 2023-03-14T16:00:07 | 2023-03-14T16:00:07 | 58,149,620 | 4 | 3 | ISC | 2023-02-07T22:17:15 | 2016-05-05T17:45:52 | Python | UTF-8 | Python | false | false | 3,376 | py | from django.test import TestCase
from ..models import CyberSourceReply, PaymentToken, SecureAcceptanceProfile
from .factories import build_accepted_token_reply_data
class PaymentTokenTest(TestCase):
    """Verify PaymentToken's parsing of an accepted token-creation reply."""

    def test_log_data_parsing(self):
        data = build_accepted_token_reply_data("S123456789", "")
        # Fields copied verbatim from the reply payload onto the log row.
        copied_fields = (
            "auth_avs_code", "auth_code", "auth_response",
            "auth_trans_ref_no", "decision", "message", "reason_code",
            "req_bill_to_address_postal_code", "req_bill_to_forename",
            "req_bill_to_surname", "req_card_expiry_date",
            "req_reference_number", "req_transaction_type",
            "req_transaction_uuid", "request_token", "transaction_id",
        )
        log = CyberSourceReply.objects.create(
            data=data,
            **{name: data.get(name) for name in copied_fields},
        )
        token = PaymentToken.objects.create(
            log=log,
            token=data["payment_token"],
            masked_card_number=data["req_card_number"],
            card_type=data["req_card_type"],
        )
        # Derived attributes should be decoded from the stored reply.
        self.assertEqual(token.card_type_name, "Visa")
        self.assertEqual(token.billing_zip_code, "10001")
        self.assertEqual(token.expiry_month, "12")
        self.assertEqual(token.expiry_year, "2020")
        self.assertEqual(token.card_last4, "1111")
        self.assertEqual(token.card_holder, "Bob Smith")
class SecureAcceptanceProfileTest(TestCase):
    """Hostname -> profile resolution, including the default fallback."""

    def setUp(self):
        # Three profiles; only the www one is flagged as the default.
        fixtures = (
            ("foo.example.com", "a", False),
            ("bar.example.com", "b", False),
            ("www.example.com", "c", True),
        )
        for hostname, profile_id, is_default in fixtures:
            SecureAcceptanceProfile.objects.create(
                hostname=hostname,
                profile_id=profile_id,
                access_key="",
                secret_key="",
                is_default=is_default,
            )

    def test_get_profile(self):
        # Each known hostname resolves to its own profile.
        for hostname, expected_id in (
            ("foo.example.com", "a"),
            ("bar.example.com", "b"),
            ("www.example.com", "c"),
        ):
            profile = SecureAcceptanceProfile.get_profile(hostname)
            self.assertEqual(profile.profile_id, expected_id)

    def test_default_fallback(self):
        # An unknown hostname falls back to the default profile.
        profile = SecureAcceptanceProfile.get_profile("baz.example.com")
        self.assertEqual(profile.profile_id, "c")

    def test_no_profiles(self):
        # With no rows at all, a built-in profile id is returned.
        SecureAcceptanceProfile.objects.all().delete()
        profile = SecureAcceptanceProfile.get_profile("www.example.com")
        self.assertEqual(profile.profile_id, "2A37F989-C8B2-4FEF-ACCF-2562577780E2")
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
9bd9fd8e914cfb6c6e9206d96e6448f17e74db1a | dfb4cb8d916b62d7272ca353302d1ad95e4d7244 | /qa/rpc-tests/forknotify.py | cb1481fcf20133fcbce7f26965cb5cf73b0cf0e7 | [
"MIT"
] | permissive | mirzaei-ce/core-shahbit | d166ab47067bf66c3015c3da49ff31cd29f843db | 57ad738667b3d458c92d94aee713c184d911c537 | refs/heads/master | 2021-07-21T11:09:22.493418 | 2017-10-25T13:50:55 | 2017-10-25T13:50:55 | 108,276,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -alertnotify
#
from test_framework.test_framework import ShahbitTestFramework
from test_framework.util import *
class ForkNotifyTest(ShahbitTestFramework):
    """Checks that -alertnotify fires once (and only once) on up-version blocks."""

    alert_filename = None  # Set by setup_network

    def _read_alert_file(self):
        # Helper: return the current contents of the -alertnotify output file.
        with open(self.alert_filename, 'r') as f:
            return f.read()

    def setup_network(self):
        self.nodes = []
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        with open(self.alert_filename, 'w') as f:
            pass  # Just open then close to create zero-length file
        self.nodes.append(start_node(0, self.options.tmpdir,
                          ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
        # Node1 mines block.version=211 blocks
        self.nodes.append(start_node(1, self.options.tmpdir,
                          ["-blockversion=211"]))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # Mine 51 up-version blocks
        self.nodes[1].generate(51)
        self.sync_all()
        # -alertnotify should trigger on the 51'st,
        # but mine and sync another to give
        # -alertnotify time to write
        self.nodes[1].generate(1)
        self.sync_all()

        first_alert = self._read_alert_file()
        if not first_alert:
            raise AssertionError("-alertnotify did not warn of up-version blocks")

        # Mine more up-version blocks, should not get more alerts:
        for _ in range(2):
            self.nodes[1].generate(1)
            self.sync_all()

        if self._read_alert_file() != first_alert:
            raise AssertionError("-alertnotify excessive warning of up-version blocks")
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    ForkNotifyTest().main()
| [
"mirzaei@ce.sharif.edu"
] | mirzaei@ce.sharif.edu |
495615fd0a075747a90732de5998be193f2a7a0a | 4081698d691baafc58343c72a721622cec251f67 | /tools/testing/cross_language/util/cli_daead.py | d1bc265be0234911d65cf6485037529b47aeb990 | [
"Apache-2.0"
] | permissive | thalescpl-io/tink | 5ac62a54b73414402f6b600cff0fd21a4f999137 | 0d1769b28cabe2a60daca9b8da0bd14def54bc21 | refs/heads/master | 2021-03-10T03:27:58.161079 | 2020-05-15T23:45:42 | 2020-05-15T23:45:42 | 246,412,910 | 0 | 0 | Apache-2.0 | 2020-03-10T21:33:19 | 2020-03-10T21:33:18 | null | UTF-8 | Python | false | false | 3,134 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a Deterministic AEAD CLI into a Python Tink DeterministicAead class."""
# Placeholder for import for type annotations
import os
import subprocess
import tempfile
import tink
from tink import cleartext_keyset_handle
from tink import daead
from typing import Text
# All languages that have an Deterministic AEAD CLI.
LANGUAGES = ('cc', 'go', 'java', 'python')
# Path are relative to tools directory.
_DAEAD_CLI_PATHS = {
'cc': 'testing/cc/deterministic_aead_cli_cc',
'go': 'testing/go/deterministic_aead_cli_go',
'java': 'testing/deterministic_aead_cli_java',
'python': 'testing/python/deterministic_aead_cli_python',
}
def _tools_path() -> Text:
util_path = os.path.dirname(os.path.abspath(__file__))
return os.path.dirname(os.path.dirname(os.path.dirname(util_path)))
class CliDeterministicAead(daead.DeterministicAead):
    """DeterministicAead primitive backed by a per-language CLI binary."""

    def __init__(self, lang: Text, keyset_handle: tink.KeysetHandle) -> None:
        self.lang = lang
        self._cli = os.path.join(_tools_path(), _DAEAD_CLI_PATHS[lang])
        self._keyset_handle = keyset_handle

    def _run(self, operation: Text, input_data: bytes,
             associated_data: bytes) -> bytes:
        # Invokes `cli <keyset> <operation> <input> <aad> <output>` inside a
        # temporary directory and returns the bytes written to the output file.
        with tempfile.TemporaryDirectory() as tmpdir:
            keyset_path = os.path.join(tmpdir, 'keyset_file')
            input_path = os.path.join(tmpdir, 'input_file')
            aad_path = os.path.join(tmpdir, 'associated_data_file')
            output_path = os.path.join(tmpdir, 'output_file')
            with open(keyset_path, 'wb') as keyset_file:
                writer = tink.BinaryKeysetWriter(keyset_file)
                cleartext_keyset_handle.write(writer, self._keyset_handle)
            for filename, payload in ((input_path, input_data),
                                      (aad_path, associated_data)):
                with open(filename, 'wb') as data_file:
                    data_file.write(payload)
            command = [self._cli, keyset_path, operation,
                       input_path, aad_path, output_path]
            try:
                subprocess.check_output(command)
            except subprocess.CalledProcessError as e:
                raise tink.TinkError(e)
            with open(output_path, 'rb') as output_file:
                return output_file.read()

    def encrypt_deterministically(
        self, plaintext: bytes, associated_data: bytes) -> bytes:
        return self._run('encryptdeterministically', plaintext, associated_data)

    def decrypt_deterministically(
        self, ciphertext: bytes, associated_data: bytes) -> bytes:
        return self._run('decryptdeterministically', ciphertext, associated_data)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
21e7f14bf83ed3670db484b437bab5433bc03ac0 | 2901c198fd36f16e59e22e37d748497bdc51246e | /firstproject/clients/migrations/0008_client_client_id.py | 405e84e77e18398b1f41294fbdefe19d60698974 | [] | no_license | Sarathsathyan/FREELANCING- | b81803340983e4396ee1be032d75367ce416ea79 | bb800f900757ffb757ddb95e2c3c5924785f3386 | refs/heads/master | 2020-05-27T11:47:54.465644 | 2019-08-22T17:40:47 | 2019-08-22T17:40:47 | 188,605,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 2.2.1 on 2019-07-06 05:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0007_auto_20190706_0513'),
]
operations = [
migrations.AddField(
model_name='client',
name='client_id',
field=models.IntegerField(null=True),
),
]
| [
"sarathsathyan98@gmail.com"
] | sarathsathyan98@gmail.com |
2ff9e5a093af8bb5e1ef34ea5c281a6cdf3c10be | 7debcea5a702835479a3639e5deed7ed3f277d65 | /텍스트마이닝 - 네이버 영화 리뷰 크롤링.py | b7567a8752a29953d33e33ae10b7f85119214f35 | [] | no_license | swj8905/Intermediate_Course_0918 | 902db757e130332c7f3d64aa1007a1d0c8a62508 | e2199888d84006934001e1863ce4ec10819fc7f2 | refs/heads/master | 2023-08-11T04:40:45.978468 | 2021-09-26T03:47:17 | 2021-09-26T03:47:17 | 407,747,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from bs4 import BeautifulSoup
import urllib.request as req
page_num = 1
while True:
code = req.urlopen("https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn?code=204496&type=after&isActualPointWriteExecute=false&isMileageSubscriptionAlready=false&isMileageSubscriptionReject=false&page={}".format(page_num))
soup = BeautifulSoup(code, "html.parser")
comment = soup.select("li > div.score_reple > p > span")
if len(comment) == 0:
break
for i in comment:
i = i.text.strip()
if i == "관람객":
continue
print(i)
page_num += 1 | [
"swj8905@naver.com"
] | swj8905@naver.com |
174e32b528f75a1f2e37b3ade6a4145d9a082f66 | 705649d075e112e5546c5d01bf0ae45122c251ea | /account/admin.py | ecb8ced5f615b776cab362d94afa4ab3e2ee07e4 | [] | no_license | liuyuhang791034063/LaoLiu_blog | ffbb81f72ed86803bbebfbae9397aaefdff4d0cc | b9352d1ea84533aa948b342c39e512f134df7acd | refs/heads/master | 2020-03-13T20:40:41.224540 | 2018-05-23T05:44:45 | 2018-05-23T05:44:45 | 131,279,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.contrib import admin
from .models import UserProfile,UserInfo
class UserProfileAdmin(admin.ModelAdmin):
    """Admin listing for UserProfile rows, filterable by phone."""
    list_display = ('user','birth','phone')
    list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)

class UserInfoAdmin(admin.ModelAdmin):
    """Admin listing for UserInfo rows, filterable by school/company/profession."""
    list_display = ('user','school','company','profession','address','aboutme','photo')
    list_filter = ('school','company','profession')
admin.site.register(UserInfo,UserInfoAdmin)
"liuyuhang791034063@qq.com"
] | liuyuhang791034063@qq.com |
a1fbde175cd3d2f6a0772b2147af4995a3d118cc | c31e69b763e1b52d3cefa4f5a49432ae966f22d0 | /day31/07_漏斗图.py | 5f9a116ddb867d090212802276bb1f64595e7a71 | [] | no_license | lvah/201901python | cbda174a3c97bc5a2f732c8e16fc7cf8451522d2 | 7bffe04a846f2df6344141f576820730a7bbfa6a | refs/heads/master | 2022-12-13T09:49:29.631719 | 2019-04-06T09:48:33 | 2019-04-06T09:48:33 | 165,477,671 | 3 | 0 | null | 2022-12-08T04:57:01 | 2019-01-13T07:23:44 | HTML | UTF-8 | Python | false | false | 326 | py | """
文件名: $NAME.py
日期: 22
作者: lvah
联系: xc_guofan@qq.com
代码描述:
"""
# Funnel chart demo rendered with pyecharts.
from pyecharts import Funnel

movie_titles = ["猩球崛起", "敦刻尔克", "蜘蛛侠", "战狼2"]
attendance = [20, 40, 60, 80]

chart = Funnel("xxxx")
chart.add("电影信息", movie_titles, attendance)
chart.render()
| [
"976131979@qq.com"
] | 976131979@qq.com |
7118661969f3778192f0d3212141eb85eb5b3f80 | 1715ff978e90ae468cd29decc8ebbe8a662f42fb | /sgrstats/accounts/views.py | 650dfc81d51b087fab9f7292783e5ab572669295 | [
"Apache-2.0"
] | permissive | Kami/sgrstats.com | 449de4c9c3371e124f3f86fa09df39e82afc60fe | cb23404acae57db2159b464042dbd378b5b91099 | refs/heads/master | 2020-05-16T08:32:03.262093 | 2012-04-05T07:25:13 | 2012-04-05T07:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,376 | py | import os
import fnmatch
import datetime
from django.shortcuts import render_to_response, get_object_or_404, HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from sgrstats.settings import SIGNATURE_IMAGES_PATH, SIGNATURE_IMAGES_URL
from django.contrib.auth.models import User
from sgrstats.stats.models import UserProfile
from sgrstats.stats.views import get_player_objectives
from forms import SettingsForm
from core.views import update_online_users
@login_required
@update_online_users
def settings(request):
    """Render the account settings page for the logged-in user."""
    context = RequestContext(request)
    return render_to_response('accounts/settings.html', {}, context_instance=context)
@login_required
def update_settings(request):
    """Persist the "show on rankings" preference submitted via POST."""
    if request.method == 'POST':
        form = SettingsForm(request.POST)
        if form.is_valid():
            profile = UserProfile.objects.get(user=request.user)
            # Normalize the checkbox value to a plain boolean.
            profile.show_on_rankings = bool(form.cleaned_data['show_on_rankings'])
            profile.save()
            messages.add_message(request, messages.SUCCESS, 'Your profile has been successfully updated.')
        else:
            messages.add_message(request, messages.ERROR, message = 'Error occured when trying to update your settings.')
    return render_to_response('other/message.html', {}, context_instance=RequestContext(request))
@login_required
def link_account(request, account_id):
    """Link the FireSky account `account_id` to the logged-in website account."""
    stats = get_player_objectives(request, account_id)
    next_url = request.REQUEST.get('next', '')

    # No stats means the FireSky account id does not exist.
    if not stats:
        messages.add_message(request, messages.ERROR, 'Player with this ID does not exist!')
        return HttpResponseRedirect(reverse('account_settings'))

    profile = UserProfile.objects.get(user=request.user)
    profile.account_id = int(account_id)
    profile.save()

    if next_url:
        return HttpResponseRedirect(next_url)

    messages.add_message(request, messages.SUCCESS, 'Your website account <strong>%s</strong> has been successfully linked to a FireSky account with account id <strong>%s</strong>' % (request.user.username, account_id))
    return HttpResponseRedirect(reverse('account_settings'))
@login_required
def unlink_account(request):
    """Detach the linked FireSky account from the logged-in website account."""
    profile = UserProfile.objects.get(user=request.user)
    next_url = request.REQUEST.get('next', '')

    # Nothing to unlink if no FireSky account is attached.
    if not profile.account_id:
        messages.add_message(request, messages.ERROR, 'You have no FireSky account linked to this website account!')
        return HttpResponseRedirect(reverse('account_settings'))

    old_account_id = profile.account_id
    profile.account_id = None
    profile.save()

    if next_url:
        return HttpResponseRedirect(next_url)

    messages.add_message(request, messages.SUCCESS, 'FireSky account with account id <strong>%s</strong> has been successfully unlinked from your website account (<strong>%s</strong>)' % (old_account_id, request.user.username))
    return HttpResponseRedirect(reverse('account_settings'))
@login_required
def link_form(request):
    """Render the form used to link a FireSky account to the website account."""
    return render_to_response('accounts/link_account_form.html', {}, context_instance = RequestContext(request))
@login_required
@update_online_users
def signature_images(request):
    """List the signature images available for the user's linked account."""
    user = User.objects.get(pk=request.user.id)
    profile = UserProfile.objects.get(user=user)

    # Requires a linked FireSky account and the dynamic-signature feature on.
    if not profile.account_id or not profile.dynamic_signature:
        messages.add_message(request, messages.ERROR, 'You have no FireSky account linked to your profile or signature image generation is disabled')
        return HttpResponseRedirect(reverse('account_settings'))

    templates = get_available_templates()
    signatures = (get_available_signature_images_for_account_id(templates, profile.account_id)
                  if templates else None)

    return render_to_response('accounts/signature_images.html',
                              {'available_templates': templates,
                               'available_signatures': signatures,
                               'images_url': SIGNATURE_IMAGES_URL},
                              context_instance=RequestContext(request))
@login_required
@update_online_users
def signature_image_details(request, template_name):
    """Show one signature image rendered from `template_name` for the user."""
    user = User.objects.get(pk=request.user.id)
    profile = UserProfile.objects.get(user=user)

    # Feature must be enabled and a FireSky account linked.
    if not profile.account_id or not profile.dynamic_signature:
        raise Http404()

    # The requested template must be one of the known template directories.
    known_names = [os.path.split(template)[1] for template in get_available_templates()]
    if template_name not in known_names:
        raise Http404()

    # A generated image for this account must actually exist on disk.
    if not signature_image_exists(template_name, profile.account_id):
        raise Http404()

    signature_path = get_signature_image_name_for_template_name_and_account_id(
        template_name, profile.account_id)

    return render_to_response('accounts/signature_image_details.html',
                              {'template': template_name,
                               'signature_path': signature_path,
                               'images_url': SIGNATURE_IMAGES_URL},
                              context_instance=RequestContext(request))
@login_required
@update_online_users
def dynamic_signature(request, status = 'enable'):
    """Enable or disable dynamic signature image generation for the user.

    Fix: the original fetched `user.get_profile()` twice on top of the
    explicit UserProfile query and kept an unused `account_id` local;
    the flag is now read from the profile object we already have.
    """
    user = User.objects.get(pk=request.user.id)
    user_profile = UserProfile.objects.get(user=user)
    currently_enabled = user_profile.dynamic_signature == 1

    if status == 'enable':
        if currently_enabled:
            messages.add_message(request, messages.ERROR, 'Dynamic signature image generation is not disabled!')
        else:
            user_profile.dynamic_signature = True
            messages.add_message(request, messages.SUCCESS, 'You have successfully enabled dynamic signature image generation.')
    elif status == 'disable':
        if currently_enabled:
            user_profile.dynamic_signature = False
            messages.add_message(request, messages.SUCCESS, 'You have successfully disabled dynamic signature image generation.')
        else:
            messages.add_message(request, messages.ERROR, 'Dynamic signature image generation is not enabled!')

    # save() is intentionally unconditional to preserve the original behavior,
    # even when nothing changed.
    user_profile.save()
    return HttpResponseRedirect(reverse('account_settings'))
# helper functions
def get_available_templates():
    """ Returns available signature templates (absolute directory paths). """
    templates = []
    for entry in os.listdir(SIGNATURE_IMAGES_PATH):
        full_path = os.path.join(SIGNATURE_IMAGES_PATH, entry)
        if os.path.isdir(full_path):
            templates.append(full_path)
    return templates
def get_available_signature_images_for_account_id(available_templates, account_id):
    """ Returns all the available signature images for the given account id.

    Each result is a (template_name, relative_path, extension) tuple.
    """
    pattern = '%s*' % account_id
    results = []
    for template in available_templates:
        for _root, _dirs, files in os.walk(template):
            matches = fnmatch.filter(files, pattern)
            if not matches:
                continue
            template_name = os.path.split(template)[1]
            relative_path = os.path.join(template_name, matches[0]).replace('\\', '/')
            extension = os.path.splitext(relative_path)[1]
            results.append((template_name, relative_path, extension))
    return results
def get_signature_image_name_for_template_name_and_account_id(template_name, account_id):
    """ Returns signature image url (template name + matching file name), or None. """
    pattern = '%s*' % account_id
    template_dir = os.path.join(SIGNATURE_IMAGES_PATH, template_name)
    for _root, _dirs, files in os.walk(template_dir):
        matches = fnmatch.filter(files, pattern)
        if matches:
            return os.path.join(template_name, matches[0]).replace('\\', '/')
    return None
def signature_image_exists(template, account_id):
    """ Check if a signature image for `template` exists for `account_id`. """
    pattern = '%s*' % account_id
    template_dir = os.path.join(SIGNATURE_IMAGES_PATH, template)
    return any(fnmatch.filter(files, pattern)
               for _root, _dirs, files in os.walk(template_dir))
"tomaz@tomaz.me"
] | tomaz@tomaz.me |
be8fee0b6bd84369dcb6184b9d336616c62b9c1e | 52381a4fc02e90ce1fcfffd8d9876d9e8f44c248 | /core/domain/improvements_domain.py | 25ef52e9fe1a9039bf11be65260e769fa9f4e94e | [
"Apache-2.0"
] | permissive | ankita240796/oppia | 18aa1609a0f237ce76142b2a0d3169e830e5bcdd | ba4f072e494fd59df53fecc37e67cea7f9727234 | refs/heads/develop | 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 | Apache-2.0 | 2020-04-28T16:12:26 | 2018-12-06T06:02:18 | Python | UTF-8 | Python | false | false | 7,962 | py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects related to Oppia improvement tasks."""
from __future__ import annotations
import datetime
from core import feconf
from core import utils
from core.constants import constants
from typing import Optional
from typing_extensions import TypedDict
class TaskEntryDict(TypedDict):
    """Dict for TaskEntry object.

    Mirrors the keys produced by TaskEntry.to_dict().
    """

    entity_type: str
    entity_id: str
    entity_version: int
    task_type: str
    target_type: str
    target_id: str
    issue_description: Optional[str]
    status: str
    # The three fields below are only populated for resolved tasks.
    resolver_username: Optional[str]
    resolver_profile_picture_data_url: Optional[str]
    resolved_on_msecs: Optional[float]
class TaskEntry:
    """Domain object representing an actionable task from the improvements tab.

    Attributes:
        entity_type: str. The type of entity the task entry refers to.
            For example, "exploration".
        entity_id: str. The ID of the entity the task entry refers to.
            For example, an exploration ID.
        entity_version: int. The version of the entity the task entry refers to.
            For example, an exploration's version.
        task_type: str. The type of task the task entry tracks.
        target_type: str. The type of sub-entity the task entry refers to.
            For example, "state" when entity type is "exploration".
        target_id: str. The ID of the sub-entity the task entry refers to.
            For example, the state name of an exploration.
        issue_description: str or None. The sentence generated by Oppia to
            describe why the task was created.
        status: str. Tracks the state/progress of the task entry.
        resolver_id: str or None. The corresponding user who resolved this task.
        resolved_on: datetime or None. The datetime at which this task was
            resolved.
    """

    def __init__(
        self,
        entity_type: str,
        entity_id: str,
        entity_version: int,
        task_type: str,
        target_type: str,
        target_id: str,
        issue_description: Optional[str],
        status: str,
        resolver_id: Optional[str] = None,
        resolved_on: Optional[datetime.datetime] = None
    ) -> None:
        """Initializes a new TaskEntry domain object from the given values.

        Args:
            entity_type: str. The type of entity the task entry refers to.
                For example: "exploration".
            entity_id: str. The ID of the entity the task entry refers to.
                For example: an exploration ID.
            entity_version: int. The version of the entity the task entry refers
                to. For example: an exploration's version.
            task_type: str. The type of task the task entry tracks.
            target_type: str. The type of sub-entity the task entry refers to.
                For example, when entity type is "exploration": "state".
            target_id: str. The ID of the sub-entity the task entry refers to.
                For example, the state name of an exploration.
            issue_description: str. The sentence generated by Oppia to describe
                why the task was created.
            status: str. Tracks the state/progress of the task entry.
            resolver_id: str. The corresponding user who resolved this task.
                Only used when status is resolved, otherwise replaced with None.
            resolved_on: datetime. The datetime at which this task was resolved.
                Only used when status is resolved, otherwise replaced with None.
        """
        # Resolution metadata only applies to resolved tasks; scrub it for
        # every other status so it can never leak stale resolver info.
        if status != constants.TASK_STATUS_RESOLVED:
            resolver_id = None
            resolved_on = None
        self.entity_type = entity_type
        self.entity_id = entity_id
        self.entity_version = entity_version
        self.task_type = task_type
        self.target_type = target_type
        self.target_id = target_id
        self.issue_description = issue_description
        self.status = status
        self.resolver_id = resolver_id
        self.resolved_on = resolved_on

    @property
    def task_id(self) -> str:
        """Returns the unique identifier of this task.

        Value has the form: "[entity_type].[entity_id].[entity_version].
        [task_type].[target_type].[target_id]"

        Returns:
            str. The ID of this task.
        """
        return feconf.TASK_ENTRY_ID_TEMPLATE % (
            self.entity_type, self.entity_id, self.entity_version,
            self.task_type, self.target_type, self.target_id)

    @property
    def composite_entity_id(self) -> str:
        """Utility field which results in a 20% speedup compared to querying by
        each of the invididual fields used to compose it.

        Value has the form: "[entity_type].[entity_id].[entity_version]".

        Returns:
            str. The value of the utility field.
        """
        return feconf.COMPOSITE_ENTITY_ID_TEMPLATE % (
            self.entity_type, self.entity_id, self.entity_version)

    def to_dict(self) -> TaskEntryDict:
        """Returns a dict-representation of the task.

        Returns:
            dict. Contains the following keys:
                entity_type: str. The type of entity the task entry refers to.
                    For example, "exploration".
                entity_id: str. The ID of the entity the task entry refers to.
                    For example, an exploration ID.
                entity_version: int. The version of the entity the task entry
                    refers to. For example, an exploration's version.
                task_type: str. The type of task the task entry tracks.
                target_type: str. The type of sub-entity the task entry refers
                    to. For example, "state" when entity type is "exploration".
                target_id: str. The ID of the sub-entity the task entry refers
                    to. For example, the state name of an exploration.
                issue_description: str. The sentence generated by Oppia to
                    describe why the task was created.
                status: str. Tracks the state/progress of the task entry.
                resolver_username: str|None. Username of the user who resolved
                    the task when status is resolved. Otherwise None.
                resolver_profile_picture_data_url: str|None. Profile picture
                    URL of the user who resolved the task when status is
                    resolved. Otherwise None.
                resolved_on_msecs: float|None. Time in
                    milliseconds since epoch at which the task was resolved
                    when status is resolved. Otherwise None.
        """
        # NOTE: resolver_username/profile picture are left as None here;
        # presumably a service layer fills them in from resolver_id.
        return {
            'entity_type': self.entity_type,
            'entity_id': self.entity_id,
            'entity_version': self.entity_version,
            'task_type': self.task_type,
            'target_type': self.target_type,
            'target_id': self.target_id,
            'issue_description': self.issue_description,
            'status': self.status,
            'resolver_username': None,
            'resolver_profile_picture_data_url': None,
            'resolved_on_msecs': (
                None if not self.resolved_on
                else utils.get_time_in_millisecs(self.resolved_on)),
        }
"noreply@github.com"
] | ankita240796.noreply@github.com |
4e589001fd28d974fbc0d7686671cff17e3ac70a | 999ed80db247794159be1d752bc6f0fc272bd117 | /spytest/spytest/tcmap.py | 5ddaceb157db255ffe9cef77d2518ac2add1e673 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | ramakristipati/sonic-mgmt | 7fee876412f0121da96d751f7d199690c73496f3 | a86f0e5b1742d01b8d8a28a537f79bf608955695 | refs/heads/master | 2023-08-31T07:55:38.446663 | 2023-08-31T06:34:53 | 2023-08-31T06:34:53 | 315,448,103 | 2 | 0 | NOASSERTION | 2020-11-23T21:44:07 | 2020-11-23T21:44:07 | null | UTF-8 | Python | false | false | 18,352 | py | import os
import re
import csv
import threading
from functools import cmp_to_key
from collections import OrderedDict
from spytest.dicts import SpyTestDict
from spytest import env
import utilities.common as utils
# Global cache of the parsed tcmap data; populated lazily by load().
_tcm = SpyTestDict()
# Guards concurrent mutation of the per-module/per-function entries in _tcm.
g_lock = threading.Lock()
def get(reload=False):
    """Return the cached tcmap, loading it on first use or when forced."""
    if reload or not _tcm:
        load()
    return _tcm
def get_tclist(func):
    """Return testcase ids mapped to `func`, handling pytest parametrization.

    For "name[param]" style functions, the parametrization suffix is appended
    to each testcase id mapped to the base function name.
    """
    if func in _tcm.tclist:
        return _tcm.tclist[func]
    parts = func.split("[")
    if len(parts) < 2:
        return []
    base, suffix = parts[0], parts[1]
    if base not in _tcm.tclist:
        return []
    return ["{}[{}".format(tc, suffix) for tc in _tcm.tclist[base]]
def get_current_releases():
    # Comma-separated list of releases considered "current" (or None if unset).
    return env.get("SPYTEST_TCMAP_CURRENT_RELEASES", None)
def is_regression_tc(tcid):
    """Classify `tcid` against the configured current releases.

    Returns None when no current releases are configured, False when the id is
    unknown or belongs to a current release, True otherwise (regression).
    """
    releases = get_current_releases()
    if not releases:
        return None
    if tcid not in _tcm.release:
        return False
    return _tcm.release[tcid] not in utils.csv2list(releases)
def get_comp(tcid, default=None):
    """Return the component for `tcid`, ignoring any parametrization suffix.

    :param tcid: testcase id, optionally with a "[param]" suffix
    :param default: value returned when the id is not mapped
    """
    # dict.get() replaces the manual membership test + indexing.
    return _tcm.comp.get(tcid.split("[")[0], default)
def get_func(tcid, default=None):
    """Return the function name for `tcid`, ignoring any parametrization suffix.

    :param tcid: testcase id, optionally with a "[param]" suffix
    :param default: value returned when the id is not mapped
    """
    # dict.get() replaces the manual membership test + indexing.
    return _tcm.func.get(tcid.split("[")[0], default)
def get_owner(name):
    # Owner string for a module/test name from owners.csv, or "" when unmapped.
    return _tcm.owners.get(name, "")
def get_module_info(path, onload=False):
    """Return a tcmap settings entry for a module path.

    The entry carries per-module knobs (uitype, fcli, tryssh, random, maxtime,
    ts) whose defaults come from the environment.

    Fix: the lock is now released in a ``finally`` block, so an exception
    raised while building the entry (e.g. from env.getint) can no longer
    leave ``g_lock`` held and deadlock every subsequent caller.

    :param path: module path; only its basename is used as the lookup key
    :param onload: True while parsing module_info.csv, which disables the
        "name--variant.py" alias resolution
    :return: SpyTestDict with the module settings
    """
    name = os.path.basename(path)
    if g_lock:
        g_lock.acquire()
    try:
        # Build a fresh entry with environment-driven defaults.
        rv = SpyTestDict()
        rv.name = name
        rv.uitype = ""
        rv.fcli = 0
        rv.fcli = env.getint("SPYTEST_TCMAP_DEFAULT_FASTER_CLI", "0")
        rv.tryssh = env.getint("SPYTEST_TCMAP_DEFAULT_TRYSSH", "0")
        rv.random = 0
        rv.maxtime = 0
        rv.ts = 1
        rv.path = path
        if name not in _tcm.module_info:
            # "module--variant.py" falls back to the base "module.py" entry.
            if "--" in name and not onload:
                name = name.split("--")[0] + ".py"
            if name in _tcm.module_info:
                rv = _tcm.module_info[name]
            else:
                _tcm.module_info[name] = rv
        # NOTE(review): when `name` is already registered, a fresh default
        # entry is returned instead of the cached one — this looks suspicious
        # but is preserved to keep behavior unchanged; confirm intent before
        # altering it.
        return rv
    finally:
        if g_lock:
            g_lock.release()
def get_function_info(name):
    """Return (creating if needed) the tcmap settings entry for a function.

    Fix: the lock is now released in a ``finally`` block, so an exception
    raised while creating the entry can no longer leave ``g_lock`` held and
    deadlock every subsequent caller.
    """
    if g_lock:
        g_lock.acquire()
    try:
        if "function_info" not in _tcm:
            _tcm.function_info = OrderedDict()
        if name not in _tcm.function_info:
            rv = SpyTestDict()
            rv.maxtime = 0
            _tcm.function_info[name] = rv
        return _tcm.function_info[name]
    finally:
        if g_lock:
            g_lock.release()
def _add_entry(release, comp, tcid, func, marker=False):
    # Registers a testcase-id -> function mapping, collecting duplicate-id
    # errors along the way (the two messages differ by a trailing period so
    # the origin of the duplicate can be distinguished).
    if tcid in _tcm.release:
        msg = "duplicate test case id {}"
        _tcm.errors.append(msg.format(tcid))
    if func not in _tcm.tclist:
        _tcm.tclist[func] = []
    if tcid not in _tcm.tclist[func]:
        _tcm.tclist[func].append(tcid)
    elif tcid not in _tcm.release:
        # duplicate error message not yet added
        msg = "duplicate test case id {}."
        _tcm.errors.append(msg.format(tcid))
    # Appends "N" when `marker` is set, else "O" — presumably tracking the
    # origin of each mapping; confirm the letter semantics before relying on it.
    _tcm.marker[tcid] = "".join([_tcm.marker.get(tcid, ""), "N" if marker else "O"])
    _tcm.release[tcid] = release
    _tcm.comp[tcid] = comp
    _tcm.func[tcid] = func
def _load_csv(csv_file, path):
if path is not None:
path = os.path.join(os.path.dirname(__file__), '..', path)
csv_file = os.path.join(os.path.abspath(path), csv_file)
if os.path.exists(csv_file):
filepath = csv_file
else:
return []
rows = []
with open(filepath, 'r') as fd:
for row in csv.reader(fd):
rows.append(row)
fd.close()
return rows
def _load_csv_files(csv_files):
    """Concatenate the rows of a comma-separated list of reporting CSV files."""
    rows = []
    for csv_file in csv_files.split(","):
        rows.extend(_load_csv(csv_file, "reporting"))
    return rows
def _load_csvs(name, default):
    # Resolve the CSV file list from environment variable `name`, falling
    # back to `default`, and load all listed files.
    csv_files = env.get(name, default)
    return _load_csv_files(csv_files)
def load(do_verify=True, items=None, tcmap_csv=None):
    """(Re)build the global tcmap cache from the reporting CSV files.

    Parses owners.csv, module_info.csv, function_info.csv and the tcmap CSVs
    into the module-level `_tcm` cache, then optionally cross-checks the
    result against the collected pytest `items`.

    Fix: the function_info.csv loop called ``_tcm.get_function_info(name)``,
    but ``get_function_info`` is a module-level function (not an attribute of
    the SpyTestDict cache), so that lookup would fail at runtime; it now
    calls the function directly, mirroring the ``get_module_info`` call above.

    :param do_verify: when True, run verify() on the loaded data
    :param items: collected pytest items passed through to verify()
    :param tcmap_csv: override for the tcmap CSV file list
    :return: the populated `_tcm` cache
    """
    # Reset every table so repeated load() calls start from a clean slate.
    _tcm.tclist = OrderedDict()
    _tcm.marker = OrderedDict()
    _tcm.release = OrderedDict()
    _tcm.comp = OrderedDict()
    _tcm.func = OrderedDict()
    _tcm.modules = OrderedDict()
    _tcm.owners = OrderedDict()
    _tcm.module_info = OrderedDict()
    _tcm.function_info = OrderedDict()
    _tcm.errors = []
    _tcm.warnings = []
    _tcm.non_mapped = []
    _tcm.platform_info = read_platform_info()

    # Name,Owner[,Owner...]
    for row in _load_csvs("SPYTEST_MODULE_OWNERS_CSV_FILENAME", "owners.csv"):
        if len(row) < 2:
            continue
        name, owner = row[0].strip(), ",".join(row[1:])
        if name.startswith("#"):
            continue
        _tcm.owners[name] = owner

    # Module,UIType,FasterCLI,TrySSH,MaxTime,TS
    for row in _load_csvs("SPYTEST_MODULE_INFO_CSV_FILENAME", "module_info.csv"):
        if len(row) < 6:
            continue
        name, uitype, fcli, tryssh, random, maxtime = [str(i).strip() for i in row[:6]]
        if name.strip().startswith("#"):
            continue
        ts = "1" if len(row) < 7 else row[6]
        ent = get_module_info(name, True)
        ent.uitype = uitype
        ent.fcli = utils.integer_parse(fcli, env.getint("SPYTEST_TCMAP_DEFAULT_FASTER_CLI", "0"))
        ent.tryssh = utils.integer_parse(tryssh, env.getint("SPYTEST_TCMAP_DEFAULT_TRYSSH", "0"))
        ent.random = utils.integer_parse(random, 0)
        ent.maxtime = utils.integer_parse(maxtime, 0)
        ent.ts = utils.integer_parse(ts, 1)

    # Function,MaxTime
    for row in _load_csvs("SPYTEST_FUNCTION_INFO_CSV_FILENAME", "function_info.csv"):
        if len(row) < 2:
            continue
        name, maxtime = [str(i).strip() for i in row[:2]]
        if name.strip().startswith("#"):
            continue
        # Fix: was `_tcm.get_function_info(name)` (no such attribute).
        ent = get_function_info(name)
        ent.maxtime = utils.integer_parse(maxtime, 0)

    csv_files = tcmap_csv or env.get("SPYTEST_TCMAP_CSV_FILENAME", "tcmap.csv")
    for row in _load_csv_files(csv_files):
        # Release,Feature,TestCaseID,FunctionName
        if len(row) == 3:
            # TODO treat the data as module
            release, comp, name0 = row[0], row[1], row[2]
            if release.strip().startswith("#"):
                continue
            for name in utils.list_files(name0, "*.py"):
                if name in _tcm.modules:
                    msg = "duplicate module {}"
                    _tcm.errors.append(msg.format(name))
                    continue
                module = SpyTestDict()
                module.release = release
                module.comp = comp
                module.name = name
                _tcm.modules[name] = module
            continue
        if len(row) < 4:
            if row and not row[0].strip().startswith("#"):
                print("Invalid line", row)
            continue
        release, comp, tcid, func = row[0], row[1], row[2], row[3]
        if release.strip().startswith("#"):
            continue
        _add_entry(release, comp, tcid, func)

    # verify the tcmap if required
    if do_verify:
        verify(items)

    return _tcm
def verify(items=None):
    """Cross-check the loaded tcmap against the collected pytest items.

    Expands module-level tcmap rows into per-function entries, records
    multi-release/multi-component inconsistencies into _tcm.errors/warnings,
    and collects functions without any tcmap entry into _tcm.non_mapped.
    """
    items = items or []
    # create hashes to search module
    fspath_map, basename_map = {}, {}
    for name, module in _tcm.modules.items():
        fspath = os.path.join(os.path.dirname(__file__), '..', 'tests', name)
        fspath = os.path.abspath(fspath)
        fspath_map[fspath] = module
        basename_map[os.path.basename(name)] = module
    # expand the modules: try the raw location, then basename, then abs path
    for item in items:
        module = _tcm.modules.get(item.location[0], None)
        module = module or basename_map.get(item.location[0], None)
        module = module or fspath_map.get(item.fspath.strpath, None)
        if not module:
            continue
        func = item.location[2]
        # module-mapped functions map 1:1 (tcid == function name)
        _add_entry(module.release, module.comp, func, func)
    # check if any function mapped in multiple releases
    for func, tcid_list in _tcm.tclist.items():
        releases = dict()
        for tcid in tcid_list:
            releases[_tcm.release[tcid]] = 1
        if len(releases) > 1:
            msg = "function {} is mapped to {} testcases in multiple releases {}"
            _tcm.errors.append(msg.format(func, len(tcid_list), releases))
    # check if any function mapped in multiple components
    for func, tcid_list in _tcm.tclist.items():
        components = dict()
        for tcid in tcid_list:
            components[_tcm.comp[tcid]] = 1
        if len(components) > 1:
            msg = "function {} is mapped to {} testcases in multiple components {}"
            # TODO: enable this once the issues are fixed in tcmap.csv
            # _tcm.errors.append(msg.format(func, len(tcid_list), components.keys()))
            _tcm.warnings.append(msg.format(func, len(tcid_list), components.keys()))
    # find items without tcmap entry
    for item in items:
        func = item.location[2]
        tclist = get_tclist(func)
        count = len(tclist)
        if count > 1:
            continue
        # no mapping, or only the identity mapping added above
        if count == 0 or tclist[0] == func:
            _tcm.non_mapped.append(func)
def parse_module_csv_row(row):
    """Parse one modules.csv row into (bucket, order, name, topo).

    Comment rows and malformed rows yield the sentinel ("#", 0, 0, 0).
    A two-column row (module name plus one constraint) yields
    (0, 0, name, [constraint]).  Note: a numeric third column (thread
    preference) is popped from *row* in place before the remaining
    columns are interpreted.
    """
    invalid = ("#", 0, 0, 0)
    if not row or row[0].startswith("#"):
        return invalid
    if len(row) == 2:
        # happens when --change-module-csv with just
        # module name and additional constraints
        return 0, 0, row[0], [row[1]]
    if len(row) < 3:
        print("1. invalid module params: {}".format(row))
        return invalid
    if utils.integer_parse(row[2]) is not None:
        # drop the optional thread-preference column (in-place, on purpose)
        row.pop(2)
        if len(row) < 3:
            print("2. invalid module params: {}".format(row))
            return invalid
    topo = row[3:]
    bucket, order, name0 = (str(col).strip() for col in row[:3])
    if bucket.startswith("#"):
        return invalid
    return bucket, order, name0, topo
def get_module_csv_path(module_csv):
    """Resolve a comma-separated list of module csv names to existing paths.

    Names that do not exist as given are looked up under the package's
    ``reporting`` directory; names found in neither place are reported on
    stdout and dropped from the result.
    """
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    reporting = os.path.join(root, "reporting")
    resolved = []
    for filepath in module_csv.split(","):
        candidate = filepath if os.path.exists(filepath) \
            else os.path.join(reporting, filepath)
        if os.path.exists(candidate):
            resolved.append(candidate)
        else:
            print("module csv {} not found".format(filepath))
    return resolved
def read_module_csv(append_modules_csv=None, change_modules_csv=None,
                    module_csv=None):
    """Read and merge module csv data.

    Reads the csv file(s) named by ``module_csv`` (default from
    SPYTEST_MODULE_CSV_FILENAME / "modules.csv"), appends extra rows given
    as ``append_modules_csv`` lines, applies per-module overrides from
    ``change_modules_csv`` lines, and expands "<name>.py.<repeat>" repeat
    entries.  Returns (module_csv, module_rows, repeated, renamed).
    """
    module_csv = module_csv or env.get("SPYTEST_MODULE_CSV_FILENAME", "modules.csv")
    module_rows, repeated, rows = [], {}, []
    # read the csv files
    for csv_file in get_module_csv_path(module_csv):
        with open(csv_file, 'r') as fd:
            for row in csv.reader(fd):
                rows.append(row)
            # NOTE(review): close() is redundant inside the with block
            fd.close()
    # append augmented lines
    for line in append_modules_csv or []:
        line2 = " ".join(utils.make_list(line))
        for row in csv.reader([line2]):
            rows.append(row)
    # rows dict
    # NOTE(review): for 2-column rows parse_module_csv_row returns bucket
    # as int 0, so bucket.startswith here would raise -- confirm such rows
    # never occur in the csv files themselves.
    row_dict = {}
    for row in rows:
        bucket, order, name0, topo = parse_module_csv_row(row)
        if not bucket.startswith("#"):
            row_dict[name0] = [bucket, order, name0, topo]
    # parse changed lines
    change_modules1, change_modules2, renamed = {}, {}, {}
    for line in change_modules_csv or []:
        line2 = " ".join(utils.make_list(line))
        for row in csv.reader([line2]):
            bucket, order, name0, topo = parse_module_csv_row(row)
            # use module name even when the repeat name is specified
            parts = name0.split(".py.")
            name = "{}.py".format(parts[0]) if len(parts) > 1 else name0
            if name0 not in row_dict:
                # repeat name is specified
                renamed[name] = name0
            # when only constraints are specified order will be 0
            if order != 0:
                change_modules1[name] = bucket, order, name, topo
            else:
                change_modules2[name] = topo
    # parse the rows
    for row in rows:
        bucket, order, name0, topo = parse_module_csv_row(row)
        if bucket.startswith("#"):
            continue
        if name0 in change_modules1:
            # full override: bucket/order/topo replaced wholesale
            bucket, order, name0, topo = change_modules1[name0]
        elif name0 in change_modules2:
            # constraint-only override: appended to the last topo element
            topo[-1] = " ".join([topo[-1], change_modules2[name0][0]])
        # get the repeat name if specified with --change-module-csv
        name0 = renamed.get(name0, name0)
        parts = name0.split(".py.")
        if len(parts) > 1:
            if env.get("SPYTEST_REPEAT_MODULE_SUPPORT") == "0":
                continue
            name = "{}--{}.py".format(parts[0], parts[1])
            module_row = [bucket, order, name]
            pname = "{}.py".format(parts[0])
            if pname not in repeated:
                repeated[pname] = []
            # skip duplicate repeat entries for the same parent module
            found = False
            for data in repeated[pname]:
                if data.repeat_name == parts[1]:
                    found = True
                    break
            if found:
                continue
            data = SpyTestDict(repeat_name=parts[1],
                               repeat_topo=",".join(topo))
            repeated[pname].append(data)
        else:
            module_row = [bucket, order, name0]
        module_row.extend(topo)
        module_rows.append(module_row)
    return module_csv, module_rows, repeated, renamed
def read_platform_info():
    """Load platform metadata from reporting/platform-info.csv.

    Returns a dict mapping platform name to a SpyTestDict with keys
    ``nos``, ``chip``, ``chip_rev``, ``chip_disp`` and ``platform_disp``.
    Rows with fewer than four columns or containing '#' anywhere in the
    first column are skipped.  Returns an empty dict when the csv file
    is absent.
    """
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    csv_file = os.path.join(root, "reporting", "platform-info.csv")
    retval = {}
    if os.path.exists(csv_file):
        # the with-statement closes the file; the explicit fd.close()
        # that used to follow the loop was redundant and is removed
        with open(csv_file, 'r') as fd:
            for row in csv.reader(fd):
                if len(row) < 4 or "#" in row[0]:
                    continue
                platform, nos, chip, rev = row[0:4]
                retval[platform] = SpyTestDict()
                retval[platform].nos = nos
                retval[platform].chip = chip
                retval[platform].chip_rev = rev
                retval[platform].chip_disp = get_chip_disp(chip, rev)
                # optional fifth column overrides the display name
                platform_disp = platform if len(row) == 4 else row[4]
                retval[platform].platform_disp = platform_disp
    return retval
def get_all_chips():
    """Return the static [chip, revision, display-name] table."""
    table = (
        ("TH", "NA", "TH"),
        ("TH2", "NA", "TH2"),
        ("TH3", "NA", "TH3"),
        ("TD2", "NA", "TD2"),
        ("TD3", "X2", "TD3-X2"),
        ("TD3", "X3", "TD3-X3"),
        ("TD3", "X5", "TD3-X5"),
        ("TD3", "X7", "TD3-X7"),
        ("TD4", "X9", "TD4-X9"),
        ("TD4", "X11", "TD4-X11"),
        ("TH4", "NA", "TH4"),
    )
    return [list(entry) for entry in table]
def validate_chip_disp(chip):
    """Normalize a chip display name.

    Drops "-NA" suffixes, maps "TH3-X7" to "TH3" and "TH1" to "TH",
    stripping surrounding whitespace from the result.
    """
    cleaned = chip.replace("-NA", "").replace("TH3-X7", "TH3")
    if cleaned == "TH1":
        return "TH"
    return cleaned.strip()
def get_chip_disp(chip, chip_rev):
    """Build a display name, appending the revision when it is meaningful."""
    use_rev = bool(chip and chip_rev) and chip_rev not in ("NA", "UNKNOWN")
    disp = "{}-{}".format(chip, chip_rev) if use_rev else chip
    return validate_chip_disp(disp)
def get_all_chips_new():
    """Return the platform-info records for all known platforms."""
    info = get().platform_info
    return list(info.values())
def get_all_platforms():
    """Return the names of all known platforms."""
    info = get().platform_info
    return list(info.keys())
def get_platform_info(platform):
    """Return the info record for *platform*, or {} when unknown."""
    info = get().platform_info
    return info.get(platform, {})
def get_chip_platforms(chip_disp):
    """Return all platform names whose chip display name matches."""
    info = get().platform_info
    return [platform for platform, data in info.items()
            if data.chip_disp == chip_disp]
def inventory(func, tcid, release, feature):
    # Register a single testcase mapping from inventory data; the final
    # True argument presumably flags the entry as inventory-sourced in
    # _add_entry -- TODO confirm against _add_entry's signature.
    _add_entry(release, feature, tcid, func, True)
def read_coverage_history(csv_file):
    """Parse a coverage-history csv into per-module chip/platform maps.

    The header row must contain a "CHIP CV" (or "Chip CV") marker column
    followed by chip columns, then a "Platform CV" marker followed by
    platform columns.  Returns ``(chip_cov, platform_cov)`` where each
    maps module name -> {column: value}.  Both are empty when the file
    is missing or either marker column is absent.
    """
    cols, rows = None, []
    if os.path.exists(csv_file):
        # use a context manager so the handle is closed even on errors
        # (the original opened/closed the file manually and could leak)
        with open(csv_file, 'r') as fd:
            for row in csv.reader(fd):
                if not cols:
                    cols = row
                else:
                    rows.append(row)
    chip_cov, platform_cov = {}, {}
    platform_start, chip_start = -1, -1
    if cols:
        if "Platform CV" in cols:
            platform_start = cols.index("Platform CV") + 1
        if "CHIP CV" in cols:
            chip_start = cols.index("CHIP CV") + 1
        elif "Chip CV" in cols:
            chip_start = cols.index("Chip CV") + 1
    if platform_start < 0 or chip_start < 0:
        return chip_cov, platform_cov
    for row in rows:
        module = row[0]
        chip_cov[module] = {}
        platform_cov[module] = {}
        for index, col in enumerate(cols):
            if index < chip_start or index == platform_start - 1:
                # skip leading columns and the "Platform CV" marker itself
                continue
            elif index < platform_start:
                chip_cov[module][col] = row[index]
            else:
                platform_cov[module][col] = row[index]
    return chip_cov, platform_cov
def _print_msg(msg):
    # Default sink for save()'s diagnostics when the caller does not
    # supply a printerr callback.
    print(msg)
def save(match="ON", filepath=None, printerr=None):
    """Serialize the current tcmap to csv lines (optionally to a file).

    ``match`` filters entries by their marker: "O" keeps only O-marked
    testcases, "N" only N-marked ones; any other value keeps all.
    Returns the list of csv lines (header first); when ``filepath`` is
    given the joined lines are also written there.  ``printerr`` is a
    diagnostics callback and defaults to plain print.
    """
    printerr = printerr or _print_msg
    tcm = get()
    lines, funcs = [], []
    for func, testcases in tcm.tclist.items():
        if func in funcs:
            continue
        funcs.append(func)
        # keep only the de-duplicated testcases for this function
        testcases = utils.find_duplicate(testcases)[1]
        for tc in testcases:
            marker = tcm.marker.get(tc, "O")
            if match == "O" and "O" != marker:
                continue
            if match == "N" and "N" != marker:
                continue
            release = tcm.release.get(tc, "") or ""
            release = release.replace(" ", "").replace("_", "")
            if not release:
                printerr("=========== no release {}".format(tc))
                continue
            try:
                lines.append(",".join([release, tcm.comp[tc], tc, func]))
            except Exception:
                printerr("=========== exception check {}".format(tc))
    def cmp_items(a, b):
        # Normalize release aliases for sorting so "Buzznik" orders before
        # "Buzznik+" (as 1.0 < 2.0).  NOTE(review): the substitutions drop
        # the matched comma -- appears intentional for ordering only.
        a = re.sub(r"^Buzznik,", "Buzznik1.0", a)
        b = re.sub(r"^Buzznik,", "Buzznik1.0", b)
        a = re.sub(r"^Buzznik\+,", "Buzznik2.0", a)
        b = re.sub(r"^Buzznik\+,", "Buzznik2.0", b)
        if a > b:
            return 1
        if a == b:
            return 0
        return -1
    lines.sort(key=cmp_to_key(cmp_items))
    lines.insert(0, "#Release,Feature,TestCaseID,FunctionName")
    if filepath:
        utils.write_file(filepath, "\n".join(lines))
    return lines
| [
"noreply@github.com"
] | ramakristipati.noreply@github.com |
f5f81681f36f3471f4d27bbec8fce45ee8f30473 | 8157b3619467c8928f2c2d1669d115a00a4e1edc | /bert/optimization.py | 4b75429eaaf8be262b562847068edea6ec84d245 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | soft-pure-empty/GEC-reaching-human-level | 0e332849d45533de99ab8b991e25379c0b9c7cc2 | 2cd542b4fbbb40f426ae6e4625142de17f385744 | refs/heads/master | 2022-10-27T17:19:02.645578 | 2019-03-06T13:06:40 | 2019-03-06T13:06:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,261 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
  """Creates an optimizer training op.

  Args:
    loss: scalar loss tensor to minimize.
    init_lr: peak learning rate, reached right after warmup ends.
    num_train_steps: total number of steps; the rate decays linearly to
      zero over this horizon.
    num_warmup_steps: steps of linear warmup from 0 to init_lr (may be
      None/0 to disable warmup).
    use_tpu: when True, wrap the optimizer in a CrossShardOptimizer so
      gradients are aggregated across TPU shards.

  Returns:
    A train op that applies globally-clipped gradients and increments
    the global step.
  """
  global_step = tf.train.get_or_create_global_step()
  learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
  # Implements linear decay of the learning rate.
  learning_rate = tf.train.polynomial_decay(
      learning_rate,
      global_step,
      num_train_steps,
      end_learning_rate=0.0,
      power=1.0,
      cycle=False)
  # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
  # learning rate will be `global_step/num_warmup_steps * init_lr`.
  if num_warmup_steps:
    global_steps_int = tf.cast(global_step, tf.int32)
    warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
    global_steps_float = tf.cast(global_steps_int, tf.float32)
    warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
    warmup_percent_done = global_steps_float / warmup_steps_float
    warmup_learning_rate = init_lr * warmup_percent_done
    # is_warmup is 1.0 during warmup and 0.0 afterwards; it selects
    # between the warmup ramp and the decayed schedule.
    is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
    learning_rate = (
        (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
  # It is recommended that you use this optimizer for fine tuning, since this
  # is how the model was trained (note that the Adam m/v variables are NOT
  # loaded from init_checkpoint.)
  optimizer = AdamWeightDecayOptimizer(
      learning_rate=learning_rate,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  if use_tpu:
    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
  tvars = tf.trainable_variables()
  grads = tf.gradients(loss, tvars)
  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
  train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=global_step)
  # Normally the global step update is done inside of `apply_gradients`.
  # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
  # a different optimizer, you should probably take this line out.
  new_global_step = global_step + 1
  train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
  """A basic Adam optimizer that includes "correct" L2 weight decay."""
  def __init__(self,
               learning_rate,
               weight_decay_rate=0.0,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-6,
               exclude_from_weight_decay=None,
               name="AdamWeightDecayOptimizer"):
    """Constructs a AdamWeightDecayOptimizer.

    Args:
      learning_rate: scalar or tensor learning rate.
      weight_decay_rate: decoupled weight-decay coefficient.
      beta_1: exponential decay rate for the first-moment estimate.
      beta_2: exponential decay rate for the second-moment estimate.
      epsilon: small constant for numerical stability in the update.
      exclude_from_weight_decay: list of regex fragments; parameters whose
        names match any of them are not decayed.
      name: optional name for the optimizer.
    """
    super(AdamWeightDecayOptimizer, self).__init__(False, name)
    self.learning_rate = learning_rate
    self.weight_decay_rate = weight_decay_rate
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.exclude_from_weight_decay = exclude_from_weight_decay
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """See base class."""
    # NOTE: unlike stock Adam, no beta bias-correction is applied and the
    # global step is not incremented here (the caller does it).
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue
      param_name = self._get_variable_name(param.name)
      # m/v are per-parameter moment accumulators, created on first use.
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      # Standard Adam update.
      next_m = (
          tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
      next_v = (
          tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
                                                    tf.square(grad)))
      update = next_m / (tf.sqrt(next_v) + self.epsilon)
      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want ot decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD.
      if self._do_use_weight_decay(param_name):
        update += self.weight_decay_rate * param
      update_with_lr = self.learning_rate * update
      next_param = param - update_with_lr
      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    return tf.group(*assignments, name=name)
  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        # substring/regex match anywhere in the variable name
        if re.search(r, param_name) is not None:
          return False
    return True
  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name."""
    # strips the trailing ":0" (output index) from a tensor name
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name
| [
"334973834@qq.com"
] | 334973834@qq.com |
1b87df6e5c9001abd520146c6fc11f2b78351d09 | f124a2bc35fa348d5f5b637eae2a736d67470c76 | /tf-hub2/vector_calcu.py | f04be4c7015163255248a369b1555c7e845c8767 | [
"Apache-2.0"
] | permissive | arfu2016/DuReader | fd173c0eb90abedad0ca65bd9b847ccd58bf567a | 66934852c508bff5540596aa71d5ce40c828b37d | refs/heads/master | 2021-04-06T05:45:13.002887 | 2018-09-06T03:58:26 | 2018-09-06T03:58:26 | 124,838,393 | 0 | 0 | Apache-2.0 | 2018-03-12T05:35:13 | 2018-03-12T05:35:13 | null | UTF-8 | Python | false | false | 5,454 | py | """
@Project : DuReader
@Module : vector_calcu.py
@Author : Deco [deco@cubee.com]
@Created : 5/15/18 10:44 AM
@Desc :
"""
"""
Created on Sun Aug 20 14:40:29 2017
@author: zimuliu
"""
from functools import reduce
from math import acos, pi
import numpy as np
class Vector:
    """An n-dimensional vector with pure-python operations and numpy
    counterparts (the ``*2`` variants delegate to numpy).

    Coordinates are stored as an immutable tuple; ``dimension`` caches
    the length.
    """

    def __init__(self, coordinates):
        self.coordinates = tuple(coordinates)
        self.dimension = len(coordinates)

    def __str__(self):
        return "%dD Vector: %s" % (self.dimension,
                                   ', '.join(["%.3f" % round(x, 3)
                                              for x in self.coordinates]))

    def __eq__(self, v):
        """Two vectors are equal when their coordinates compare equal."""
        # bug fix: the original used "is", which is identity comparison and
        # is almost always False for two distinct tuple objects
        return self.coordinates == v.coordinates

    def _eq_dim(self, v):
        """Assert both vectors have the same dimension."""
        # bug fix: compare with "==" instead of "is" (int identity is an
        # implementation detail of CPython's small-int cache)
        assert self.dimension == v.dimension, \
            "The dimensions of vectors must be equal!"

    def _zero_vec(self):
        """Assert this vector is not the zero vector."""
        assert self.magnitude() != 0, "Encount with zero vector!"

    def plus(self, v):
        """Vector addition."""
        self._eq_dim(v)
        return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])

    def plus2(self, v):
        """Vector addition via numpy."""
        self._eq_dim(v)
        temp = np.array(self.coordinates) + np.array(v.coordinates)
        return Vector(temp.tolist())

    def minus(self, v):
        """Vector subtraction."""
        self._eq_dim(v)
        return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])

    def minus2(self, v):
        """Vector subtraction via numpy."""
        self._eq_dim(v)
        temp = np.array(self.coordinates) - np.array(v.coordinates)
        return Vector(temp.tolist())

    def scalar_mult(self, m):
        """Multiply the vector by a scalar."""
        return Vector([x * m for x in self.coordinates])

    def scalar_mult2(self, m):
        """Multiply the vector by a scalar via numpy."""
        temp = np.array(self.coordinates) * m
        return Vector(temp.tolist())

    def magnitude(self, *args):
        """Euclidean norm.  Extra positional args are ignored (kept for
        backward compatibility with existing callers)."""
        return sum(x ** 2 for x in self.coordinates) ** 0.5

    def magnitude2(self):
        """Euclidean norm via numpy."""
        return np.linalg.norm(self.coordinates)

    def direction(self, *args):
        """Unit vector in this vector's direction (raises on zero vector).
        Extra positional args are ignored for backward compatibility."""
        self._zero_vec()
        return self.scalar_mult(1 / self.magnitude())

    def dot_product(self, v):
        """Dot product of two vectors."""
        self._eq_dim(v)
        return sum(a * b for a, b in zip(self.coordinates, v.coordinates))

    def dot_product2(self, v):
        """Dot product via numpy, returned as a python scalar."""
        self._eq_dim(v)
        # debug prints removed; behavior otherwise unchanged
        temp = np.dot(np.array(self.coordinates), np.array(v.coordinates))
        return temp.tolist()

    def multiply_elementwise(self, v):
        """Element-wise (Hadamard) product, returned as a Vector."""
        self._eq_dim(v)
        return Vector([a * b for a, b in zip(self.coordinates, v.coordinates)])

    def multiply_elementwise2(self, v):
        """Element-wise product via numpy, returned as a plain list
        (kept as-is for backward compatibility with the original API)."""
        self._eq_dim(v)
        temp = np.multiply(self.coordinates, v.coordinates)
        return temp.tolist()

    def cross_product(self, v):
        """Cross product of two 3D vectors, returned as a plain list."""
        self._eq_dim(v)
        a, b = self.coordinates, v.coordinates
        return [a[1] * b[2] - a[2] * b[1],
                a[2] * b[0] - a[0] * b[2],
                a[0] * b[1] - a[1] * b[0]]

    def cross_product2(self, v):
        """Cross product via numpy, returned as a plain list."""
        self._eq_dim(v)
        temp = np.cross(np.array(self.coordinates), np.array(v.coordinates))
        return temp.tolist()

    def angle(self, v, degree=False):
        """Angle between two vectors, in radians (or degrees if *degree*)."""
        self._zero_vec()
        v._zero_vec()
        measurement = pi / 180 if degree else 1
        return acos(self.dot_product(v) / (self.magnitude() * v.magnitude())) \
            / measurement

    def parallelism(self, v, threshold=10e-6):
        """True when the two vectors are (nearly) parallel.

        A (near-)zero vector is considered parallel to anything.
        """
        self._eq_dim(v)
        if self.magnitude() < threshold or v.magnitude() < threshold:
            return True
        ang = self.angle(v)
        return ang < threshold or (pi - ang) < threshold

    def orthogonality(self, v, threshold=10e-6):
        """True when the two vectors are (nearly) orthogonal."""
        return abs(self.dot_product(v)) < threshold

    def projection(self, v):
        """Projection of this vector onto the direction of *v*."""
        _v = v.direction()
        weight = self.dot_product(_v)
        return _v.scalar_mult(weight)
if __name__ == '__main__':
    # Smoke-test demo: exercise every Vector operation and print the
    # results so the pure-python and numpy variants can be eyeballed
    # side by side.
    a = Vector([1, 2])
    b = Vector([3, 4])
    print(a.magnitude())
    print(a.magnitude2())
    print(a.plus(b))
    print(a.plus2(b))
    print(a.minus(b))
    print(a.minus2(b))
    print(a.scalar_mult(2))
    print(a.scalar_mult2(2))
    print(a.dot_product(b))
    print(a.dot_product2(b))
    print(a.multiply_elementwise(b))
    print(a.multiply_elementwise2(b))
    print(a.angle(b))
    print(a.parallelism(b))
    print(a.orthogonality(b))
    print(a.projection(b))
    # cross products require 3D vectors
    c = Vector([1, 2, 3])
    d = Vector([4, 5, 6])
    print(c.cross_product(d))
    print(c.cross_product2(d))
| [
"deco@cubee.com"
] | deco@cubee.com |
5a4a9b1572e04fe8cfa1c2652f3d39387e7d03b3 | 52e814745700b54e4b35e783386ad5f796def1e9 | /colour/models/rgb/tests/tests_derivation.py | 2da1ca529a532396ab2d91b80e4f8a7319e37789 | [
"BSD-3-Clause"
] | permissive | scoopxyz/colour | e9c6502f67ff0774ab77f3c2f622b5973f5a9196 | b1d82af250122f82919b4c54d06fdf72c069c5af | refs/heads/develop | 2020-12-30T19:57:48.884001 | 2016-12-28T12:42:44 | 2016-12-28T12:42:44 | 68,670,983 | 0 | 0 | null | 2016-09-20T03:38:17 | 2016-09-20T03:38:17 | null | UTF-8 | Python | false | false | 12,873 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.derivation` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import re
import unittest
from itertools import permutations
from six import text_type
from colour.models import (
normalised_primary_matrix,
chromatically_adapted_primaries,
primaries_whitepoint,
RGB_luminance_equation,
RGB_luminance)
from colour.models.rgb.derivation import xy_to_z
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['Testxy_to_z',
'TestNormalisedPrimaryMatrix',
'TestChromaticallyAdaptedPrimaries',
'TestPrimariesWhitepoint',
'TestRGBLuminanceEquation',
'TestRGBLuminance']
class Testxy_to_z(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.xy_to_z` definition unit
    tests methods.
    """
    def test_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition.
        """
        # z = 1 - x - y for CIE xy chromaticity coordinates
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.2500, 0.2500])),
            0.50000000,
            decimal=7)
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.0001, -0.0770])),
            1.07690000,
            decimal=7)
        np.testing.assert_almost_equal(
            xy_to_z(np.array([0.0000, 1.0000])),
            0.00000000,
            decimal=7)
    def test_n_dimensional_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition
        n-dimensional arrays support.
        """
        xy = np.array([0.25, 0.25])
        z = 0.5
        np.testing.assert_almost_equal(
            xy_to_z(xy),
            z,
            decimal=7)
        # tiling/reshaping checks the vectorised code paths
        xy = np.tile(xy, (6, 1))
        z = np.tile(z, 6, )
        np.testing.assert_almost_equal(
            xy_to_z(xy),
            z,
            decimal=7)
        xy = np.reshape(xy, (2, 3, 2))
        z = np.reshape(z, (2, 3))
        np.testing.assert_almost_equal(
            xy_to_z(xy),
            z,
            decimal=7)
    @ignore_numpy_errors
    def test_nan_xy_to_z(self):
        """
        Tests :func:`colour.models.rgb.derivation.xy_to_z` definition nan
        support.
        """
        # every pairing of extreme/invalid values must not raise
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            xy_to_z(case)
class TestNormalisedPrimaryMatrix(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.normalised_primary_matrix`
    definition unit tests methods.
    """
    def test_normalised_primary_matrix(self):
        """
        Tests :func:`colour.models.rgb.derivation.normalised_primary_matrix`
        definition.
        """
        # ACES AP0 primaries / whitepoint
        np.testing.assert_almost_equal(
            normalised_primary_matrix(
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            np.array([[0.95255240, 0.00000000, 0.00009368],
                      [0.34396645, 0.72816610, -0.07213255],
                      [0.00000000, 0.00000000, 1.00882518]]),
            decimal=7)
        # sRGB primaries / D65 whitepoint
        np.testing.assert_almost_equal(
            normalised_primary_matrix(
                np.array([0.640, 0.330,
                          0.300, 0.600,
                          0.150, 0.060]),
                np.array([0.3127, 0.3290])),
            np.array([[0.41239080, 0.35758434, 0.18048079],
                      [0.21263901, 0.71516868, 0.07219232],
                      [0.01933082, 0.11919478, 0.95053215]]),
            decimal=7)
    @ignore_numpy_errors
    def test_nan_normalised_primary_matrix(self):
        """
        Tests :func:`colour.models.rgb.derivation.normalised_primary_matrix`
        definition nan support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            P = np.array(np.vstack((case, case, case)))
            W = np.array(case)
            try:
                normalised_primary_matrix(P, W)
            except np.linalg.linalg.LinAlgError:
                # singular primary matrices are acceptable for nan input
                import traceback
                from colour.utilities import warning
                warning(traceback.format_exc())
class TestChromaticallyAdaptedPrimaries(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition unit tests methods.
    """
    def test_chromatically_adapted_primaries(self):
        """
        Tests :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition.
        """
        # ACES AP0 primaries adapted from ACES whitepoint to D50
        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767]),
                np.array([0.34570, 0.35850])),
            np.array([[0.73431182, 0.26694964],
                      [0.02211963, 0.98038009],
                      [-0.05880375, -0.12573056]]),
            decimal=7)
        # sRGB primaries adapted from D65 to D50 (default CAT)
        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array([0.640, 0.330,
                          0.300, 0.600,
                          0.150, 0.060]),
                np.array([0.31270, 0.32900]),
                np.array([0.34570, 0.35850])),
            np.array([[0.64922534, 0.33062196],
                      [0.32425276, 0.60237128],
                      [0.15236177, 0.06118676]]),
            decimal=7)
        # same adaptation with the Bradford transform explicitly selected
        np.testing.assert_almost_equal(
            chromatically_adapted_primaries(
                np.array([0.640, 0.330,
                          0.300, 0.600,
                          0.150, 0.060]),
                np.array([0.31270, 0.32900]),
                np.array([0.34570, 0.35850]),
                'Bradford'),
            np.array([[0.64844144, 0.33085331],
                      [0.32119518, 0.59784434],
                      [0.15589322, 0.06604921]]),
            decimal=7)
    @ignore_numpy_errors
    def test_nan_chromatically_adapted_primaries(self):
        """
        Tests :func:`colour.models.rgb.derivation.\
chromatically_adapted_primaries` definition nan support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=2))
        for case in cases:
            P = np.array(np.vstack((case, case, case)))
            W = np.array(case)
            chromatically_adapted_primaries(P, W, W)
class TestPrimariesWhitepoint(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.primaries_whitepoint`
    definition unit tests methods.
    """
    def test_primaries_whitepoint(self):
        """
        Tests :func:`colour.models.rgb.derivation.primaries_whitepoint`
        definition.
        """
        # inverse of the ACES AP0 normalised primary matrix
        P, W = primaries_whitepoint(np.array(
            [[0.95255240, 0.00000000, 0.00009368],
             [0.34396645, 0.72816610, -0.07213255],
             [0.00000000, 0.00000000, 1.00882518]]))
        np.testing.assert_almost_equal(
            P,
            np.array([[0.73470, 0.26530],
                      [0.00000, 1.00000],
                      [0.00010, -0.07700]]),
            decimal=7)
        np.testing.assert_almost_equal(
            W,
            np.array([0.32168, 0.33767]),
            decimal=7)
        # inverse of a (rounded) sRGB matrix recovers approximate
        # sRGB primaries and D65 whitepoint
        P, W = primaries_whitepoint(
            np.array([[0.41240000, 0.35760000, 0.18050000],
                      [0.21260000, 0.71520000, 0.07220000],
                      [0.01930000, 0.11920000, 0.95050000]]))
        np.testing.assert_almost_equal(
            P,
            np.array([[0.64007450, 0.32997051],
                      [0.30000000, 0.60000000],
                      [0.15001662, 0.06000665]]),
            decimal=7)
        np.testing.assert_almost_equal(
            W,
            np.array([0.31271591, 0.32900148]),
            decimal=7)
    @ignore_numpy_errors
    def test_nan_primaries_whitepoint(self):
        """
        Tests :func:`colour.models.rgb.derivation.primaries_whitepoint`
        definition nan support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            M = np.array(np.vstack((case, case, case)))
            primaries_whitepoint(M)
class TestRGBLuminanceEquation(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.RGB_luminance_equation`
    definition unit tests methods.
    """
    def test_RGB_luminance_equation(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance_equation`
        definition.
        """
        self.assertIsInstance(
            RGB_luminance_equation(
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            text_type)
        # the returned string must look like
        # "Y = c1(R) +/- c2(G) +/- c3(B)" with float coefficients
        self.assertTrue(re.match(
            # TODO: Simplify that monster.
            ('Y\s?=\s?[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.'
             '\(R\)\s?[\+-]\s?[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.'
             '\(G\)\s?[\+-]\s?[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.\(B\)'),
            RGB_luminance_equation(
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767]))))
class TestRGBLuminance(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.derivation.RGB_luminance` definition
    unit tests methods.
    """
    def test_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance`
        definition.
        """
        self.assertAlmostEqual(
            RGB_luminance(
                np.array([50.0, 50.0, 50.0]),
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            50.00000000,
            places=7)
        self.assertAlmostEqual(
            RGB_luminance(
                np.array([74.6, 16.1, 100.0]),
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            30.17011667,
            places=7)
        self.assertAlmostEqual(
            RGB_luminance(
                np.array([40.6, 4.2, 67.4]),
                np.array([0.73470, 0.26530,
                          0.00000, 1.00000,
                          0.00010, -0.07700]),
                np.array([0.32168, 0.33767])),
            12.16160184,
            places=7)
    def test_n_dimensional_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance` definition
        n_dimensional arrays support.
        """
        # bug fix: the original had trailing commas after the RGB and P
        # assignments, accidentally wrapping the arrays in 1-tuples
        RGB = np.array([50.0, 50.0, 50.0])
        P = np.array([0.73470, 0.26530,
                      0.00000, 1.00000,
                      0.00010, -0.07700])
        W = np.array([0.32168, 0.33767])
        Y = 50
        np.testing.assert_almost_equal(
            RGB_luminance(RGB, P, W),
            Y)
        RGB = np.tile(RGB, (6, 1))
        Y = np.tile(Y, 6)
        np.testing.assert_almost_equal(
            RGB_luminance(RGB, P, W),
            Y)
        RGB = np.reshape(RGB, (2, 3, 3))
        Y = np.reshape(Y, (2, 3))
        np.testing.assert_almost_equal(
            RGB_luminance(RGB, P, W),
            Y)
    @ignore_numpy_errors
    def test_nan_RGB_luminance(self):
        """
        Tests :func:`colour.models.rgb.derivation.RGB_luminance`
        definition nan support.
        """
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            RGB = np.array(case)
            P = np.array(np.vstack((case[0:2], case[0:2], case[0:2])))
            W = np.array(case[0:2])
            try:
                RGB_luminance(RGB, P, W)
            except np.linalg.linalg.LinAlgError:
                # singular matrices are acceptable for nan input
                import traceback
                from colour.utilities import warning
                warning(traceback.format_exc())
if __name__ == '__main__':
unittest.main()
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
08c7a0d5de9c427ddea43392421159401108dedc | 7704dfa69e81c8a2f22b4bdd2b41a1bdad86ac4a | /fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/test_cli.py | b6d10faf846aeb2ea48f87e1d6b2f5b8c52536fa | [
"Apache-2.0"
] | permissive | andrei4ka/fuel-web-redhat | 8614af4567d2617a8420869c068d6b1f33ddf30c | 01609fcbbae5cefcd015b6d7a0dbb181e9011c14 | refs/heads/master | 2022-10-16T01:53:59.889901 | 2015-01-23T11:00:22 | 2015-01-23T11:00:22 | 29,728,913 | 0 | 0 | Apache-2.0 | 2022-09-16T17:48:26 | 2015-01-23T10:56:45 | Python | UTF-8 | Python | false | false | 2,251 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from fuel_upgrade import errors
from fuel_upgrade import messages
from fuel_upgrade.cli import parse_args
from fuel_upgrade.cli import run_upgrade
from fuel_upgrade.tests.base import BaseTestCase
@mock.patch('fuel_upgrade.cli.CheckerManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.PreUpgradeHookManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.UpgradeManager', mock.Mock())
@mock.patch('fuel_upgrade.cli.build_config')
class TestAdminPassword(BaseTestCase):
    # Only build_config is injected into the tests (the other three
    # patches supply a ready-made Mock, so they add no parameters).
    # Test methods therefore receive mbuild_config last, after any
    # method-level patches.
    default_args = ['host-system', '--src', '/path']
    def get_args(self, args):
        # parse the CLI arguments the same way the real entry point does
        return parse_args(args)
    def test_use_password_arg(self, mbuild_config):
        # a --password argument is forwarded to build_config verbatim
        password = '12345678'
        args = self.get_args(self.default_args + ['--password', password])
        run_upgrade(args)
        mbuild_config.assert_called_once_with(
            mock.ANY, password
        )
    @mock.patch('fuel_upgrade.cli.getpass')
    def test_ask_for_password(self, mgetpass, mbuild_config):
        # without --password the cli prompts via getpass
        password = '987654321'
        mgetpass.getpass.return_value = password
        args = self.get_args(self.default_args)
        run_upgrade(args)
        mbuild_config.assert_called_once_with(
            mock.ANY, password
        )
    @mock.patch('fuel_upgrade.cli.getpass')
    def test_no_password_provided(self, mgetpass, mbuild_config):
        # an empty interactive password must abort with CommandError
        password = ''
        mgetpass.getpass.return_value = password
        with self.assertRaisesRegexp(errors.CommandError,
                                     messages.no_password_provided):
            args = self.get_args(self.default_args)
            run_upgrade(args)
| [
"akirilochkin@mirantis.com"
] | akirilochkin@mirantis.com |
7890a12e113f4a009322f64939ac986783a5565f | 372b1321c545757308aa1ef93a3584d5674af40b | /2017/07/solver.py | 13c3dd9fa254b6922c9fe0e5e47fa2453220fdac | [] | no_license | verdouxscience/advent-of-code | a10b129959a75c4821af1b831f88b89e71857bae | 1f993f1104c818a8a0a459357c1be9a78bd33198 | refs/heads/main | 2023-04-09T10:20:44.307794 | 2021-04-05T01:55:18 | 2021-04-05T01:55:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from aoc_parser import Parser
from aoc_board import Grid, Point, Graph
FILE_NAME = 'data'
class Node:
def __init__(self, value):
value = value.split()
self.id = value[0]
self.weight = int(value[1][1:-1])
def __eq__(self, o):
return str(self) == str(o)
def __hash__(self):
return hash(str(self))
def __repr__(self):
return str(self)
def __str__(self):
return '{}: {}'.format(self.id, self.weight)
def main():
graph = get_graph()
# Part 1: xegshds
top_most = graph.top_most()
print('Part 1: {}'.format(top_most))
# Part 2: 299
graph.get_weight(graph.get_node(top_most))
print('Part 2: {}'.format(graph.to_change))
def get_graph():
graph = Graph()
for line in Parser(FILE_NAME).lines():
line = line.split(' -> ')
node = Node(line[0])
graph.add_node(node)
if len(line) == 2:
for edge in line[1].split(', '):
graph.add_edge(node, edge)
return graph
if __name__ == '__main__':
main()
| [
"suslikovvd@gmail.com"
] | suslikovvd@gmail.com |
c47a2131c66e6a0693914c73f4f493137080963c | aaa6354278eb889264e8cb2ee5877cd8f79d4c04 | /torchwisdom/core/progress.py | 667f9142230617d462920c94bed175de1b0f41fa | [
"MIT"
] | permissive | nunenuh/torchwisdom | 88682ff71a87ebe7c01fbc149b9040e9a26fde89 | 0a0e5dda84d59243a084b053d98f2eabd76474f5 | refs/heads/master | 2020-04-27T09:11:33.078513 | 2019-05-12T13:33:48 | 2019-05-12T13:33:48 | 174,204,225 | 8 | 4 | MIT | 2020-03-08T22:44:04 | 2019-03-06T19:06:45 | Python | UTF-8 | Python | false | false | 3,712 | py | from fastprogress import master_bar
from fastprogress.fastprogress import isnotebook
from torchwisdom.core.callback import Callback
from typing import *
from torchwisdom.core.statemgr import StateManager
from datetime import timedelta
# __all__ = []
class ProgressTable(object):
def __init__(self):
pass
def time_formatter(sec, last_cut=-4)->str:
return str(timedelta(seconds=sec))[:last_cut]
def format_text(text, empty_space=15):
ltext=len(text)
if empty_space>ltext:
len_iter = empty_space-ltext
space = "".join([" " for i in range(len_iter)])
out = space+text
else:
out = " "+text+" "
return out
def build_line_console(line, use_tab=False):
str_build = ""
for ln in line:
text = format_text(ln)
str_build+=text
if use_tab: str_build+="\t"
return str_build
def time_delta_remain(epoch_state):
delta_last = epoch_state.get('time')[-1]
delta = time_formatter(delta_last)
remain_last = epoch_state.get('remain')[-1]
remain = time_formatter(remain_last)
return delta, remain
def time_delta_remain_resume(epoch_state, epoch):
delta_last = epoch_state.get('time')[epoch]
delta = time_formatter(delta_last)
remain_last = epoch_state.get('remain')[epoch]
remain = time_formatter(remain_last)
return delta, remain
def line_builder(metric_state: Dict, epoch, tdelta, tremain):
train: Dict = metric_state.get('train')
valid: Dict = metric_state.get('valid')
line = [f'{epoch}']
for key in train.keys():
line.append(f"{train[key]['mean'][-1]:.6f}")
line.append(f"{valid[key]['mean'][-1]:.6f}")
line.append(f'{tdelta}')
line.append(f'{tremain}')
if isnotebook():
return line
else:
return build_line_console(line)
def line_builder_resume(metric_state: Dict, epoch, tdelta, tremain):
train: Dict = metric_state.get('train')
valid: Dict = metric_state.get('valid')
line = [f'{epoch+1}']
for key in train.keys():
line.append(f"{train[key]['epoch'][epoch]:.6f}")
line.append(f"{valid[key]['epoch'][epoch]:.6f}")
line.append(f'{tdelta}')
line.append(f'{tremain}')
if isnotebook():
return line
else:
return build_line_console(line)
def line_head_builder(metric_state: Dict):
train: Dict = metric_state.get('train')
line = ['epoch']
for val in train.keys():
line.append(f'trn_{val}')
line.append(f'val_{val}')
line.append('time')
line.append('remain')
if isnotebook():
return line
else:
return build_line_console(line)
def graph_builder(metric_state: Dict, trainer_state: Dict):
train: Dict = metric_state.get('train')
valid: Dict = metric_state.get('valid')
epoch_curr = trainer_state.get('epoch')['curr']
train_loss = train.get('loss').get('epoch')
valid_loss = valid.get('loss').get('epoch')
if epoch_curr == 1:
x = [1]
else:
x = list(range(1, len(train_loss)+1))
graph = [[x, train_loss], [x, valid_loss]]
# print(graph)
return graph
def clean_up_metric_resume(metric_state: Dict, epoch_curr):
train: Dict = metric_state.get('train')
valid: Dict = metric_state.get("valid")
for key in train.keys():
# print("train epoch len", len(train[key]['epoch']))
# print(key, train[key]['epoch'])
if len(train[key]['epoch']) != epoch_curr-1:
train[key]['epoch'].pop()
# print("valid epoch len", len(valid[key]['epoch']))
# print(key, valid[key]['epoch'])
if len(valid[key]['epoch']) != epoch_curr-1:
valid[key]['epoch'].pop()
| [
"nunenuh@gmail.com"
] | nunenuh@gmail.com |
8a206f80ed23b3b2eb6fa5863c413c23799e9402 | 9c3c83007c5bf0f36635b0045b2aad7f8a11ac11 | /novice/03-05/microblog/venv/lib/python3.7/tarfile.py | 8a8432a6fb0ea5a29eea2e241474da8bf0b8c753 | [
"MIT"
] | permissive | septiannurtrir/praxis-academy | bc58f9484db36b36c202bf90fdfd359482b72770 | 1ef7f959c372ae991d74ccd373123142c2fbc542 | refs/heads/master | 2021-06-21T17:04:58.379408 | 2019-09-13T16:46:08 | 2019-09-13T16:46:08 | 203,007,994 | 1 | 0 | MIT | 2021-03-20T01:43:24 | 2019-08-18T13:38:23 | Python | UTF-8 | Python | false | false | 56 | py | /home/septiannurtrir/miniconda3/lib/python3.7/tarfile.py | [
"septiannurtrir@gmail.com"
] | septiannurtrir@gmail.com |
bbd0c5e6dfe3b1dd6ce23e3e5ea09fe588e6ecdc | 987a68b9c196f39ba1810a2261cd4a08c35416a3 | /BinarySearch/374-guess-number-higher-or-lower.py | 719ded9d3727476c6b598a21120e1847f0b62c51 | [] | no_license | xizhang77/LeetCode | c26e4699fbe1f2d2c4706b2e5ee82131be066ee5 | ce68f5af57f772185211f4e81952d0345a6d23cb | refs/heads/master | 2021-06-05T15:33:22.318833 | 2019-11-19T06:53:24 | 2019-11-19T06:53:24 | 135,076,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # -*- coding: utf-8 -*-
'''
We are playing the Guess Game. The game is as follows:
I pick a number from 1 to n. You have to guess which number I picked.
Every time you guess wrong, I'll tell you whether the number is higher or lower.
You call a pre-defined API guess(int num) which returns 3 possible results (-1, 1, or 0):
-1 : My number is lower
1 : My number is higher
0 : Congrats! You got it!
Example :
Input: n = 10, pick = 6
Output: 6
'''
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
lower, upper = 1, n
if guess( lower ) == 0:
return lower
if guess( upper ) == 0:
return upper
while True:
ans = (lower + upper)/2
if guess( ans ) == -1:
upper = min( upper, ans )
elif guess( ans ) == 1:
lower = max( lower, ans )
else:
return ans
| [
"xizhang1@cs.stonybrook.edu"
] | xizhang1@cs.stonybrook.edu |
459c64a151d5f14c2571ae8ddcda8396b1a73dee | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/BinToDecimal/bin_to_decimal_test.py | 0aae2d128c84e951df54be278457b2b6b1a82121 | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from bin_to_decimal import bin_to_decimal
import unittest
class TestBinToDecimal(unittest.TestCase):
def test(self):
tests = (
("1", 1),
("0", 0),
("1001001", 73),
)
for t in tests:
inp, exp = t
self.assertEqual(bin_to_decimal(inp), exp)
def test_rand(self):
from random import randint
for _ in range(100):
n = randint(1, 5000000)
b = bin(n)[2:]
self.assertEqual(bin_to_decimal(b), n)
if __name__ == '__main__':
unittest.main() | [
"darkdan099@gmail.com"
] | darkdan099@gmail.com |
d3f248b1deb5b8422a2aa408ffe8902f430a6cb4 | bd7b1bad2eede510abba21f5faa3b34a001a1bb1 | /code/venv/lib/python3.7/site-packages/sklearn/compose/tests/test_column_transformer.py | d564aa097a63a3d644cad2d53b05fc49b10823f7 | [
"MIT"
] | permissive | zeroknowledgediscovery/zcad | 24514dc4442989927df45fe100ff547453c0c84a | 9ef5f0d294a4148a016b0534298adc527279d14a | refs/heads/master | 2023-08-31T18:23:58.897712 | 2023-08-27T17:50:32 | 2023-08-27T17:50:32 | 178,449,835 | 0 | 1 | MIT | 2022-10-15T10:42:45 | 2019-03-29T17:34:04 | Python | UTF-8 | Python | false | false | 39,210 | py | """
Test the ColumnTransformer.
"""
import numpy as np
from scipy import sparse
import pytest
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.base import BaseEstimator
from sklearn.externals import six
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.exceptions import NotFittedError, DataConversionWarning
from sklearn.preprocessing import StandardScaler, Normalizer, OneHotEncoder
from sklearn.feature_extraction import DictVectorizer
class Trans(BaseEstimator):
    """Minimal stand-in transformer that always emits 2-D output.

    A 1-D pandas Series is promoted to a single-column DataFrame and a
    1-D numpy array to a column vector; 2-D input passes through as-is.
    """

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        # pandas Series expose ``to_frame``; promote them to a DataFrame
        frame_converter = getattr(X, 'to_frame', None)
        if frame_converter is not None:
            return frame_converter()
        # promote a 1-D numpy array to a single-column 2-D array
        if X.ndim == 1:
            return X.reshape(-1, 1)
        return X
class DoubleTrans(BaseEstimator):
    """Stateless transformer that scales its input by a factor of two."""

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        # element-wise doubling; shape is preserved
        return X * 2
class SparseMatrixTrans(BaseEstimator):
    """Transformer returning an (n_samples, n_samples) sparse identity.

    Used to test stacking of sparse transformer outputs.
    """

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        size = len(X)
        return sparse.identity(size, format='csr')
class TransNo2D(BaseEstimator):
    """Pass-through transformer returning its input unchanged.

    Unlike ``Trans`` it does not promote 1D input to 2D, which lets the
    tests check the error raised for non-2D transformer output.
    """
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        return X
class TransRaise(BaseEstimator):
    """Transformer whose ``fit`` and ``transform`` always raise ValueError.

    Used to check that errors raised inside a transformer propagate to the
    caller with their original message.
    """
    def fit(self, X, y=None):
        raise ValueError("specific message")
    def transform(self, X, y=None):
        raise ValueError("specific message")
def test_column_transformer():
    """Check column selection variants and transformer_weights on arrays."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_first1D = np.array([0, 1, 2])
    X_res_second1D = np.array([2, 4, 6])
    X_res_first = X_res_first1D.reshape(-1, 1)
    X_res_both = X_array
    # (column specification, expected fit_transform output) pairs
    cases = [
        # single column 1D / 2D
        (0, X_res_first),
        ([0], X_res_first),
        # list-like
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
    ]
    for selection, res in cases:
        ct = ColumnTransformer([('trans', Trans(), selection)],
                               remainder='drop')
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)
        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer([('trans', Trans(), lambda x: selection)],
                               remainder='drop')
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)
    # two transformers, one column each -> outputs stacked horizontally
    ct = ColumnTransformer([('trans1', Trans(), [0]),
                            ('trans2', Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_array), X_res_both)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
    assert len(ct.transformers_) == 2
    # test with transformer_weights: each output is scaled by its weight
    transformer_weights = {'trans1': .1, 'trans2': 10}
    both = ColumnTransformer([('trans1', Trans(), [0]),
                              ('trans2', Trans(), [1])],
                             transformer_weights=transformer_weights)
    res = np.vstack([transformer_weights['trans1'] * X_res_first1D,
                     transformer_weights['trans2'] * X_res_second1D]).T
    assert_array_equal(both.fit_transform(X_array), res)
    assert_array_equal(both.fit(X_array).transform(X_array), res)
    assert len(both.transformers_) == 2
    # a single weighted transformer scales its whole output
    both = ColumnTransformer([('trans', Trans(), [0, 1])],
                             transformer_weights={'trans': .1})
    assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
def test_column_transformer_dataframe():
    """Check all DataFrame column-selection variants and pandas handling.

    Covers label-based and positional specifications, callables, boolean
    masks, transformer_weights, that pandas objects are passed through to
    the transformers, and that integer column names are still treated
    positionally.
    """
    pd = pytest.importorskip('pandas')
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=['first', 'second'])
    X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
    X_res_both = X_array
    # (column specification, expected fit_transform output) pairs
    cases = [
        # String keys: label based
        # scalar
        ('first', X_res_first),
        # list
        (['first'], X_res_first),
        (['first', 'second'], X_res_both),
        # slice
        (slice('first', 'second'), X_res_both),
        # int keys: positional
        # scalar
        (0, X_res_first),
        # list
        ([0], X_res_first),
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
        (pd.Series([True, False], index=['first', 'second']), X_res_first),
    ]
    for selection, res in cases:
        ct = ColumnTransformer([('trans', Trans(), selection)],
                               remainder='drop')
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)
        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer([('trans', Trans(), lambda X: selection)],
                               remainder='drop')
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)
    # two transformers selected by label
    ct = ColumnTransformer([('trans1', Trans(), ['first']),
                            ('trans2', Trans(), ['second'])])
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # two transformers selected by position
    ct = ColumnTransformer([('trans1', Trans(), [0]),
                            ('trans2', Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # test with transformer_weights
    transformer_weights = {'trans1': .1, 'trans2': 10}
    both = ColumnTransformer([('trans1', Trans(), ['first']),
                              ('trans2', Trans(), ['second'])],
                             transformer_weights=transformer_weights)
    res = np.vstack([transformer_weights['trans1'] * X_df['first'],
                     transformer_weights['trans2'] * X_df['second']]).T
    assert_array_equal(both.fit_transform(X_df), res)
    assert_array_equal(both.fit(X_df).transform(X_df), res)
    assert len(both.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # test multiple columns
    both = ColumnTransformer([('trans', Trans(), ['first', 'second'])],
                             transformer_weights={'trans': .1})
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert ct.transformers_[-1][0] != 'remainder'
    both = ColumnTransformer([('trans', Trans(), [0, 1])],
                             transformer_weights={'trans': .1})
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert ct.transformers_[-1][0] != 'remainder'
    # ensure pandas object is passes through
    class TransAssert(BaseEstimator):
        def fit(self, X, y=None):
            return self
        def transform(self, X, y=None):
            assert isinstance(X, (pd.DataFrame, pd.Series))
            if isinstance(X, pd.Series):
                X = X.to_frame()
            return X
    ct = ColumnTransformer([('trans', TransAssert(), 'first')],
                           remainder='drop')
    ct.fit_transform(X_df)
    ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])])
    ct.fit_transform(X_df)
    # integer column spec + integer column names -> still use positional
    X_df2 = X_df.copy()
    X_df2.columns = [1, 0]
    ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop')
    # BUGFIX: fit on X_df2 (integer column names) instead of X_df, so the
    # positional interpretation of the integer spec is actually exercised;
    # previously X_df2 was created but never used.
    assert_array_equal(ct.fit_transform(X_df2), X_res_first)
    assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][1] == 'drop'
    assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("pandas", [True, False], ids=['pandas', 'numpy'])
@pytest.mark.parametrize("column", [[], np.array([False, False])],
                         ids=['list', 'bool'])
def test_column_transformer_empty_columns(pandas, column):
    """An empty column selection (list or all-False mask) is still valid."""
    # test case that ensures that the column transformer does also work when
    # a given transformer doesn't have any columns to work on
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_both = X_array
    if pandas:
        pd = pytest.importorskip('pandas')
        X = pd.DataFrame(X_array, columns=['first', 'second'])
    else:
        X = X_array
    # empty selection as the second transformer
    ct = ColumnTransformer([('trans1', Trans(), [0, 1]),
                            ('trans2', Trans(), column)])
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[1][1], Trans)
    # empty selection as the first transformer
    ct = ColumnTransformer([('trans1', Trans(), column),
                            ('trans2', Trans(), [0, 1])])
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[0][1], Trans)
    # remainder='passthrough' picks up everything if the selection is empty
    ct = ColumnTransformer([('trans', Trans(), column)],
                           remainder='passthrough')
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], Trans)
    # dropping the remainder as well yields an empty (n_samples, 0) result
    fixture = np.array([[], [], []])
    ct = ColumnTransformer([('trans', Trans(), column)],
                           remainder='drop')
    assert_array_equal(ct.fit_transform(X), fixture)
    assert_array_equal(ct.fit(X).transform(X), fixture)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], Trans)
def test_column_transformer_sparse_array():
    """Sparse input stays sparse through selection and remainder handling."""
    X_sparse = sparse.eye(3, 2).tocsr()
    # no distinction between 1D and 2D
    X_res_first = X_sparse[:, 0]
    X_res_both = X_sparse
    for col in [0, [0], slice(0, 1)]:
        for remainder, res in [('drop', X_res_first),
                               ('passthrough', X_res_both)]:
            ct = ColumnTransformer([('trans', Trans(), col)],
                                   remainder=remainder,
                                   sparse_threshold=0.8)
            assert sparse.issparse(ct.fit_transform(X_sparse))
            assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
            assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
                                         res)
    # multi-column selections without a remainder
    for col in [[0, 1], slice(0, 2)]:
        ct = ColumnTransformer([('trans', Trans(), col)],
                               sparse_threshold=0.8)
        assert sparse.issparse(ct.fit_transform(X_sparse))
        assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
        assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
                                     X_res_both)
def test_column_transformer_list():
    """Plain Python (nested) lists are accepted as input."""
    X_list = [
        [1, float('nan'), 'a'],
        [0, 0, 'b']
    ]
    # two scaled numeric columns followed by the one-hot encoded category
    expected_result = np.array([
        [1, float('nan'), 1, 0],
        [-1, 0, 0, 1],
    ])
    ct = ColumnTransformer([
        ('numerical', StandardScaler(), [0, 1]),
        ('categorical', OneHotEncoder(), [2]),
    ])
    with pytest.warns(DataConversionWarning):
        # TODO: this warning is not very useful in this case, would be good
        # to get rid of it
        assert_array_equal(ct.fit_transform(X_list), expected_result)
        assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
def test_column_transformer_sparse_stacking():
    """Mixed dense/sparse outputs stack; sparse_threshold picks the format."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # one dense column + a sparse identity -> overall density below 0.8
    col_trans = ColumnTransformer([('trans1', Trans(), [0]),
                                   ('trans2', SparseMatrixTrans(), 1)],
                                  sparse_threshold=0.8)
    col_trans.fit(X_array)
    X_trans = col_trans.transform(X_array)
    assert sparse.issparse(X_trans)
    assert_equal(X_trans.shape, (X_trans.shape[0], X_trans.shape[0] + 1))
    assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
    assert len(col_trans.transformers_) == 2
    assert col_trans.transformers_[-1][0] != 'remainder'
    # a lower threshold forces densification of the stacked result
    col_trans = ColumnTransformer([('trans1', Trans(), [0]),
                                   ('trans2', SparseMatrixTrans(), 1)],
                                  sparse_threshold=0.1)
    col_trans.fit(X_array)
    X_trans = col_trans.transform(X_array)
    assert not sparse.issparse(X_trans)
    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
    assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_mixed_cols_sparse():
    """Sparse output needs all passthrough columns to be numeric-coercible."""
    df = np.array([['a', 1, True],
                   ['b', 2, False]],
                  dtype='O')
    ct = make_column_transformer(
        (OneHotEncoder(), [0]),
        ('passthrough', [1, 2]),
        sparse_threshold=1.0
    )
    # this shouldn't fail, since boolean can be coerced into a numeric
    # See: https://github.com/scikit-learn/scikit-learn/issues/11912
    X_trans = ct.fit_transform(df)
    assert X_trans.getformat() == 'csr'
    assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1],
                                                    [0, 1, 2, 0]]))
    ct = make_column_transformer(
        (OneHotEncoder(), [0]),
        ('passthrough', [0]),
        sparse_threshold=1.0
    )
    with pytest.raises(ValueError,
                       match="For a sparse output, all columns should"):
        # this fails since strings `a` and `b` cannot be
        # coerced into a numeric.
        ct.fit_transform(df)
def test_column_transformer_sparse_threshold():
    """sparse_threshold compares overall output density to pick the format."""
    X_array = np.array([['a', 'b'], ['A', 'B']], dtype=object).T
    # above data has sparsity of 4 / 8 = 0.5
    # apply threshold even if all sparse
    col_trans = ColumnTransformer([('trans1', OneHotEncoder(), [0]),
                                   ('trans2', OneHotEncoder(), [1])],
                                  sparse_threshold=0.2)
    res = col_trans.fit_transform(X_array)
    assert not sparse.issparse(res)
    assert not col_trans.sparse_output_
    # mixed -> sparsity of (4 + 2) / 8 = 0.75
    for thres in [0.75001, 1]:
        col_trans = ColumnTransformer(
            [('trans1', OneHotEncoder(sparse=True), [0]),
             ('trans2', OneHotEncoder(sparse=False), [1])],
            sparse_threshold=thres)
        res = col_trans.fit_transform(X_array)
        assert sparse.issparse(res)
        assert col_trans.sparse_output_
    # thresholds at or below the actual sparsity densify the output
    for thres in [0.75, 0]:
        col_trans = ColumnTransformer(
            [('trans1', OneHotEncoder(sparse=True), [0]),
             ('trans2', OneHotEncoder(sparse=False), [1])],
            sparse_threshold=thres)
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
    # if nothing is sparse -> no sparse
    for thres in [0.33, 0, 1]:
        col_trans = ColumnTransformer(
            [('trans1', OneHotEncoder(sparse=False), [0]),
             ('trans2', OneHotEncoder(sparse=False), [1])],
            sparse_threshold=thres)
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
    """Selecting a scalar column hands 1D data to the transformer and fails."""
    data = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    # A scalar column specification yields a 1D column, which a regular
    # transformer such as StandardScaler cannot consume.
    scaler_trans = ColumnTransformer([('trans', StandardScaler(), 0)])
    for method in (scaler_trans.fit, scaler_trans.fit_transform):
        assert_raise_message(ValueError, "1D data passed to a transformer",
                             method, data)
    # Errors raised inside the transformer itself must propagate unchanged.
    raising_trans = ColumnTransformer([('trans', TransRaise(), 0)])
    for method in (raising_trans.fit, raising_trans.fit_transform):
        assert_raise_message(ValueError, "specific message", method, data)
def test_2D_transformer_output():
    """A transformer returning non-2D output raises a named error."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([('trans1', 'drop', 0),
                            ('trans2', TransNo2D(), 1)])
    assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
                         ct.fit_transform, X_array)
    # because fit is also doing transform, this raises already on fit
    assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
                         ct.fit, X_array)
def test_2D_transformer_output_pandas():
    """Same as test_2D_transformer_output but with DataFrame input."""
    pd = pytest.importorskip('pandas')
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=['col1', 'col2'])
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([('trans1', TransNo2D(), 'col1')])
    assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
                         ct.fit_transform, X_df)
    # because fit is also doing transform, this raises already on fit
    assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
                         ct.fit, X_df)
@pytest.mark.parametrize("remainder", ['drop', 'passthrough'])
def test_column_transformer_invalid_columns(remainder):
    """Invalid column specifications raise informative ValueErrors."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # general invalid
    for col in [1.5, ['string', 1], slice(1, 's'), np.array([1.])]:
        ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
        assert_raise_message(ValueError, "No valid specification",
                             ct.fit, X_array)
    # invalid for arrays: label-based selection requires a DataFrame
    for col in ['string', ['string', 'other'], slice('a', 'b')]:
        ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
        assert_raise_message(ValueError, "Specifying the columns",
                             ct.fit, X_array)
def test_column_transformer_invalid_transformer():
    """Estimators without fit/transform are rejected with a TypeError."""
    class NoTrans(BaseEstimator):
        # has fit/predict but no transform -> not a valid transformer
        def fit(self, X, y=None):
            return self
        def predict(self, X):
            return X
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    ct = ColumnTransformer([('trans', NoTrans(), [0])])
    assert_raise_message(TypeError, "All estimators should implement fit",
                         ct.fit, X_array)
def test_make_column_transformer():
    """make_column_transformer names transformers after their lowercased class."""
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer((scaler, 'first'), (norm, ['second']))
    names, transformers, columns = zip(*ct.transformers)
    assert_equal(names, ("standardscaler", "normalizer"))
    assert_equal(transformers, (scaler, norm))
    assert_equal(columns, ('first', ['second']))
    # the deprecated (columns, transformer) order still works but warns
    # XXX remove in v0.22
    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        ct1 = make_column_transformer(([0], norm))
    ct2 = make_column_transformer((norm, [0]))
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    assert_almost_equal(ct1.fit_transform(X_array),
                        ct2.fit_transform(X_array))
    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        make_column_transformer(('first', 'drop'))
    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        make_column_transformer(('passthrough', 'passthrough'),
                                ('first', 'drop'))
def test_make_column_transformer_pandas():
    """Deprecated (columns, transformer) order also works with DataFrames."""
    pd = pytest.importorskip('pandas')
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=['first', 'second'])
    norm = Normalizer()
    # XXX remove in v0.22
    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        ct1 = make_column_transformer((X_df.columns, norm))
    ct2 = make_column_transformer((norm, X_df.columns))
    assert_almost_equal(ct1.fit_transform(X_df),
                        ct2.fit_transform(X_df))
def test_make_column_transformer_kwargs():
    """Keyword arguments are forwarded; unknown kwargs raise TypeError."""
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer((scaler, 'first'), (norm, ['second']),
                                 n_jobs=3, remainder='drop',
                                 sparse_threshold=0.5)
    # the transformers list is unaffected by the extra kwargs
    assert_equal(ct.transformers, make_column_transformer(
        (scaler, 'first'), (norm, ['second'])).transformers)
    assert_equal(ct.n_jobs, 3)
    assert_equal(ct.remainder, 'drop')
    assert_equal(ct.sparse_threshold, 0.5)
    # invalid keyword parameters should raise an error message
    assert_raise_message(
        TypeError,
        'Unknown keyword arguments: "transformer_weights"',
        make_column_transformer, (scaler, 'first'), (norm, ['second']),
        transformer_weights={'pca': 10, 'Transf': 1}
    )
def test_make_column_transformer_remainder_transformer():
    """make_column_transformer accepts an estimator for ``remainder``."""
    remainder_est = StandardScaler()
    ct = make_column_transformer((StandardScaler(), 'first'),
                                 (Normalizer(), ['second']),
                                 remainder=remainder_est)
    # the estimator instance is stored as-is on the resulting transformer
    assert ct.remainder == remainder_est
def test_column_transformer_get_set_params():
    """get_params exposes nested transformer params; set_params updates them."""
    ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
                            ('trans2', StandardScaler(), [1])])
    exp = {'n_jobs': None,
           'remainder': 'drop',
           'sparse_threshold': 0.3,
           'trans1': ct.transformers[0][1],
           'trans1__copy': True,
           'trans1__with_mean': True,
           'trans1__with_std': True,
           'trans2': ct.transformers[1][1],
           'trans2__copy': True,
           'trans2__with_mean': True,
           'trans2__with_std': True,
           'transformers': ct.transformers,
           'transformer_weights': None}
    assert_dict_equal(ct.get_params(), exp)
    ct.set_params(trans1__with_mean=False)
    assert_false(ct.get_params()['trans1__with_mean'])
    # replacing a transformer with 'passthrough' drops its nested params
    ct.set_params(trans1='passthrough')
    exp = {'n_jobs': None,
           'remainder': 'drop',
           'sparse_threshold': 0.3,
           'trans1': 'passthrough',
           'trans2': ct.transformers[1][1],
           'trans2__copy': True,
           'trans2__with_mean': True,
           'trans2__with_std': True,
           'transformers': ct.transformers,
           'transformer_weights': None}
    assert_dict_equal(ct.get_params(), exp)
def test_column_transformer_named_estimators():
    """named_transformers_ gives dict and attribute access after fitting."""
    X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
                            ('trans2', StandardScaler(with_std=False), [1])])
    # transformers_ only exists after fit
    assert_false(hasattr(ct, 'transformers_'))
    ct.fit(X_array)
    assert hasattr(ct, 'transformers_')
    assert isinstance(ct.named_transformers_['trans1'], StandardScaler)
    assert isinstance(ct.named_transformers_.trans1, StandardScaler)
    assert isinstance(ct.named_transformers_['trans2'], StandardScaler)
    assert isinstance(ct.named_transformers_.trans2, StandardScaler)
    assert_false(ct.named_transformers_.trans2.with_std)
    # check it are fitted transformers
    assert_equal(ct.named_transformers_.trans1.mean_, 1.)
def test_column_transformer_cloning():
    """Fitting works on clones: the passed-in transformers stay unfitted."""
    X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    ct = ColumnTransformer([('trans', StandardScaler(), [0])])
    ct.fit(X_array)
    # original (in ``transformers``) unfitted, clone (in ``transformers_``) fitted
    assert_false(hasattr(ct.transformers[0][1], 'mean_'))
    assert hasattr(ct.transformers_[0][1], 'mean_')
    ct = ColumnTransformer([('trans', StandardScaler(), [0])])
    ct.fit_transform(X_array)
    assert_false(hasattr(ct.transformers[0][1], 'mean_'))
    assert hasattr(ct.transformers_[0][1], 'mean_')
def test_column_transformer_get_feature_names():
    """get_feature_names prefixes names and errors on unsupported setups."""
    X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    ct = ColumnTransformer([('trans', Trans(), [0, 1])])
    # raise correct error when not fitted
    assert_raises(NotFittedError, ct.get_feature_names)
    # raise correct error when no feature names are available
    ct.fit(X_array)
    assert_raise_message(AttributeError,
                         "Transformer trans (type Trans) does not provide "
                         "get_feature_names", ct.get_feature_names)
    # working example
    X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],
                  [{'c': 5}, {'c': 6}]], dtype=object).T
    ct = ColumnTransformer(
        [('col' + str(i), DictVectorizer(), i) for i in range(2)])
    ct.fit(X)
    assert_equal(ct.get_feature_names(), ['col0__a', 'col0__b', 'col1__c'])
    # passthrough transformers not supported
    ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])
    ct.fit(X)
    assert_raise_message(
        NotImplementedError, 'get_feature_names is not yet supported',
        ct.get_feature_names)
    ct = ColumnTransformer([('trans', DictVectorizer(), 0)],
                           remainder='passthrough')
    ct.fit(X)
    assert_raise_message(
        NotImplementedError, 'get_feature_names is not yet supported',
        ct.get_feature_names)
    # drop transformer: dropped names are excluded from the result
    ct = ColumnTransformer(
        [('col0', DictVectorizer(), 0), ('col1', 'drop', 1)])
    ct.fit(X)
    assert_equal(ct.get_feature_names(), ['col0__a', 'col0__b'])
def test_column_transformer_special_strings():
    """Check the special transformer strings 'drop' and 'passthrough'.

    Also checks that ``None`` and arbitrary strings are rejected with a
    TypeError.
    """
    # one 'drop' -> that selection is simply ignored
    X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    ct = ColumnTransformer(
        [('trans1', Trans(), [0]), ('trans2', 'drop', [1])])
    exp = np.array([[0.], [1.], [2.]])
    assert_array_equal(ct.fit_transform(X_array), exp)
    assert_array_equal(ct.fit(X_array).transform(X_array), exp)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # all 'drop' -> return shape 0 array
    ct = ColumnTransformer(
        [('trans1', 'drop', [0]), ('trans2', 'drop', [1])])
    assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
    assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # 'passthrough' -> the selected columns are forwarded unchanged
    X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
    ct = ColumnTransformer(
        [('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])])
    exp = X_array
    assert_array_equal(ct.fit_transform(X_array), exp)
    assert_array_equal(ct.fit(X_array).transform(X_array), exp)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != 'remainder'
    # None itself / other string is not valid
    for val in [None, 'other']:
        # BUGFIX: use the loop variable ``val`` instead of a hard-coded
        # ``None``, so that the invalid string 'other' is actually tested.
        ct = ColumnTransformer(
            [('trans1', Trans(), [0]), ('trans2', val, [1])])
        assert_raise_message(TypeError, "All estimators should implement",
                             ct.fit_transform, X_array)
        assert_raise_message(TypeError, "All estimators should implement",
                             ct.fit, X_array)
def test_column_transformer_remainder():
    """Exercise the ``remainder`` keyword: default drop, explicit
    passthrough, column ordering, skipped transformers, and validation
    of invalid values."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    col_first = np.array([0, 1, 2]).reshape(-1, 1)
    col_second = np.array([2, 4, 6]).reshape(-1, 1)
    cols_both = X

    # by default, unselected columns are dropped
    ct = ColumnTransformer([('trans1', Trans(), [0])])
    assert_array_equal(ct.fit_transform(X), col_first)
    assert_array_equal(ct.fit(X).transform(X), col_first)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][1] == 'drop'
    assert_array_equal(ct.transformers_[-1][2], [1])

    # explicit passthrough keeps the untouched column
    ct = ColumnTransformer([('trans', Trans(), [0])], remainder='passthrough')
    assert_array_equal(ct.fit_transform(X), cols_both)
    assert_array_equal(ct.fit(X).transform(X), cols_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][1] == 'passthrough'
    assert_array_equal(ct.transformers_[-1][2], [1])

    # column order is not preserved (passed through added to end)
    ct = ColumnTransformer([('trans1', Trans(), [1])],
                           remainder='passthrough')
    assert_array_equal(ct.fit_transform(X), cols_both[:, ::-1])
    assert_array_equal(ct.fit(X).transform(X), cols_both[:, ::-1])
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][1] == 'passthrough'
    assert_array_equal(ct.transformers_[-1][2], [0])

    # passthrough still applies when every actual transformer is skipped
    ct = ColumnTransformer([('trans1', 'drop', [0])],
                           remainder='passthrough')
    assert_array_equal(ct.fit_transform(X), col_second)
    assert_array_equal(ct.fit(X).transform(X), col_second)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][1] == 'passthrough'
    assert_array_equal(ct.transformers_[-1][2], [1])

    # anything other than 'drop' / 'passthrough' / estimator is an error
    ct = ColumnTransformer([('trans1', Trans(), [0])], remainder=1)
    expected_msg = ("remainder keyword needs to be one of 'drop', "
                    "'passthrough', or estimator.")
    assert_raise_message(ValueError, expected_msg, ct.fit, X)
    assert_raise_message(ValueError, expected_msg, ct.fit_transform, X)

    # check default for make_column_transformer
    ct = make_column_transformer((Trans(), [0]))
    assert ct.remainder == 'drop'
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
                                 np.array([True, False])])
def test_column_transformer_remainder_numpy(key):
    """Remainder passthrough works for every numpy-style column
    specifier (list, integer array, slice, boolean mask)."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    expected = X
    ct = ColumnTransformer([('trans1', Trans(), key)],
                           remainder='passthrough')
    assert_array_equal(ct.fit_transform(X), expected)
    assert_array_equal(ct.fit(X).transform(X), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert trans == 'passthrough'
    assert_array_equal(cols, [1])
@pytest.mark.parametrize(
    "key", [[0], slice(0, 1), np.array([True, False]), ['first'], 'pd-index',
            np.array(['first']), np.array(['first'], dtype=object),
            slice(None, 'first'), slice('first', 'first')])
def test_column_transformer_remainder_pandas(key):
    """Remainder passthrough works for every pandas-style column
    specifier (positions, masks, names, Index, label slices)."""
    pd = pytest.importorskip('pandas')
    # 'pd-index' is a placeholder: a real pd.Index cannot be built at
    # collection time when pandas may be missing.
    if isinstance(key, six.string_types) and key == 'pd-index':
        key = pd.Index(['first'])
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    df = pd.DataFrame(X, columns=['first', 'second'])
    expected = X
    ct = ColumnTransformer([('trans1', Trans(), key)],
                           remainder='passthrough')
    assert_array_equal(ct.fit_transform(df), expected)
    assert_array_equal(ct.fit(df).transform(df), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert trans == 'passthrough'
    assert_array_equal(cols, [1])
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
                                 np.array([True, False, False])])
def test_column_transformer_remainder_transformer(key):
    """An estimator passed as ``remainder`` is applied to the
    unselected columns."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).T
    expected = X.copy()
    # DoubleTrans doubles the two remaining (second and third) columns
    expected[:, 1:3] *= 2

    ct = ColumnTransformer([('trans1', Trans(), key)],
                           remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(X), expected)
    assert_array_equal(ct.fit(X).transform(X), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert isinstance(trans, DoubleTrans)
    assert_array_equal(cols, [1, 2])
def test_column_transformer_no_remaining_remainder_transformer():
    """When the transformers already cover all columns, the remainder
    estimator is never fitted and does not appear in ``transformers_``."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).T
    ct = ColumnTransformer([('trans1', Trans(), [0, 1, 2])],
                           remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(X), X)
    assert_array_equal(ct.fit(X).transform(X), X)
    assert len(ct.transformers_) == 1
    assert ct.transformers_[-1][0] != 'remainder'
def test_column_transformer_drops_all_remainder_transformer():
    """Even if every explicit transformer is 'drop', the remainder
    estimator still processes the leftover columns."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).T
    # DoubleTrans doubles the remaining second and third columns
    expected = 2 * X.copy()[:, 1:3]

    ct = ColumnTransformer([('trans1', 'drop', [0])],
                           remainder=DoubleTrans())
    assert_array_equal(ct.fit_transform(X), expected)
    assert_array_equal(ct.fit(X).transform(X), expected)
    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert isinstance(trans, DoubleTrans)
    assert_array_equal(cols, [1, 2])
def test_column_transformer_sparse_remainder_transformer():
    """A sparse-output remainder estimator makes the overall output
    sparse (given a sparse_threshold that allows it)."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).T
    ct = ColumnTransformer([('trans1', Trans(), [0])],
                           remainder=SparseMatrixTrans(),
                           sparse_threshold=0.8)
    X_trans = ct.fit_transform(X)
    assert sparse.issparse(X_trans)
    # SparseMatrixTrans creates 3 features for each column; together
    # with the single column handled by ``transformers`` that gives:
    assert X_trans.shape == (3, 3 + 1)

    expected = np.hstack((X[:, 0].reshape(-1, 1), np.eye(3)))
    assert_array_equal(X_trans.toarray(), expected)

    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert isinstance(trans, SparseMatrixTrans)
    assert_array_equal(cols, [1, 2])
def test_column_transformer_drop_all_sparse_remainder_transformer():
    """All explicit transformers dropped: the output is exactly what the
    sparse remainder estimator produces."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).T
    ct = ColumnTransformer([('trans1', 'drop', [0])],
                           remainder=SparseMatrixTrans(),
                           sparse_threshold=0.8)
    X_trans = ct.fit_transform(X)
    assert sparse.issparse(X_trans)

    # SparseMatrixTrans creates 3 features for each column, thus:
    assert X_trans.shape == (3, 3)
    assert_array_equal(X_trans.toarray(), np.eye(3))

    assert len(ct.transformers_) == 2
    name, trans, cols = ct.transformers_[-1]
    assert name == 'remainder'
    assert isinstance(trans, SparseMatrixTrans)
    assert_array_equal(cols, [1, 2])
def test_column_transformer_get_set_params_with_remainder():
    """get_params/set_params expose the remainder estimator's nested
    parameters via the ``remainder__`` prefix."""
    ct = ColumnTransformer([('trans1', StandardScaler(), [0])],
                           remainder=StandardScaler())
    expected = {
        'transformers': ct.transformers,
        'transformer_weights': None,
        'sparse_threshold': 0.3,
        'n_jobs': None,
        'remainder': ct.remainder,
        'remainder__copy': True,
        'remainder__with_mean': True,
        'remainder__with_std': True,
        'trans1': ct.transformers[0][1],
        'trans1__copy': True,
        'trans1__with_mean': True,
        'trans1__with_std': True,
    }
    assert ct.get_params() == expected

    # nested params of the remainder estimator are settable
    ct.set_params(remainder__with_std=False)
    assert not ct.get_params()['remainder__with_std']

    # replacing trans1 by 'passthrough' removes its nested params
    ct.set_params(trans1='passthrough')
    expected = {
        'transformers': ct.transformers,
        'transformer_weights': None,
        'sparse_threshold': 0.3,
        'n_jobs': None,
        'remainder': ct.remainder,
        'remainder__copy': True,
        'remainder__with_mean': True,
        'remainder__with_std': False,
        'trans1': 'passthrough',
    }
    assert ct.get_params() == expected
def test_column_transformer_no_estimators():
    """An empty transformer list with an estimator remainder: the
    remainder alone handles every column."""
    X = np.array([[0, 1, 2],
                  [2, 4, 6],
                  [8, 6, 4]]).astype('float').T
    ct = ColumnTransformer([], remainder=StandardScaler())

    # the remainder's nested params are still reachable
    assert ct.get_params()['remainder__with_mean']

    X_trans = ct.fit_transform(X)
    assert X_trans.shape == X.shape
    assert len(ct.transformers_) == 1
    assert ct.transformers_[-1][0] == 'remainder'
    assert ct.transformers_[-1][2] == [0, 1, 2]
def test_column_transformer_no_estimators_set_params():
    """set_params works even when the transformer list is empty."""
    ct = ColumnTransformer([])
    ct.set_params(n_jobs=2)
    assert ct.n_jobs == 2
def test_column_transformer_callable_specifier():
    """A callable column specifier receives the full input (array or
    DataFrame) and its return value is stored in ``transformers_``."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    expected = np.array([[0, 1, 2]]).T

    def select_first_array(data):
        # the selector must be handed the complete array
        assert_array_equal(data, X)
        return [0]

    ct = ColumnTransformer([('trans', Trans(), select_first_array)],
                           remainder='drop')
    assert_array_equal(ct.fit_transform(X), expected)
    assert_array_equal(ct.fit(X).transform(X), expected)
    assert callable(ct.transformers[0][2])
    assert ct.transformers_[0][2] == [0]

    pd = pytest.importorskip('pandas')
    df = pd.DataFrame(X, columns=['first', 'second'])

    def select_first_column(data):
        # the selector must be handed the complete DataFrame
        assert_array_equal(data.columns, df.columns)
        assert_array_equal(data.values, df.values)
        return ['first']

    ct = ColumnTransformer([('trans', Trans(), select_first_column)],
                           remainder='drop')
    assert_array_equal(ct.fit_transform(df), expected)
    assert_array_equal(ct.fit(df).transform(df), expected)
    assert callable(ct.transformers[0][2])
    assert ct.transformers_[0][2] == ['first']
| [
"jinli7255@gmail.com"
] | jinli7255@gmail.com |
3ef84fc59f17834ac7d0fd369bd367bc09009366 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_knighted.py | 53b6a3c8b9792513c95ece677355011f50817313 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.nouns._knight import _KNIGHT
#class header
class _KNIGHTED(_KNIGHT):
    """Word entry for "knighted", an inflected form of the base noun
    "knight"."""

    def __init__(self):
        # Set up the base word's state first, then record this form's
        # own metadata.
        _KNIGHT.__init__(self)
        self.name = "KNIGHTED"
        self.specie = 'nouns'
        self.basic = "knight"
        self.jsondata = {}
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.