Dataset schema (one record per source file):

| column | type / range |
|---|---|
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 2–616 |
| content_id | string, length 40 |
| detected_licenses | list, 0–69 items |
| license_type | string, 2 classes |
| repo_name | string, length 5–118 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, length 4–63 |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64, 2.91k–686M, nullable |
| star_events_count | int64, 0–209k |
| fork_events_count | int64, 0–110k |
| gha_license_id | string, 23 classes |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string, 213 classes |
| src_encoding | string, 30 classes |
| language | string, 1 class |
| is_vendor | bool |
| is_generated | bool |
| length_bytes | int64, 2–10.3M |
| extension | string, 246 classes |
| content | string, length 2–10.3M |
| authors | list, 1 item |
| author_id | string, length 0–212 |
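A minimal sketch of how rows with this schema might be loaded and filtered, assuming the dump is also available as a Parquet (or CSV) file with the column names above; the file name used here is hypothetical:

```python
import pandas as pd

# Hypothetical export of the table above; any Parquet/CSV file with these columns works.
df = pd.read_parquet("source_files_sample.parquet")

# Keep non-vendored, non-generated Python files smaller than 100 kB.
mask = (
    (df["language"] == "Python")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 100_000)
)

for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```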
f1b02f8633cfc167c881db6c90101e38b82fd0a1
|
48f6b50c7765d427db95cf5c643240f00d37ee28
|
/docs/conf.py
|
f18ef64f5ac74de0118839f484da396e544f4b3d
|
[
"BSD-2-Clause"
] |
permissive
|
PlayerYrcHen/cdlib
|
943bdbf2a26cc80a6f567268a958ffb12a140c97
|
bc2b5acf81fde1aef5bd9f9a24299cd8eb10b2d4
|
refs/heads/master
| 2023-03-12T15:19:59.028367
| 2021-03-04T12:48:35
| 2021-03-04T12:48:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,694
|
py
|
# -*- coding: utf-8 -*-
#
# CDlib documentation build configuration file, created by
# sphinx-quickstart on Wed May 24 10:59:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
import sphinx_rtd_theme
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['ASLPAw_package', 'ipaddress', 'ASLPAw', 'graph-tool', 'leidenalg', 'numpy', 'scipy', 'networkx', 'karateclub', 'bimlpa', 'sklearn', 'pquality', 'functools', 'nf1',
'ipython', 'pygtk', 'gtk', 'gobject', 'argparse', 'matplotlib', 'matplotlib.pyplot', 'scikit-learn',
'python-igraph', 'wurlitzer', 'pulp', 'seaborn', 'pandas', 'infomap', 'angel-cd', 'omega_index_py3', 'markov_clustering', 'chinese_whispers',
'scipy.sparse']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
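# Illustrative note (assumption, not part of the original config): after the
# sys.modules patch above, importing any of the mocked packages during the
# docs build succeeds and returns a Mock whose attribute lookups yield
# MagicMock objects, so autodoc can import cdlib without the compiled
# dependencies being installed, e.g.:
#
#     import leidenalg            # resolves to the Mock registered above
#     leidenalg.find_partition    # -> MagicMock instance, no ImportError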
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
'navigation_depth': 3
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.7.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#autodoc_default_options = {
# 'autosummary': True,
#}
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CDlib'
copyright = u'2019, Giulio Rossetti'
author = u'Giulio Rossetti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = ['cdlib.algorithms']
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
html_title = u'CDlib - Community Discovery library'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = u'CDlib'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'cdlib_new.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {'css_files': ['_static/css/custom.css']}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CDlibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CDlib.tex', u'CDlib Documentation', u'Giulio Rossetti', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'CDlib', u'CDlib Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CDlib', u'CDlib Documentation',
author, 'CDlib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
autosummary_generate = True
|
[
"giulio.rossetti@gmail.com"
] |
giulio.rossetti@gmail.com
|
e44657d373b7abb167b5cd7ba6bcc10e855de8e4
|
8260dfeb7af3480fe164acc1959af16561d6e85b
|
/workgroup/Addons/gear/Application/Plugins/gear_curveTools.py
|
7a9621414630d7c2c35a4a366f086a7953e30288
|
[] |
no_license
|
jpasserin/gear
|
78e9c93f08d1696a416d2659f1ce29a0fcbfca04
|
9729a7ced44bc1b0e903b64a256af0b6071a000a
|
refs/heads/master
| 2016-09-08T02:01:11.547362
| 2011-07-17T11:01:48
| 2011-07-17T11:01:48
| 2,061,266
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,419
|
py
|
'''
This file is part of GEAR.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com
Url : http://gear.jeremiepasserin.com
Date: 2010 / 11 / 15
'''
## @package gear_curveTools.py
# @author Jeremie Passerin
#
##########################################################
# GLOBAL
##########################################################
import gear
from gear.xsi import xsi, c, dynDispatch
import gear.xsi.primitive as pri
import gear.xsi.curve as cur
import gear.xsi.fcurve as fcv
import gear.xsi.uitoolkit as uit
import gear.xsi.applyop as aop
##########################################################
# XSI LOAD / UNLOAD PLUGIN
##########################################################
# ========================================================
def XSILoadPlugin(in_reg):
in_reg.Author = "Jeremie Passerin"
in_reg.Name = "gear_curveTools"
in_reg.Email = "geerem@hotmail.com"
in_reg.URL = "http://www.jeremiepasserin.com"
in_reg.Major = 1
in_reg.Minor = 0
# Commands
in_reg.RegisterCommand("gear_CurveResampler","gear_CurveResampler")
in_reg.RegisterCommand("gear_ApplyZipperOp","gear_ApplyZipperOp")
in_reg.RegisterCommand("gear_DrawCnsCurve_Linear","gear_DrawCnsCurve_Linear")
in_reg.RegisterCommand("gear_DrawCnsCurve_Cubic","gear_DrawCnsCurve_Cubic")
in_reg.RegisterCommand("gear_MergeCurves","gear_MergeCurves")
in_reg.RegisterCommand("gear_SplitCurves","gear_SplitCurves")
# Operators
in_reg.RegisterOperator("gear_ZipperOp")
return True
# ========================================================
def XSIUnloadPlugin(in_reg):
strPluginName = in_reg.Name
gear.log(str(strPluginName) + str(" has been unloaded."), c.siVerbose)
return True
##########################################################
# CURVE RESAMPLER
##########################################################
# Execute ================================================
def gear_CurveResampler_Execute():
if not xsi.Selection.Count or xsi.Selection(0).Type not in ["crvlist"]:
gear.log("No selection or invalid Selection", gear.sev_error)
return
curve = xsi.Selection(0)
if curve.ActivePrimitive.Geometry.Curves.Count > 1:
gear.log("Curve Resampler works only with single curve", gear.sev_error)
return
ref_crv = uit.pickSession(c.siCurveFilter, "Pick Reference Curve", False)
if not ref_crv:
ref_crv = curve
op = aop.gear_resampler_op(curve, ref_crv, 0, 1)
xsi.InspectObj(op)
##########################################################
# DRAW CONSTRAINED CURVE LINEAR
##########################################################
# Execute ================================================
def gear_DrawCnsCurve_Linear_Execute():
if xsi.Selection.Count < 2:
gear.log("Select enough centers", gear.sev_error)
return
cur.addCnsCurve(xsi.ActiveSceneRoot, "crvCns", xsi.Selection, False, 1)
##########################################################
# DRAW CONSTRAINED CURVE CUBIC
##########################################################
# Execute ================================================
def gear_DrawCnsCurve_Cubic_Execute():
if xsi.Selection.Count < 2:
gear.log("Select enough centers", gear.sev_error)
return
cur.addCnsCurve(xsi.ActiveSceneRoot, "crvCns", xsi.Selection, False, 3)
##########################################################
# MERGE CURVES
##########################################################
# Execute ================================================
def gear_MergeCurves_Execute():
if not xsi.Selection.Count:
gear.log("No selection", gear.sev_error)
return
curves = [curve for curve in xsi.Selection if curve.Type in ["crvlist"]]
if not curves:
gear.log("Invalid selection", gear.sev_error)
return
cur.mergeCurves(curves)
##########################################################
# SPLIT CURVES
##########################################################
# Execute ================================================
def gear_SplitCurves_Execute():
if not xsi.Selection.Count:
gear.log("No selection", gear.sev_error)
return
for curve in xsi.Selection:
if curve.Type not in ["crvlist"]:
gear.log("Invalid selection", gear.sev_warning)
continue
cur.splitCurve(curve)
##########################################################
# ZIPPER OP
##########################################################
# Define =================================================
def gear_ZipperOp_Define(ctxt):
op = ctxt.Source
op.AlwaysEvaluate = False
op.Debug = 0
pdef = XSIFactory.CreateParamDef("Zip", c.siDouble, 0, c.siPersistable|c.siAnimatable, "", "",0,0,10,0,1)
op.AddParameter(pdef)
pdef = XSIFactory.CreateParamDef("Bias", c.siDouble, 0, c.siPersistable|c.siAnimatable, "", "",0.5,0,1,0,1)
op.AddParameter(pdef)
# pdef = XSIFactory.CreateParamDef("Smooth", c.siDouble, 0, c.siPersistable|c.siAnimatable, "", "",0,0,1,0,1)
# op.AddParameter(pdef)
pdef = XSIFactory.CreateParamDef("Type", c.siDouble, 0, c.siPersistable|c.siAnimatable, "", "",0,0,1,0,1)
op.AddParameter(pdef)
pdef = XSIFactory.CreateParamDef("CurveCombo", c.siInt4, 0, c.siPersistable, "", "",0,0,1,0,1)
op.AddParameter(pdef)
pdef = XSIFactory.CreateFCurveParamDef("Start_FCurve")
op.AddParameter(pdef)
pdef = XSIFactory.CreateFCurveParamDef("Speed_FCurve")
op.AddParameter(pdef)
return True
# Layout =================================================
def gear_ZipperOp_OnInit():
type_items = ["Points", 0, "Percentage", 1]
curve_items = ["Start", 0, "Speed", 1]
layout = PPG.PPGLayout
layout.Clear()
layout.AddGroup("Zip")
layout.AddItem("Mute", "Mute")
layout.AddItem("Zip", "Zip")
layout.EndGroup()
layout.AddGroup("Options")
layout.AddEnumControl("Type", type_items, "Type", c.siControlCombo)
layout.AddItem("Bias")
layout.EndGroup()
layout.AddGroup("Profile")
item = layout.AddEnumControl("CurveCombo", curve_items, "Type", c.siControlCombo)
item.SetAttribute(c.siUINoLabel, True)
if PPG.CurveCombo.Value == 0:
item = layout.AddFCurve("Start_FCurve")
item.SetAttribute(c.siUIFCurveLabelX, "Points")
item.SetAttribute(c.siUIFCurveLabelY, "Start")
item.SetAttribute(c.siUIFCurveViewMinX,-.1)
item.SetAttribute(c.siUIFCurveViewMaxX,1.1)
item.SetAttribute(c.siUIFCurveViewMinY,-.1)
item.SetAttribute(c.siUIFCurveViewMaxY,1.1)
item.SetAttribute(c.siUIFCurveGridSpaceX, .1)
item.SetAttribute(c.siUIFCurveGridSpaceY, .1)
else:
item = layout.AddFCurve("Speed_FCurve")
item.SetAttribute(c.siUIFCurveLabelX, "Points")
item.SetAttribute(c.siUIFCurveLabelY, "Speed")
item.SetAttribute(c.siUIFCurveViewMinX,-.1)
item.SetAttribute(c.siUIFCurveViewMaxX,2.1)
item.SetAttribute(c.siUIFCurveViewMinY,-.1)
item.SetAttribute(c.siUIFCurveViewMaxY,1.1)
item.SetAttribute(c.siUIFCurveGridSpaceX, .1)
item.SetAttribute(c.siUIFCurveGridSpaceY, .1)
layout.EndGroup()
PPG.Refresh()
return True
def gear_ZipperOp_CurveCombo_OnChanged():
gear_ZipperOp_OnInit()
# Update =================================================
def gear_ZipperOp_Update(ctxt):
# Inputs -----------------------------------------------
OutPort = ctxt.OutputPort
crv_geo_A = ctxt.GetInputValue(0, 0, 0).Geometry
ncrv_A = crv_geo_A.Curves(0)
crv_geo_B = ctxt.GetInputValue(1, 0, 0).Geometry
ncrv_B = crv_geo_B.Curves(0)
zip = ctxt.GetParameterValue("Zip")
bias = ctxt.GetParameterValue("Bias")
zip_type = ctxt.GetParameterValue("Type")
start_fcv = ctxt.GetParameterValue("Start_FCurve")
speed_fcv = ctxt.GetParameterValue("Speed_FCurve")
pnt_count_A = crv_geo_A.Points.Count
pnt_count_B = crv_geo_B.Points.Count
pos_tuple_A = crv_geo_A.Points.PositionArray
pos_tuple_B = crv_geo_B.Points.PositionArray
if zip_type == 0:
pos_A = [pos_tuple_A[j][i] for i in range(len(pos_tuple_A[0])) for j in range(len(pos_tuple_A))]
pos_B = [pos_tuple_B[j][i] for i in range(len(pos_tuple_B[0])) for j in range(len(pos_tuple_B))]
else:
step = 100 / (pnt_count_B-1.0)
a = [ncrv_A.EvaluatePositionFromPercentage(i*step)[0].Get2() for i in range(pnt_count_B)]
pos_A = [ a[j][i] for j in range(len(a)) for i in range(len(a[0])) ]
step = 100 / (pnt_count_A-1.0)
a = [ncrv_B.EvaluatePositionFromPercentage(i*step)[0].Get2() for i in range(pnt_count_A)]
pos_B = [ a[j][i] for j in range(len(a)) for i in range(len(a[0])) ]
mid_pos = [(pos_A[i]*bias+pos_B[i]*(1-bias)) for i in range(len(pos_A))]
# Process -----------------------------------------------
if OutPort.Index == 2:
t = pos_tuple_A
p = pnt_count_A
else:
t = pos_tuple_B
p = pnt_count_B
pos = []
v = XSIMath.CreateVector3()
for i in range(p):
step = 1/(p-1.0)
v0 = XSIMath.CreateVector3(t[0][i], t[1][i], t[2][i])
v1 = XSIMath.CreateVector3(mid_pos[i*3+0], mid_pos[i*3+1], mid_pos[i*3+2])
v.Sub(v1, v0)
d = start_fcv.Eval(i*step)
if zip < d:
y = 0
else:
y = changerange(zip, d, d+step, 0, 1)
y = speed_fcv.Eval(y)
v.ScaleInPlace(y)
pos.append(v0.X + v.X)
pos.append(v0.Y + v.Y)
pos.append(v0.Z + v.Z)
# Output ------------------------------------------------
Out = ctxt.OutputTarget
Out.Geometry.Points.PositionArray = pos
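# changerange() below linearly remaps x from the source interval [a, b] onto
# the target interval [c, d]; the "+0.0" forces floating-point division.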
def changerange(x, a, b, c, d):
return c + ( x - a ) * (( d-c) / (b-a+0.0))
# Execute ================================================
def gear_ApplyZipperOp_Execute():
if xsi.Selection.Count < 2:
gear.log("Select 2 curve", gear.sev_error)
return
crv_A = xsi.Selection(0)
crv_B = xsi.Selection(1)
if crv_A.Type not in ["crvlist"] or crv_B.Type not in ["crvlist"]:
gear.log("Select 2 curve", gear.sev_error)
return
# Apply Operator ----------------------
op = XSIFactory.CreateObject("gear_ZipperOp")
op.AddIOPort(crv_A.ActivePrimitive)
op.AddIOPort(crv_B.ActivePrimitive)
pStart_fcv = op.Parameters("Start_FCurve").Value
fcv.drawFCurve(pStart_fcv, [[0,0],[1,1]], c.siLinearKeyInterpolation)
pSpeed_fcv = op.Parameters("Speed_FCurve").Value
fcv.drawFCurve(pSpeed_fcv, [[0,0],[1,1]], c.siLinearKeyInterpolation)
op.Connect()
xsi.InspectObj(op)
return op
|
[
"geerem@hotmail.com"
] |
geerem@hotmail.com
|
3cfbf21556fcfdb61b05267fd0a89e6829891eaf
|
3b1b5a931e44566c97c6e4661d6c1c1d4ff1e9f5
|
/8queen.py
|
dfadd235e5487d174297d13b870cabf4e8467540
|
[] |
no_license
|
Mahnoor507/8-Queen-Problem
|
b32530291cc1a9422839e4d2c99ecf036acd4777
|
88a09e91b60242b6e4878cb24a30ec1fba9d702c
|
refs/heads/main
| 2023-02-20T00:22:01.167763
| 2021-01-16T11:34:24
| 2021-01-16T11:34:24
| 330,147,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,467
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[456]:
from random import randint
import numpy as np
# cl1..cl4: the four boards of the initial population; cl[j] = row of the queen in column j
cl1 = np.zeros(8, dtype=int)
cl2 = np.zeros(8, dtype=int)
cl3 = np.zeros(8, dtype=int)
cl4 = np.zeros(8, dtype=int)
for j in range(8):
    cl1[j] = randint(0, 7)
for j in range(8):
    cl2[j] = randint(0, 7)
for j in range(8):
    cl3[j] = randint(0, 7)
for j in range(8):
    cl4[j] = randint(0, 7)
#defining initial population of 4
#list containing 4 boards ... initial population
print(cl1,cl2,cl3,cl4)
def fitnessfun(queen):
count =0
i=0
#creating a board
new=np.zeros((8,8),dtype=int)
#placing queens on that board
for x in range(8):
y=int(queen[x])
new[y,x]=1
for col in range(8):
verticalchk=0;
horizontalchk=0;
fdiagonal=0;
bdiagonal=0;
row=queen[col];
#horizontally checking queens
for c in range(8):
if(new[int(row),int(c)] == 1 and c != col):
horizontalchk=1
#vertically checking queens
for r in range(8):
if(new[r,col] == 1 and r != row):
verticalchk=1
#forward diagonal queens
frow = int(row)
fcol = int(col)
while (frow > 0 and fcol > 0):
frow = frow - 1
fcol = fcol - 1
while (frow < 8 and fcol < 8):
if(new[frow,fcol] == 1 and frow != row and fcol != col):
fdiagonal=1
frow = frow + 1
fcol = fcol + 1
#backward diagonal queens
brow = int(row)
bcol = int(col)
while (brow > 0 and bcol < 7):
brow = brow - 1
bcol = bcol + 1
while (brow < 8 and bcol >= 0):
if(new[brow,bcol] == 1 and brow != row and bcol != col):
bdiagonal=1
brow = brow + 1
bcol = bcol - 1
if horizontalchk!=1 and verticalchk!=1 and fdiagonal!=1 and bdiagonal!=1 :
count=count+1
#total number of non-attacked queens = fitness value of the board
return count;
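#note: a fitness value of 8 means no queen is attacked, i.e. a valid 8-queens solution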
#for counting iterations
count=0
#list for holding boards
blue=[cl1,cl2,cl3,cl4]
print("Finding best possible solution")
flag=False;
while flag!=True:
#array for holding fitness values
green=np.array([0,0,0,0])
y=0;
for x in blue:
green[y]=fitnessfun(x)
y=y+1
#array for holding sorted fitness values
red=np.array([0,0,0,0])
t=green.copy()
red=t
red.sort()
#success yayyy found solution
if 8 in green:
print("yayyyy solutionnnnnnnnnnnnnn")
for i in range(4):
k=green[i]
if(k==8):
print("Required solution is: ")
print(blue[i])
print("Total no of iterations: ")
print(count)
hen=np.zeros((8,8),dtype=int)
var=0
for x in blue[i]:
hen[x,var]=1
var=var+1
print("Board look like this")
print(hen)
flag=True
break;
#re-arranging boards in ascending order of fitness
temp = [None, None, None, None]
for i in range(4):
    j = red[i]
    for b in range(4):
        h = green[b]
        if(h==j):
            temp[i] = blue[b]
blue = temp
#new arrays for working
apple=mango=np.zeros(8,dtype=int)
grapes=np.zeros(8,dtype=int)
peach=np.zeros(8,dtype=int)
#apple mango are parents best ones that kept
apple=blue[3];
mango=blue[2];
#crossover for new child
for x in range(4):
grapes[x]=apple[x]
peach[x]=mango[x]
for x in range(4,8):
grapes[x]=mango[x]
peach[x]=apple[x]
#mutation
ran1=randint(0,7)
while True:
rn=randint(0,7)
if grapes[ran1]!=rn:
grapes[ran1]=rn
break;
ran2=randint(0,7)
while True:
rn=randint(0,7)
if peach[ran2]!=rn:
peach[ran2]=rn
break;
blue=[apple,mango,grapes,peach]
count=count+1
if(count==10000):
print("best possible solution in 10000 iterationss is: ")
print(blue[3])
print("The fitness is: ")
print(red[3])
hen=np.zeros((8,8),dtype=int)
var=0
for x in blue[3]:
hen[x,var]=1
var=var+1
print("Board look like this")
print(hen)
flag=True;
break;
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"noreply@github.com"
] |
Mahnoor507.noreply@github.com
|
d19c3c52e2d9e87f01762f3ff3c0cbad3fdf54ad
|
505b9e87b297db12ad805979dd4455f4dbe0a38a
|
/techtrends/app.py
|
e512abcd87fd7d748c205cb6c823d17dec0a1c00
|
[] |
no_license
|
sahil8060/TechTrends
|
0f18f878f0a5078892669d2bc9b996c61865cb13
|
666ff9e4670dc854508f27c8907d1d785dde4759
|
refs/heads/main
| 2023-03-05T11:11:39.785775
| 2021-02-18T09:14:13
| 2021-02-18T09:14:13
| 333,056,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,619
|
py
|
import sqlite3
from flask import Flask, jsonify, json, render_template, request, url_for, redirect, flash
from werkzeug.exceptions import abort
import logging
# Function to get a database connection.
# This function connects to database with the name `database.db`
db_file = "database.db"
number_of_connections = 0
def get_db_connection():
connection = sqlite3.connect('database.db')
connection.row_factory = sqlite3.Row
return connection
# Function to get a post using its ID
def get_post(post_id):
connection = get_db_connection()
post = connection.execute('SELECT * FROM posts WHERE id = ?',
(post_id,)).fetchone()
connection.close()
return post
def get_number_of_posts():
connection = get_db_connection()
post = connection.execute('SELECT COUNT(*) FROM posts').fetchall()
connection.close()
return post
# Define the Flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'your secret key'
# Define the main route of the web application
@app.route('/')
def index():
global number_of_connections
number_of_connections += 1
connection = get_db_connection()
posts = connection.execute('SELECT * FROM posts').fetchall()
connection.close()
return render_template('index.html', posts=posts)
# Define how each individual article is rendered
# If the post ID is not found a 404 page is shown
@app.route('/<int:post_id>')
def post(post_id):
global number_of_connections
number_of_connections += 1
post = get_post(post_id)
if post is None:
app.logger.info("A non-existing article is accessed")
return render_template('404.html'), 404
else:
app.logger.info('Article "%s" retrieved!', post[2])
return render_template('post.html', post=post)
# Define the About Us page
@app.route('/about')
def about():
app.logger.info('The "About Us" page is retrieved')
return render_template('about.html')
# Define the post creation functionality
@app.route('/create', methods=('GET', 'POST'))
def create():
if request.method == 'POST':
title = request.form['title']
content = request.form['content']
if not title:
app.logger.info('Attempt to create an article without a title')
flash('Title is required!')
else:
connection = get_db_connection()
connection.execute('INSERT INTO posts (title, content) VALUES (?, ?)',
(title, content))
connection.commit()
connection.close()
global number_of_connections
number_of_connections += 1
app.logger.info('A new article "%s" is created ', title)
return redirect(url_for('index'))
return render_template('create.html')
@app.route('/healthz')
def healthcheck():
response = app.response_class(
response=json.dumps({"result":"OK - healthy"}),
status=200,
mimetype='application/json'
)
app.logger.info('Health request successful')
return response
@app.route('/metrics')
def metrics():
posts = list(map(tuple, get_number_of_posts()))
dict_data = {"db_connection_count": str(number_of_connections), "post_count": str(posts[0][0])}
response = app.response_class(
response=json.dumps(dict_data),
status=200,
mimetype='application/json'
)
app.logger.info('Metrics request successful')
return response
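# Illustrative (assumed) responses when the app is running:
#   GET /healthz -> 200, {"result": "OK - healthy"}
#   GET /metrics -> 200, {"db_connection_count": "<n>", "post_count": "<n>"}
# where the counts depend on database contents and traffic served so far.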
# start the application on port 3111
if __name__ == "__main__":
app.run(host='0.0.0.0', port='3111', debug=True)
|
[
"sahilgoyal8060@gmail.com"
] |
sahilgoyal8060@gmail.com
|
1c48c3df7db99ebc81d5a25661d1e37ddb9d6d58
|
a1b5036b5397fccd537bf492b83f52cede2ae254
|
/assistenza/migrations/0009_auto_20210222_1044.py
|
27a2467c2520bd782bf93adcc262ee095b0b3a47
|
[] |
no_license
|
mat0ccdeekk/biomedicalservice
|
a78160902d5698724620b35b7d5a7f56f6cbd935
|
2e8a1306ab01460cff26a8bbdc20eeb8f8ce146a
|
refs/heads/main
| 2023-03-08T15:58:42.054744
| 2021-02-26T10:16:36
| 2021-02-26T10:16:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
# Generated by Django 2.0 on 2021-02-22 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assistenza', '0008_verifica_tornaindietro'),
]
operations = [
migrations.AddField(
model_name='verifica',
name='ceiMultiple',
field=models.CharField(blank=True, default='C.E.I.', max_length=20, null=True),
),
migrations.AlterField(
model_name='verifica',
name='cei',
field=models.ManyToManyField(blank=True, related_name='has_verifica', to='assistenza.normativaCodice', verbose_name='CEI'),
),
migrations.AlterField(
model_name='verifica',
name='noteFinali',
field=models.CharField(blank=True, max_length=300, null=True, verbose_name='Note finali'),
),
migrations.AlterField(
model_name='verifica',
name='noteIniziali',
field=models.CharField(blank=True, max_length=300, null=True, verbose_name='Note iniziali'),
),
migrations.AlterField(
model_name='verifica',
name='verificaElettrica',
field=models.PositiveIntegerField(blank=True, default='0', null=True, verbose_name='Verifica elettrica'),
),
migrations.AlterField(
model_name='verifica',
name='verificaFunzionale',
field=models.PositiveIntegerField(blank=True, default='0', null=True, verbose_name='Verifica funzionale'),
),
]
|
[
"mattia.lavecchia@gmail.com"
] |
mattia.lavecchia@gmail.com
|
db2f1df0ecb4adf327b4598cd6d38a501510a22f
|
d44b82d6477f965509a537db45c2d06215169724
|
/tests/files/isort/permuted_imports/mine.py
|
faab7f66854264eddf2242fd7b46a6c700ac1aff
|
[] |
no_license
|
reef-technologies/reefmerge
|
caaf4a1c70cb51c86ed97bd80f825afeafb749a5
|
19a634627283a52ae5f1fc9528adbc2bd63cab35
|
refs/heads/master
| 2021-03-24T12:39:05.733618
| 2018-01-27T12:50:12
| 2018-01-27T12:50:12
| 100,640,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
import a
import b
import c
import d
|
[
"grzegorz.konefal@reef.pl"
] |
grzegorz.konefal@reef.pl
|
97ba5f2f05e3c6e16b08094510599f2617d5f3cb
|
7c611e76eb9f1fe527365c11fc4ca4dcbfa3391b
|
/tests/project/polls/tests.py
|
81fa971bc9dc8accfc13c5cffbfa331b847bf714
|
[] |
no_license
|
Scott-Wheeler/django-scenic
|
41b911039cf02a9f3bfb4513699c0bc23c913876
|
74465c7df68a535b6b9dfd35f0af06ea21e1c9e8
|
refs/heads/master
| 2020-04-02T01:04:53.308960
| 2016-01-10T17:26:18
| 2016-01-10T17:26:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.timezone import now
from .models import Poll, Choice
STATUS_OK = 200
STATUS_REDIRECT = 302
class MainIndexViewTests(TestCase):
def test_index(self):
response = self.client.get(reverse('main:index'))
self.assertEqual(response.status_code, STATUS_OK)
class IndexViewTests(TestCase):
def test_index(self):
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, STATUS_OK)
class DetailViewTests(TestCase):
def test_detail(self):
poll = Poll.objects.create(
pub_date=now()
)
response = self.client.get(reverse('polls:detail', args=[poll.id]))
self.assertEqual(response.status_code, STATUS_OK)
class VoteViewTests(TestCase):
def test_vote(self):
poll = Poll.objects.create(
pub_date=now()
)
Choice.objects.create(
poll=poll,
votes=0
)
post_data = {
'choice': 1
}
response = self.client.post(reverse('polls:vote', args=[poll.id]), post_data)
self.assertEqual(response.status_code, STATUS_REDIRECT)
def test_invalid_vote(self):
poll = Poll.objects.create(
pub_date=now()
)
post_data = {
'choice': 100
}
response = self.client.post(reverse('polls:vote', args=[poll.id]), post_data)
self.assertEqual(response.status_code, STATUS_OK)
class ResultsViewTests(TestCase):
def test_results(self):
poll = Poll.objects.create(
pub_date=now()
)
response = self.client.get(reverse('polls:results', args=[poll.id]))
self.assertEqual(response.status_code, STATUS_OK)
|
[
"m.pricejones@gmail.com"
] |
m.pricejones@gmail.com
|
202b670956a705c801ec7dd05df6991ae81bf928
|
11b4989e30bfbc4867a2fd3a2bcefb1eb290ec43
|
/quantrocket/cli/subcommands/blotter.py
|
0758fea01e41246bda5884adcddd14eb4401f131
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/quantrocket-client
|
138f0045eac553df3c1d998179669f96fcb66b84
|
a2eba9f5819c76a3327f85a8f3667101240eea99
|
refs/heads/master
| 2023-05-08T18:18:48.001971
| 2021-06-02T20:29:18
| 2021-06-02T20:29:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,372
|
py
|
# Copyright 2018 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from quantrocket.cli.utils.parse import dict_str
def add_subparser(subparsers):
_parser = subparsers.add_parser("blotter", description="QuantRocket blotter CLI", help="Place orders and track executions")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Place one or more orders.
Returns a list of order IDs, which can be used to cancel the orders or check
their status.
Examples:
Place orders from a CSV file.
quantrocket blotter order -f orders.csv
Place orders from a JSON file.
quantrocket blotter order -f orders.json
Place an order by specifying the order parameters on the command line:
quantrocket blotter order --params Sid:FIBBG123456 Action:BUY Exchange:SMART TotalQuantity:100 OrderType:MKT Tif:Day Account:DU12345 OrderRef:my-strategy
"""
parser = _subparsers.add_parser(
"order",
help="place one or more orders",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument(
"-f", "--infile",
metavar="INFILE",
dest="infilepath_or_buffer",
help="place orders from this CSV or JSON file (specify '-' to read file "
"from stdin)")
source_group.add_argument(
"-p", "--params",
nargs="*",
type=dict_str,
metavar="PARAM:VALUE",
help="order details as multiple key-value pairs (pass as 'param:value', for "
"example OrderType:MKT)")
parser.set_defaults(func="quantrocket.blotter._cli_place_orders")
examples = """
Cancel one or more orders by order ID, sid, or order ref.
Examples:
Cancel orders by order ID:
quantrocket blotter cancel -d 6002:45 6001:46
Cancel orders by sid:
quantrocket blotter cancel -i FIBBG123456
Cancel orders by order ref:
quantrocket blotter cancel --order-refs my-strategy
Cancel all open orders:
quantrocket blotter cancel --all
"""
parser = _subparsers.add_parser(
"cancel",
help="cancel one or more orders by order ID, sid, or order ref",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-d", "--order-ids",
metavar="ORDER_ID",
nargs="*",
help="cancel these order IDs")
parser.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="cancel orders for these sids")
parser.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="cancel orders for these order refs")
parser.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="cancel orders for these accounts")
parser.add_argument(
"--all",
action="store_true",
default=False,
dest="cancel_all",
help="cancel all open orders")
parser.set_defaults(func="quantrocket.blotter._cli_cancel_orders")
examples = """
Download order statuses.
Examples:
Download order status by order ID and save to file:
quantrocket blotter status -d 6002:45 6001:46 -o statuses.csv
Download order status for all open orders and display in terminal:
quantrocket blotter status --open | csvlook
Download order status with extra fields and display as YAML:
quantrocket blotter status --open --fields Exchange LmtPrice --json | json2yaml
Download order status of open orders by sid:
quantrocket blotter status -i FIBBG123456 --open
Download order status of open orders by order ref:
quantrocket blotter status --order-refs my-strategy --open
"""
parser = _subparsers.add_parser(
"status",
help="download order statuses",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-d", "--order-ids",
metavar="ORDER_ID",
nargs="*",
help="limit to these order IDs")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to orders for these sids")
filters.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="limit to orders for these order refs")
filters.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="limit to orders for these accounts")
filters.add_argument(
"--open",
action="store_true",
dest="open_orders",
help="limit to open orders")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to orders submitted on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to orders submitted on or before this date")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-f", "--fields",
metavar="FIELD",
nargs="*",
help="return these fields in addition to the default fields (pass '?' or any invalid "
"fieldname to see available fields)")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
outputs.add_argument(
"-j", "--json",
action="store_const",
const="json",
dest="output",
help="format output as JSON (default is CSV)")
parser.set_defaults(func="quantrocket.blotter._cli_download_order_statuses")
examples = """
Query current positions.
There are two ways to view positions: blotter view (default) and broker view.
The default "blotter view" returns positions by account, sid, and order ref. Positions
are tracked based on execution records saved to the blotter database.
"Broker view" (using the `--broker` option) returns positions by account and sid (but
not order ref) as reported directly by the broker.
Examples:
Query current positions:
quantrocket blotter positions
Save current positions to CSV file:
quantrocket blotter positions --outfile positions.csv
Query positions for a single order ref:
quantrocket blotter positions --order-refs my-strategy
Query positions using broker view:
quantrocket blotter positions --broker
"""
parser = _subparsers.add_parser(
"positions",
help="query current positions",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="limit to these order refs (not supported with broker view)")
filters.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="limit to these accounts")
filters.add_argument(
"--diff",
action="store_true",
help="limit to positions where the blotter quantity and broker quantity "
"disagree (requires --broker)")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"--broker",
action="store_const",
dest="view",
const="broker",
help="return 'broker' view of positions (by account and sid) instead "
"of default 'blotter' view (by account, sid, and order ref)")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
outputs.add_argument(
"-j", "--json",
action="store_const",
const="json",
dest="output",
help="format output as JSON (default is CSV)")
parser.set_defaults(func="quantrocket.blotter._cli_download_positions")
examples = """
Generate orders to close positions.
Doesn't actually place any orders but returns an orders file that can be placed
separately. Additional order parameters can be appended with the `--params` option.
This endpoint can also be used to generate executions for marking a position as
closed due to a tender offer, merger/acquisition, etc. (See `quantrocket blotter record`
for more info.)
Examples:
Generate MKT orders to close positions for a particular strategy:
quantrocket blotter close --order-refs my-strategy --params OrderType:MKT Tif:DAY Exchange:SMART
Generate orders and also place them:
quantrocket blotter close -r my-strategy -p OrderType:MKT Tif:DAY Exchange:SMART | quantrocket blotter order -f -
After receiving 23.50 per share in a tender offer for a position, record the execution
in the blotter in order to mark the position as closed:
quantrocket blotter close --sids FIBBG123456 --params Price:23.50 | quantrocket blotter record -f -
"""
parser = _subparsers.add_parser(
"close",
help="generate orders to close positions",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="limit to these order refs")
filters.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="limit to these accounts")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
outputs.add_argument(
"-p", "--params",
nargs="*",
type=dict_str,
metavar="PARAM:VALUE",
help="additional parameters to append to each row in output "
"(pass as 'param:value', for example OrderType:MKT)")
outputs.add_argument(
"-j", "--json",
action="store_const",
const="json",
dest="output",
help="format output as JSON (default is CSV)")
parser.set_defaults(func="quantrocket.blotter._cli_close_positions")
examples = """
Query executions from the executions database.
Examples:
Get a CSV of all executions:
quantrocket blotter executions -o executions.csv
"""
parser = _subparsers.add_parser(
"executions",
help="query executions from the executions database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="limit to these order refs")
filters.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="limit to these accounts")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to executions on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to executions on or before this date")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
parser.set_defaults(func="quantrocket.blotter._cli_download_executions")
examples = """
Record executions that happened outside of QuantRocket's knowledge.
This endpoint does not interact with the broker but simply adds one or more
executions to the blotter database and updates the blotter's record of current
positions accordingly. It can be used to bring the blotter in line with the broker
when they differ. For example, when a position is liquidated because of a tender
offer or merger/acquisition, you can use this endpoint to record the price
received for your shares.
Returns a list of execution IDs inserted into the database.
Examples:
After receiving 23.50 per share in a tender offer for a position, record the execution
in the blotter in order to mark the position as closed:
quantrocket blotter close --sids FIBBG123456 --params Price:23.50 | quantrocket blotter record -f -
Record executions from a CSV file:
quantrocket blotter record -f executions.csv
Record an execution by specifying the parameters on the command line:
quantrocket blotter record --params Sid:FIBBG123456 Action:BUY TotalQuantity:100 Account:DU12345 OrderRef:my-strategy Price:23.50
The required params are:
- Account
- Action ("BUY" or "SELL")
- OrderRef
- Price
- Sid
- TotalQuantity
Optional params (rarely needed):
- Commission (default is 0)
- OrderId (default is an auto-generated ID)
- Time (the time of execution, default is now)
"""
parser = _subparsers.add_parser(
"record",
help="record executions that happened outside of QuantRocket's knowledge",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument(
"-f", "--infile",
metavar="INFILE",
dest="infilepath_or_buffer",
help="record executions from this CSV or JSON file (specify '-' to read file "
"from stdin)")
source_group.add_argument(
"-p", "--params",
nargs="*",
type=dict_str,
metavar="PARAM:VALUE",
help="execution details as multiple key-value pairs (pass as 'param:value', for "
"example Price:23.50)")
parser.set_defaults(func="quantrocket.blotter._cli_record_executions")
examples = """
Query trading performance and return a PDF tearsheet or CSV of results.
Trading performance is broken down by account and order ref and optionally by
sid.
Examples:
Get a Moonchart PDF of all trading performance PNL:
quantrocket blotter pnl -o pnl.pdf --pdf
Get a PDF for a single account and order ref, broken down by sid:
quantrocket blotter pnl --accounts U12345 --order-refs mystrategy1 --details --pdf -o pnl_details.pdf
Get a CSV of performance results for a particular date range:
quantrocket blotter pnl -s 2018-03-01 -e 2018-06-30 -o pnl_2018Q2.csv
"""
parser = _subparsers.add_parser(
"pnl",
help="query trading performance and return a PDF tearsheet or CSV of results",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"-r", "--order-refs",
nargs="*",
metavar="ORDER_REF",
help="limit to these order refs")
filters.add_argument(
"-a", "--accounts",
nargs="*",
metavar="ACCOUNT",
help="limit to these accounts")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to pnl on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to pnl on or before this date")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-d", "--details",
action="store_true",
help="return detailed results for all securities instead of aggregating to "
"account/order ref level (only supported for a single account and order ref "
"at a time)")
outputs.add_argument(
"-t", "--timezone",
help="return execution times in this timezone (default UTC)")
outputs.add_argument(
"--pdf",
action="store_const",
const="pdf",
dest="output",
help="return a PDF tear sheet of PNL (default is to return a CSV)")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
parser.set_defaults(func="quantrocket.blotter._cli_download_pnl")
|
[
"brian@quantrocket.com"
] |
brian@quantrocket.com
|
b2de7e068783a9d963576481237545608664344f
|
3ede6ba53725febc6b30de297297656e4f41a259
|
/Neural Networks/nueral_nets/neuralnet.py
|
c86c19452301ff6e79cc94ceac911ede6db8073a
|
[] |
no_license
|
PranathiPeri/Machine-Learning
|
8265201cd83f6511a6c97120dba81a18e315e28c
|
c8030f50747e2cff2dc2bead18a1deebd4cdf547
|
refs/heads/master
| 2020-03-13T16:24:54.842678
| 2018-04-26T19:05:15
| 2018-04-26T19:05:15
| 131,197,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,960
|
py
|
import csv
import sys
import os
import math
import random
import re
import numpy as np
# Reading Input File and different parameters
neurons=list()
input_file=sys.argv[1]
error_tol = float(sys.argv[3])
train_per = float(sys.argv[2])
no_of_layers = int(sys.argv[4])
for k in range(no_of_layers):
neurons = neurons + [int(sys.argv[5+k])]
infile = input_file
fh = csv.reader(open(infile))
data = list()
train_data = list()
test_data = list()
eta=0.1
error=1.0
counter=0
#Sigmoid Function
def sigmoid(x):
try:
sig = 1.0/(1+math.exp(-x))
return sig
except OverflowError:
if(x>0):
return 1
else:
return 0
for row in fh:
if("\n" not in row):
data.append(row)
#selecting training_set and test set
train_per = train_per/100.0
train = int(train_per*len(data)+0.5)
random.shuffle(data)
train_data = data[:train]
test_data = data[train:]
input_train=list()
target_train=list()
input_test=list()
target_test=list()
for row in train_data:
input_train.append(row[:-1])
target_train.append(row[len(data[0])-1])
for row in test_data:
input_test.append(row[:-1])
target_test.append(row[len(data[0])-1])
weights=list()
input=len(input_train[0])+1
#generating random weights for hidden layer
for i in range(no_of_layers):
output=neurons[i];
temp=list()
for j in range(input*output):
temp.append(random.uniform(0,1))
weights.append(temp)
input=output+1
temp=list()
output=1
#generating random weights for output layer
for j in range(input*output):
temp.append(random.uniform(0,1))
weights.append(temp)
#Back propagation Algorithm for training_set
while((error>error_tol)&(counter<2000)):
counter+=1
error=0.0
#Forward propagation
for z in range(len(input_train)):
temp_weights=list()
temp_train = list()
output_result=list()
delta = list()
a=1
temp_train.append([a]+input_train[z])
output_result.append(temp_train)
temp_train=np.array(temp_train)
for k in range(no_of_layers):
for i in range(neurons[k]):
temp_weights.append(weights[k][(i*(len(weights[k])/neurons[k])):(i+1)*(len(weights[k])/neurons[k])])
temp_weights=np.array(temp_weights)
temp_train=temp_train.astype(np.float)
temp_train=np.transpose(temp_train)
temp_result=np.matmul(temp_weights,temp_train)
temp_result=np.transpose(temp_result)
temp_result=temp_result.tolist()
temp_result=temp_result[0]
net_result=list()
for i in range(len(temp_result)):
b=(sigmoid(temp_result[i]))
net_result=net_result+[b]
temp_weights=list()
temp_train = list()
temp_train.append([a]+net_result)
output_result.append(temp_train)
temp_train=np.array(temp_train)
temp_weights.append(weights[k+1])
temp_weights=np.array(temp_weights)
temp_train=temp_train.astype(np.float)
temp_train=np.transpose(temp_train)
temp_result=np.matmul(temp_weights,temp_train)
temp_result=np.transpose(temp_result)
temp_result=temp_result.tolist()
temp_result=temp_result[0]
net_result=list()
for i in range(len(temp_result)):
b=(sigmoid(temp_result[i]))
net_result=net_result+[b]
temp_delta=(net_result[0])*(1-net_result[0])*(float(target_train[z])-net_result[0])
#training set error calculation
error+=((float(target_train[z])-net_result[0])*(float(target_train[z])-net_result[0])*0.5/len(input_train))
delta.append(temp_delta)
#backward propagation
for j in range(no_of_layers):
temp1_delta=list()
new_weights=list()
for d in range(neurons[len(neurons)-j-1]+1):
h=weights[len(weights)-j-1]
temp_h = list()
temp_delta=list()
for i in range(len(h)/len(delta)):
temp_h.append(h[(i*(len(delta))):(i+1)*(len(delta))])
temp_h=np.array(temp_h)
temp_h=np.transpose(temp_h)
temp_delta.append(delta)
temp_delta=np.array(temp_delta)
h1=np.matmul(temp_delta,temp_h)
h1=h1.tolist()
h1=h1[0][d]
temp_delta1=(output_result[len(output_result)-j-1][0][d])*(1-(output_result[len(output_result)-j-1][0][d]))*(float(h1))
temp1_delta.append(temp_delta1)
for d2 in range(len(delta)):
for d1 in range(neurons[len(neurons)-j-1]+1):
temp_new_weight=eta*delta[d2]*(output_result[len(output_result)-j-1][0][d1])
new_weights.append(temp_new_weight)
for i in range(len(weights[len(weights)-j-1])):
weights[len(weights)-j-1][i]=weights[len(weights)-j-1][i]+new_weights[i]
delta=temp1_delta[1:]
for d2 in range(len(delta)):
for d1 in range(len(input_train[0])):
temp_new_weight=eta*delta[d2]*(float(input_train[z][d1]))
new_weights.append(temp_new_weight)
for i in range(len(weights[0])):
weights[0][i]=weights[0][i]+new_weights[i]
error_test=0.0
#forward propagation and error calculation for test_set
for z in range(len(input_test)):
temp_weights=list()
temp_train = list()
output_result=list()
delta = list()
a=1
temp_train.append([a]+input_test[z])
output_result.append(temp_train)
temp_train=np.array(temp_train)
for k in range(no_of_layers):
for i in range(neurons[k]):
temp_weights.append(weights[k][(i*(len(weights[k])//neurons[k])):(i+1)*(len(weights[k])//neurons[k])])
temp_weights=np.array(temp_weights)
temp_train=temp_train.astype(float)
temp_train=np.transpose(temp_train)
temp_result=np.matmul(temp_weights,temp_train)
temp_result=np.transpose(temp_result)
temp_result=temp_result.tolist()
temp_result=temp_result[0]
net_result=list()
for i in range(len(temp_result)):
b=(sigmoid(temp_result[i]))
net_result=net_result+[b]
temp_weights=list()
temp_train = list()
temp_train.append([a]+net_result)
output_result.append(temp_train)
temp_train=np.array(temp_train)
temp_weights.append(weights[k+1])
temp_weights=np.array(temp_weights)
temp_train=temp_train.astype(float)
temp_train=np.transpose(temp_train)
temp_result=np.matmul(temp_weights,temp_train)
temp_result=np.transpose(temp_result)
temp_result=temp_result.tolist()
temp_result=temp_result[0]
net_result=list()
for i in range(len(temp_result)):
b=(sigmoid(temp_result[i]))
net_result=net_result+[b]
temp_delta=(net_result[0])*(1-net_result[0])*(float(target_test[z])-net_result[0])
#test set error calculation
error_test+=((float(target_test[z])-net_result[0])*(float(target_test[z])-net_result[0])*0.5/len(input_test))
delta.append(temp_delta)
for i in range(no_of_layers):
print("")
print("Hidden Layer"+str(i+1)+":")
print('\t', end=" ")
for j in range(neurons[i]):
print("Neuron"+str(j+1)+":", end=" ")
print(weights[i][j*len(weights[i])//neurons[i]:(j+1)*len(weights[i])//neurons[i]])
print("\t", end=" ")
i+=1
print("")
print("Output Layer"+":")
print('\t', end=" ")
print("Neuron1"+":", end=" ")
print(weights[i])
print("train error:", end=" ")
print(error)
print("test error:", end=" ")
print (error_test)
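The flat-list bookkeeping above can make the actual update rule hard to see. Below is a minimal numpy sketch of the same sigmoid delta rule for a single hidden layer; the names (`one_step`, `w_hidden`, `w_out`) are illustrative, and bias terms are omitted, so this is a reading aid under those assumptions rather than a drop-in replacement for the script.

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def one_step(x, t, w_hidden, w_out, eta=0.5):
    # forward pass
    h = sigmoid(w_hidden @ x)                      # hidden activations
    y = sigmoid(w_out @ h)                         # network output
    # output delta: y * (1 - y) * (t - y), as computed in the training loop above
    delta_out = y * (1 - y) * (t - y)
    # hidden deltas: h * (1 - h) * (w_out^T @ delta_out)
    delta_hidden = h * (1 - h) * (w_out.T @ delta_out)
    # weight updates: eta * delta * activation, added to the current weights
    w_out = w_out + eta * np.outer(delta_out, h)
    w_hidden = w_hidden + eta * np.outer(delta_hidden, x)
    return w_hidden, w_out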
|
[
"noreply@github.com"
] |
PranathiPeri.noreply@github.com
|
b0f2deb61589ddb276a4b53f9daa48f992546e8b
|
153b794b5c142065be19ada86177258d80b8d991
|
/scripts/isb_curl.py
|
b95a716334ec1d17c940062b2e3becb09428b7f6
|
[
"Apache-2.0"
] |
permissive
|
isb-cgc/ISB-CGC-API
|
248baa26894f09ea68cb6b16d95f9ae63d674789
|
df3c7cbf3ef3a1cfc2d4dad6f90a75efb7f52481
|
refs/heads/master
| 2023-07-10T08:16:56.465584
| 2023-06-23T17:17:32
| 2023-06-23T17:17:32
| 45,219,634
| 2
| 1
|
Apache-2.0
| 2023-08-29T19:11:02
| 2015-10-30T00:41:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,449
|
py
|
#! /usr/bin/python2.7
'''
Copyright 2015, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
isb_curl can be called by commandline or used as a library
URL = https://isb-cgc.appspot.com/_ah/api/{API-NAME}/{VERSION}/{ENDPOINT}?{QUERYSTRING-PARAMS}
e.g. for the "cohorts_list" endpoint:
https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/cohorts_list
A. Command Line:
python isb_auth.py # saves the user's credentials to their root directory
python isb_curl.py URL
note: if the endpoint takes a resource in the request body, such as the save_cohort endpoint, use the following:
python isb_curl.py https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/save_cohort?name={YOUR-COHORT-NAME} \
-d '{"Study": "BRCA"}' -H "Content-Type: application/json"
B. Python:
import isb_auth
import isb_curl
import requests
url = 'https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/cohorts_list'
token = isb_curl.get_access_token()
head = {'Authorization': 'Bearer ' + token}
# for GET requests
resp = requests.get(url, headers=head)
# querystring parameters can be added to either the url itself...
url += '?cohort_id=1'
resp = requests.get(url, headers=head)
# ... or passed in with the params kwarg
url = 'https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/cohorts_list'
params = {'cohort_id': 1}
resp = requests.get(url, headers=head, params=params)
# if the endpoint takes a resource in the request body, such as the save_cohort endpoint...
url = 'https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/save_cohort?name=my-new-cohort'
head.update({'Content-Type': 'application/json'})
payload = {"SampleBarcode": "TCGA-02-0001-01C,TCGA-02-0001-10A,TCGA-01-0642-11A"}
resp = requests.post(url, headers=head, json=payload)
# if requests version < 2.4.2
import json
resp = requests.post(url, headers=head, data=json.dumps(payload))
'''
import httplib2
import os
import sys
from oauth2client.file import Storage
import json
CREDENTIALS_LOC_ENV = 'ISB_CREDENTIALS'
DEFAULT_CREDENTIALS_LOC = os.path.join(os.path.expanduser("~"), '.isb_credentials')
def check(assertion, msg):
if not assertion:
error(msg)
def error(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def get_credentials_location():
credentials_location = os.environ.get(CREDENTIALS_LOC_ENV, DEFAULT_CREDENTIALS_LOC)
check(credentials_location, 'Couldn\'t locate the credentials file at {} - run isb_auth.py or check the DEFAULT_CREDENTIALS_LOC at the top of this script.'.format(credentials_location))
return credentials_location
def load_credentials(credentials_location):
storage = Storage(credentials_location)
credentials = storage.get()
check(credentials and not credentials.invalid,
'Couldn\'t locate the credentials file at {} - run isb_auth.py or check the DEFAULT_CREDENTIALS_LOC at the top of this script.'.format(credentials_location))
return credentials
# Although we can use load_credentials to check the expiration (and re-up if needed), we need the
# encrypted ID token, NOT the access_token, to do a request via the ESP. To this end we load the
# file as JSON and pull the provided encrypted token there (it can also be reconstituted).
def get_id_token(credentials_location=get_credentials_location()):
credentials = load_credentials(credentials_location)
if credentials.access_token_expired:
credentials.refresh(httplib2.Http())
creds_json = open(credentials_location, "r")
token = json.loads(creds_json.read())
return token['token_response']['id_token']
def main():
args = sys.argv[1:]
check(args, 'usage: isb_curl.py <curl arguments>')
id_token = get_id_token()
curl_args = ['curl', '-H', 'Authorization: Bearer ' + id_token] + args
os.execvp('curl', curl_args)
# this allows us to call this from command line
if __name__ == '__main__':
main()
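A short library-style usage sketch to complement the command-line path above (illustrative only; it assumes the `requests` package is installed and that this module is importable as `isb_curl`):

import requests
from isb_curl import get_id_token

url = 'https://isb-cgc.appspot.com/_ah/api/cohort_api/v1/cohorts_list'
token = get_id_token()
resp = requests.get(url, headers={'Authorization': 'Bearer ' + token})
print(resp.status_code)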
|
[
"spaquett@systemsbiology.org"
] |
spaquett@systemsbiology.org
|
97f73f1b895eab9fa6064e7d70d372d8016c0db5
|
0ccc31838ee8a357264fea9037f9d70bbf523b99
|
/printTable.py
|
3cb9f518c77550567c745ee635dd4c15fb0ff3c9
|
[] |
no_license
|
AnjaliG1999/Python-Mini-Projects
|
2062734969ede62d3561d337dd9de540880b3ab3
|
13577b511cefaa0c29a189ff89e087e357c340d2
|
refs/heads/master
| 2022-11-07T07:46:29.998363
| 2020-06-28T12:12:09
| 2020-06-28T12:12:09
| 273,784,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
#! python3
#take a list of lists of strings and display it in a well-organized table with each column right-justified
tableData = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
colWidths = list()
for lst in tableData:
lrg = 0
for l in lst:
if len(l) > lrg:
lrg = len(l)
colWidths.append(lrg)
print(colWidths)
for tbl in range(len(tableData[0])): #4 times
for i in range(len(tableData)): #3 times
print(tableData[i][tbl].rjust(colWidths[i]), end = " ")
print()
|
[
"noreply@github.com"
] |
AnjaliG1999.noreply@github.com
|
4ba11d700b9938fd578d7d235221b919038e955c
|
4db34f4ac120894642ff9404a2d06af2e3d5a3f3
|
/backends/src/apollo/hiddenOfSafeInfo/sharingInfo.py
|
2fed72e7e736d977760eada4d8c5c474ce62aa4f
|
[] |
no_license
|
zhanrui/apollo
|
a233449f27d7469e9d33d76bd4709bd05815ddc1
|
a43c007c9db340797b56649bc2b396d21ca50023
|
refs/heads/master
| 2020-05-19T18:55:37.053102
| 2015-05-24T22:45:54
| 2015-05-24T22:45:54
| 29,225,236
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
#! /usr/bin/python
#-*-coding:utf-8-*-
import sys
import os
sys.path.append(os.path.dirname(os.getcwd()))
from common.utils.log import log4py
from apollo.commHandler import CommHandler
class SharingInfo(CommHandler):
def __init__(self):
CommHandler.__init__(self)
pass
def getSharingInfo(self):
return ""
if __name__ == "__main__":
objectTemp=SharingInfo()
try:
dataReportMsg=objectTemp.orgDataReportMsg(objectTemp.getSharingInfo())
objectTemp.sendMsgToUI(dataReportMsg)
progReportMsg=objectTemp.orgProgReportMsg("100", "Sharing information check finished.")
objectTemp.sendMsgToUI(progReportMsg)
except Exception as e:
print(e)
log4py.error("Error while checking sharing information.")
errReportMsg=objectTemp.orgErrReportMsg("Error while checking sharing information.")
objectTemp.sendMsgToUI(errReportMsg)
|
[
"yuxiangyang@rongshangsoft.com"
] |
yuxiangyang@rongshangsoft.com
|
2c93a899ee9b847648dc183b13db47e7f6d55298
|
135c04de6b62c94e0f444689cf6cce9bd7f15754
|
/demo/migrations/0003_auto_20201128_1756.py
|
6e90bd83ae026ac8b248363efd27f78f3e5b8250
|
[] |
no_license
|
nikolata/React-and-Django-Full-Stack
|
00781bee75fbf71ae5f75084a2cdf5e17d301112
|
ebd815b191d61330e255f0abe721483a7359b116
|
refs/heads/main
| 2023-01-27T23:35:59.894326
| 2020-12-01T23:37:28
| 2020-12-01T23:37:28
| 316,780,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
# Generated by Django 3.1.3 on 2020-11-28 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0002_auto_20201128_1745'),
]
operations = [
migrations.AddField(
model_name='book',
name='cover',
field=models.ImageField(blank=True, upload_to='covers/'),
),
migrations.AddField(
model_name='book',
name='description',
field=models.TextField(blank=True, max_length=256),
),
migrations.AddField(
model_name='book',
name='is_published',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='book',
name='price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
),
migrations.AddField(
model_name='book',
name='published',
field=models.DateField(blank=True, null=True),
),
]
|
[
"npmetodiev@gmail.com"
] |
npmetodiev@gmail.com
|
d27210c3e09681fd8068bbef5a96b896d1101486
|
4bfc3c184e736bb68dccbb6d5657f11c950df002
|
/tests/common/test_op/prelu_grad.py
|
252d3596ca9e9a3489c806cc07e116a7f75ad335
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
laekov/akg
|
159aa64ef6135222b5af784c408731275dfa9bdb
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
refs/heads/master
| 2022-12-01T04:09:03.548063
| 2020-08-19T08:38:57
| 2020-08-19T08:41:28
| 288,678,192
| 0
| 0
|
Apache-2.0
| 2020-08-19T08:41:30
| 2020-08-19T08:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 10,574
|
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function:prelu"""
import akg.tvm
import akg.topi
from akg.utils import validation_check as vc_util
from akg.utils import custom_tiling as ct_util
from akg.utils import kernel_exec as utils
add_set_dim_map = {
#
str(((1, 64, 112, 112), (64,), "float16")): ((1, 1), (64, 1), (112, 1), (112, 1)),
str(((1, 64, 56, 56), (64,), "float16")): ((1, 1), (64, 1), (56, 1), (56, 1)),
str(((1, 128, 56, 56), (128,), "float16")): ((1, 1), (128, 1), (56, 1), (56, 1)),
str(((1, 128, 28, 28), (128,), "float16")): ((1, 1), (128, 1), (28, 1), (28, 1)),
str(((1, 256, 28, 28), (256,), "float16")): ((1, 1), (256, 1), (28, 1), (28, 1)),
str(((1, 256, 14, 14), (256,), "float16")): ((1, 1), (256, 1), (14, 1), (14, 1)),
str(((1, 512, 14, 14), (512,), "float16")): ((1, 1), (512, 1), (14, 1), (14, 1)),
str(((1, 512, 7, 7), (512,), "float16")): ((1, 1), (512, 1), (7, 1), (7, 1)),
#
str(((1, 64, 112, 112), (1,), "float16")): ((1, 1), (64, 1), (112, 1), (112, 1)),
str(((1, 64, 56, 56), (1,), "float16")): ((1, 1), (64, 1), (56, 1), (56, 1)),
str(((1, 128, 56, 56), (1,), "float16")): ((1, 1), (128, 1), (56, 1), (56, 1)),
str(((1, 128, 28, 28), (1,), "float16")): ((1, 1), (128, 1), (28, 1), (28, 1)),
str(((1, 256, 28, 28), (1,), "float16")): ((1, 1), (256, 1), (28, 1), (28, 1)),
str(((1, 256, 14, 14), (1,), "float16")): ((1, 1), (256, 1), (14, 1), (14, 1)),
str(((1, 512, 14, 14), (1,), "float16")): ((1, 1), (512, 1), (14, 1), (14, 1)),
str(((1, 512, 7, 7), (1,), "float16")): ((1, 1), (512, 1), (7, 1), (7, 1)),
#
str(((1, 64, 112, 112), (64,), "float32")): ((1, 1), (64, 1), (112, 1), (112, 1)),
str(((1, 64, 56, 56), (64,), "float32")): ((1, 1), (64, 1), (56, 1), (56, 1)),
str(((1, 128, 56, 56), (128,), "float32")): ((1, 1), (128, 1), (56, 1), (56, 1)),
str(((1, 128, 28, 28), (128,), "float32")): ((1, 1), (128, 1), (28, 1), (28, 1)),
str(((1, 256, 28, 28), (256,), "float32")): ((1, 1), (256, 1), (28, 1), (28, 1)),
str(((1, 256, 14, 14), (256,), "float32")): ((1, 1), (256, 1), (14, 1), (14, 1)),
str(((1, 512, 14, 14), (512,), "float32")): ((1, 1), (512, 1), (14, 1), (14, 1)),
str(((1, 512, 7, 7), (512,), "float32")): ((1, 1), (512, 1), (7, 1), (7, 1)),
#
str(((1, 64, 112, 112), (1,), "float32")): ((1, 1), (64, 1), (112, 1), (112, 1)),
str(((1, 64, 56, 56), (1,), "float32")): ((1, 1), (64, 1), (56, 1), (56, 1)),
str(((1, 128, 56, 56), (1,), "float32")): ((1, 1), (128, 1), (56, 1), (56, 1)),
str(((1, 128, 28, 28), (1,), "float32")): ((1, 1), (128, 1), (28, 1), (28, 1)),
str(((1, 256, 28, 28), (1,), "float32")): ((1, 1), (256, 1), (28, 1), (28, 1)),
str(((1, 256, 14, 14), (1,), "float32")): ((1, 1), (256, 1), (14, 1), (14, 1)),
str(((1, 512, 14, 14), (1,), "float32")): ((1, 1), (512, 1), (14, 1), (14, 1)),
str(((1, 512, 7, 7), (1,), "float32")): ((1, 1), (512, 1), (7, 1), (7, 1)),
#
str(((128, 64, 112, 112), (64,), "float16")): ((1, 1), (1, 1), (16, 1), (112, 1)),
str(((128, 64, 56, 56), (64,), "float16")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 56, 56), (128,), "float16")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 28, 28), (128,), "float16")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 28, 28), (256,), "float16")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 14, 14), (256,), "float16")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 14, 14), (512,), "float16")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 7, 7), (512,), "float16")): ((1, 1), (1, 1), (7, 1), (7, 1)),
#
str(((128, 64, 112, 112), (1,), "float16")): ((1, 1), (1, 1), (112, 1), (112, 1)),
str(((128, 64, 56, 56), (1,), "float16")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 56, 56), (1,), "float16")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 28, 28), (1,), "float16")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 28, 28), (1,), "float16")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 14, 14), (1,), "float16")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 14, 14), (1,), "float16")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 7, 7), (1,), "float16")): ((1, 1), (1, 1), (7, 1), (7, 1)),
#
str(((128, 64, 112, 112), (64,), "float32")): ((1, 1), (1, 1), (112, 1), (112, 1)),
str(((128, 64, 56, 56), (64,), "float32")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 56, 56), (128,), "float32")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 28, 28), (128,), "float32")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 28, 28), (256,), "float32")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 14, 14), (256,), "float32")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 14, 14), (512,), "float32")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 7, 7), (512,), "float32")): ((1, 1), (1, 1), (7, 1), (7, 1)),
#
str(((128, 64, 112, 112), (1,), "float32")): ((1, 1), (1, 1), (112, 1), (112, 1)),
str(((128, 64, 56, 56), (1,), "float32")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 56, 56), (1,), "float32")): ((1, 1), (1, 1), (56, 1), (56, 1)),
str(((128, 128, 28, 28), (1,), "float32")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 28, 28), (1,), "float32")): ((1, 1), (1, 1), (28, 1), (28, 1)),
str(((128, 256, 14, 14), (1,), "float32")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 14, 14), (1,), "float32")): ((1, 1), (1, 1), (14, 1), (14, 1)),
str(((128, 512, 7, 7), (1,), "float32")): ((1, 1), (1, 1), (7, 1), (7, 1)),
}
def add_set_dim_func(dy, A, w):
shape1 = [x.value for x in dy.shape]
shape2 = [x.value for x in w.shape]
hash_key = gen_set_dim_key(dy, shape1, shape2)
return [ct_util.set_dims_by_key(hash_key, add_set_dim_map), hash_key]
def gen_set_dim_key(dy, shape1, shape2):
key = str((tuple(shape1), tuple(shape2), dy.dtype))
return key
@ct_util.reg_set_dim_func(add_set_dim_func)
def prelu_grad(dy, A, w):
"""
brief Computes the backward (gradient) pass of prelu for a tensor.
\f[
dw = sum(dy * \\partial(prelu(A)) / \\partial w)
dA = A > 0 ? dy : dy * w
\f]
param inputs akg.tvm.Tensor of type float16, float32
return akg.tvm.Tensor of same type and shape as inputs
"""
shape = [x.value for x in dy.shape]
dtype = dy.dtype
shape1 = [x.value for x in A.shape]
dtype1 = A.dtype
shape2 = [x.value for x in w.shape]
dtype2 = w.dtype
assert len(shape) == 4, "dy must be a 4-dim tensor (NCHW)"
assert len(shape1) == 4, "A must be a 4-dim tensor (NCHW)"
assert len(shape2) == 1, "w must be a 1-dim tensor"
assert (shape2[0] == shape1[1] or shape2[0] == 1), "w must have either a single element or one element per input channel"
assert (shape[0] == shape1[0] and shape[1] == shape1[1] and shape[2] == shape1[2] and shape[3] == shape1[3]), "dy and A must have the same shape"
check_list = ["float16", "float32"]
if not (dtype1.lower() in check_list and dtype2.lower() in check_list and dtype.lower() in check_list):
raise RuntimeError("tile_cce only support %s while dtype is %s and %s and %s" % (",".join(check_list), dtype, dtype1, dtype2))
vc_util.check_shape(shape)
vc_util.check_shape(shape1)
vc_util.check_shape(shape2)
def grad_dsl():
w_reshape = akg.topi.reshape(w, (1, shape2[0], 1, 1))
w_broadcast = akg.topi.broadcast_to(w_reshape, shape1)
dA = akg.tvm.compute(shape,
lambda *i: akg.tvm.if_then_else(
A(*i) >= akg.tvm.const(0, dtype),
dy(*i), dy(*i) * w_broadcast(*i)
))
# dy * \partial(prelu(A)) / \partial w
dw_intermediate = akg.tvm.compute(shape,
lambda *i: akg.tvm.if_then_else(
A(*i) >= akg.tvm.const(0, dtype),
akg.tvm.const(0, dtype), dy(*i) * A(*i)
))
# hybrid accuracy: sum use float32, other use fp16
# if dtype.lower() is not "float32":
# dw_intermediate = akg.topi.cast(dw_intermediate, "float32")
if shape2[0] == 1:
# all channel share one w
#dw = akg.topi.sum(dw_intermediate)
dw = akg.topi.sum(dw_intermediate, axis=3)
dw = akg.topi.sum(dw, axis=2)
dw = akg.topi.sum(dw, axis=1)
dw = akg.topi.sum(dw, axis=0)
# dw = akg.topi.sum(dw_intermediate, axis=1)
# dw = akg.topi.sum(dw, axis=2)
# dw = akg.topi.sum(dw, axis=1)
# dw = akg.topi.sum(dw, axis=0)
#dw = akg.tvm.compute(shape, lambda *indice: akg.tvm.sum(dw_intermediate(*indice), axis=[0,1,2,3]), name="dw")
#dw = akg.lang.cce.sum(dw_intermediate, axis=3, keepdims=False)
#dw = akg.lang.cce.sum(dw_intermediate, axis=2, keepdims=False)
#dw = akg.lang.cce.sum(dw_intermediate, axis=1, keepdims=False)
#dw = akg.lang.cce.sum(dw_intermediate, axis=0, keepdims=False)
else:
# all channel use separate w
# dw = akg.topi.sum(dw_intermediate, axis=[0,2,3]) # Accuracy is not up to standard
dw = akg.topi.sum(dw_intermediate, axis=3)
dw = akg.topi.sum(dw, axis=2)
dw = akg.topi.sum(dw, axis=0)
# dw = akg.topi.sum(dw_intermediate, axis=1)
# dw = akg.topi.sum(dw, axis=1)
# dw = akg.topi.sum(dw, axis=0)
# hybrid accuracy: sum use float32, other use fp16
# if dtype.lower() is not "float32":
# dw = akg.topi.cast(dw, "float16")
return dA, dw
attrs = {"pragma_checkcoincident": 0, "pragma_modshift": 1}
return grad_dsl(), attrs
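For reference, the two docstring formulas can be cross-checked against a plain numpy sketch (illustrative only: `dy` and `A` are NCHW arrays, `w` is a one-element or per-channel slope array; this is not akg code):

import numpy as np

def prelu_grad_reference(dy, A, w):
    # dA = dy where A >= 0, otherwise dy * w (w broadcast over the channel axis)
    w_b = w.reshape(1, -1, 1, 1)
    dA = np.where(A >= 0, dy, dy * w_b)
    # dw sums dy * A over the region where A < 0
    dw_elems = np.where(A >= 0, 0.0, dy * A)
    if w.size == 1:
        dw = dw_elems.sum()                # single shared slope
    else:
        dw = dw_elems.sum(axis=(0, 2, 3))  # one slope per channel
    return dA, dw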
|
[
"ckey.chengbin@huawei.com"
] |
ckey.chengbin@huawei.com
|
ee3adc81c5be7820b0c771c7d02a4e8adaeef85c
|
ac16a937f32602cf16114463f8e875a972f64c27
|
/docs/dolfin/1.4.0/python/source/demo/documented/neumann-poisson/python/demo_neumann-poisson.py
|
ac37eacd166e14d35fa167b4c5851f1a81f8b70c
|
[] |
no_license
|
mparno/fenics-web
|
2073248da6f9918ffedbe9be8a3433bc1cbb7ffb
|
7202752da876b1f9ab02c1d5a5f28ff5da526528
|
refs/heads/master
| 2021-05-05T04:45:46.436236
| 2016-12-06T20:25:44
| 2016-12-06T20:25:44
| 118,628,385
| 2
| 0
| null | 2018-01-23T15:21:47
| 2018-01-23T15:21:46
| null |
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
"""
This demo program illustrates how to solve Poisson's equation
- div grad u(x, y) = f(x, y)
on the unit square with pure Neumann boundary conditions:
du/dn(x, y) = -sin(5*x)
and source f given by
f(x, y) = 10*exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)
Since only Neumann conditions are applied, u is only determined up to
a constant c by the above equations. An additional constraint is thus
required, for instance
\int u = 0
This can be accomplished by introducing the constant c as an
additional unknown (to be sought in the space of real numbers)
and the above constraint.
"""
# Copyright (C) 2010 Marie E. Rognes
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg 2011
#
# First added: 2010-05-10
# Last changed: 2012-11-12
# Begin demo
from dolfin import *
# Create mesh and define function space
mesh = UnitSquareMesh(64, 64)
V = FunctionSpace(mesh, "CG", 1)
R = FunctionSpace(mesh, "R", 0)
W = V * R
# Define variational problem
(u, c) = TrialFunctions(W)
(v, d) = TestFunctions(W)
f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)")
g = Expression("-sin(5*x[0])")
a = (inner(grad(u), grad(v)) + c*v + u*d)*dx
L = f*v*dx + g*v*ds
# Compute solution
w = Function(W)
solve(a == L, w)
(u, c) = w.split()
# Plot solution
plot(u, interactive=True)
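Spelling out the variational problem that the forms `a` and `L` above encode (a restatement for readability, not part of the original demo): find (u, c) in V x R such that

\int_\Omega \nabla u \cdot \nabla v \, dx + \int_\Omega c\, v \, dx + \int_\Omega u\, d \, dx
    = \int_\Omega f\, v \, dx + \int_{\partial\Omega} g\, v \, ds
    \qquad \text{for all } (v, d) \in V \times \mathbb{R}.

The term tested against the real number d enforces \int_\Omega u \, dx = 0, which fixes the otherwise undetermined additive constant.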
|
[
"johannr@simula.no"
] |
johannr@simula.no
|
b474c5650b5e463828451cdff9bd01b8d5ecb122
|
32c915adc51bdb5d2deab2a592d9f3ca7b7dc375
|
/Chapter_6_programming_tasks/printscreen_students.py
|
bd8ab316a53d216d9d384dd18f15599fc81cefa2
|
[] |
no_license
|
nervig/Starting_Out_With_Python
|
603c2b8c9686edcf92c1a90596d552b873fe6229
|
d617ee479c7c77038331b5f262e00f59e8e90070
|
refs/heads/master
| 2023-02-25T07:14:12.685417
| 2021-02-02T18:45:00
| 2021-02-02T18:45:00
| 335,391,362
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
def main():
read_students = open('students.txt', 'r')
name_student = read_students.readline()
while name_student != '':
score_student = int(read_students.readline())
name_student = name_student.rstrip('\n')
# show the record
print("Name of students: ", name_student)
print("Numbers of score of students: ", score_student)
name_student = read_students.readline()
# close the file
read_students.close()
# call main function
main()
|
[
"solide@yandex.ru"
] |
solide@yandex.ru
|
a89627ea6bf41889f9a0abb3633d45f5089c6a2a
|
a46e07b2914dbbf9f350a9280a8436305584a16b
|
/app_comments.py
|
a8de2f83633ed74f561660f8492652491641554f
|
[] |
no_license
|
kllelndhlm/kllelndhlm-lists
|
04bcf7fa5ffc739ec075e37a83f641df1afb9ff6
|
cfe89c5f342df655a765d228f425047dd3d2f5f1
|
refs/heads/main
| 2023-04-25T03:39:53.487096
| 2021-05-09T19:53:38
| 2021-05-09T19:53:38
| 349,972,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from flask import session, render_template, redirect, request
from db import db
from werkzeug.security import check_password_hash, generate_password_hash
import secrets
from datetime import datetime, timezone
def send_comment(username, list_name, content, visible):
sent_at = datetime.now(timezone.utc)
sql = "INSERT INTO message (username, list_name, content, visible, sent_at) VALUES (:username, :list_name, :content, :visible, :sent_at)"
db.session.execute(sql, {"username":username, "list_name":list_name, "content":content, "visible":visible, "sent_at":sent_at})
db.session.commit()
return True
def hide_comment(comment_id, visible):
sql = "UPDATE message SET visible=0 WHERE id=:comment_id;"
db.session.execute(sql, {"comment_id":comment_id})
db.session.commit()
return True
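A hedged usage sketch showing how these helpers might be wired into a Flask view (the route name, form fields and app object are hypothetical and not part of this file):

from flask import Flask, request, redirect
import app_comments

app = Flask(__name__)

@app.route("/comment", methods=["POST"])
def post_comment():
    app_comments.send_comment(
        username=request.form["username"],
        list_name=request.form["list_name"],
        content=request.form["content"],
        visible=1,
    )
    return redirect("/")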
|
[
"kalle.lindholm@helsinki.fi"
] |
kalle.lindholm@helsinki.fi
|
b138033de087935a641b4d090a3a255f40998e4f
|
5413990914fbfd5eb928a5aae57c5554ccd07264
|
/HRD/HRD/settings.py
|
0e1bdc528e227dea1a993cc95648e13f2f9e88f0
|
[] |
no_license
|
jhoveID/DsignV2
|
16095165199cdc709568ddf60808c6f2055fc1a9
|
495d17ff17726c1e2d602c41ba26d7fb359272f6
|
refs/heads/master
| 2023-04-15T09:04:14.497653
| 2021-04-23T00:42:41
| 2021-04-23T00:42:41
| 360,357,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
"""
Django settings for HRD project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-2kxm=q0ic@#1+-06abwofecrgxkh_3t@#!as=r1xbd)8l@8)qs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'mypage',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'HRD.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR /"templates"
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'HRD.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"jhoveHQ@gmail.com"
] |
jhoveHQ@gmail.com
|
f489ad3f083ebeb1bc6342c14459de37ca0830a5
|
04f6dac45bfba48d14f00f7b9d05e1071bc2f064
|
/baselines/deepq/simple_conservative.py
|
968a0bd7db48f601dae88147531e514889fdf0b1
|
[
"MIT"
] |
permissive
|
masonwright14/baselines
|
d34eea5ff6b17d57e2e18ba5a5f687c9e4bbfb32
|
e7f08bdf154cc22bcba4ff76d53ca08f544cc15f
|
refs/heads/master
| 2020-03-13T16:39:26.580214
| 2018-08-08T20:10:49
| 2018-08-08T20:10:49
| 131,203,102
| 0
| 0
| null | 2018-04-26T19:45:34
| 2018-04-26T19:45:34
| null |
UTF-8
|
Python
| false
| false
| 61,739
|
py
|
import os
import tempfile
import zipfile
import tensorflow as tf
import cloudpickle
import numpy as np
import baselines.common.tf_util as U
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
@staticmethod
def load_for_multiple_nets_with_scope(path, scope):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act_params["scope"] = scope
cur_graph = tf.Graph()
with cur_graph.as_default():
act = deepq.build_act(**act_params)
sess = tf.Session(graph=cur_graph)
with sess.as_default():
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params), cur_graph, sess
@staticmethod
def load_for_multiple_nets(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
cur_graph = tf.Graph()
with cur_graph.as_default():
act = deepq.build_act(**act_params)
sess = tf.Session(graph=cur_graph)
with sess.as_default():
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params), cur_graph, sess
@staticmethod
def load(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
cur_graph = tf.Graph()
with cur_graph.as_default():
act = deepq.build_act(**act_params)
sess = tf.Session(graph=cur_graph)
with sess.as_default():
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
@staticmethod
def load_with_scope(path, scope):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act_params["scope"] = scope
cur_graph = tf.Graph()
with cur_graph.as_default():
act = deepq.build_act(**act_params)
sess = tf.Session(graph=cur_graph)
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
U.load_state(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def save_with_sess(self, sess, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
with sess.as_default():
# print("Saving state now")
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def save(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
U.save_state(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def load_for_multiple_nets_with_scope(path, scope):
return ActWrapper.load_for_multiple_nets_with_scope(path, scope)
def load_for_multiple_nets(path):
return ActWrapper.load_for_multiple_nets(path)
def load_with_scope(path, scope):
return ActWrapper.load_with_scope(path, scope)
def load(path):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load(path)
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
ep_mean_length=100,
scope="deepq_train"):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = tf.Session()
sess.__enter__()
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise,
scope=scope
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
with sess.as_default():
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
with sess.as_default():
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
with sess.as_default():
update_target()
mean_ep_reward = round(np.mean(episode_rewards[(-ep_mean_length - 1):-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean " + str(ep_mean_length) + " episode reward", mean_ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > ep_mean_length and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_ep_reward))
with sess.as_default():
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
with sess.as_default():
U.load_state(model_file)
return act
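A minimal usage sketch for the `learn` wrapper above (illustrative; it assumes a Gym environment and the standard `deepq.models.mlp` model builder from baselines, neither of which is defined in this file):

import gym
from baselines import deepq
from baselines.deepq import simple_conservative

env = gym.make("CartPole-v0")
model = deepq.models.mlp([64])            # one hidden layer with 64 units
act = simple_conservative.learn(
    env,
    q_func=model,
    lr=1e-3,
    max_timesteps=100000,
    print_freq=10,
)
act.save("cartpole_deepq.pkl")            # ActWrapper.save, defined above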
def learn_multiple_nets(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
ep_mean_length=100):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
cur_graph = tf.Graph()
with cur_graph.as_default():
sess = tf.Session(graph=cur_graph)
sess.__enter__()
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_ep_reward = round(np.mean(episode_rewards[(-ep_mean_length - 1):-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean " + str(ep_mean_length) + " episode reward", mean_ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > ep_mean_length and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
U.load_state(model_file)
return act
def learn_and_save(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
ep_mean_length=100,
scope="deepq_train",
path_for_save=None):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
callback: (locals, globals) -> None
function called at every step with the state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = tf.Session()
sess.__enter__()
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise,
scope=scope
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
with sess.as_default():
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
# print("Reset env")
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# print("Minimize error")
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
with sess.as_default():
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
with sess.as_default():
# print("update target")
update_target()
mean_ep_reward = round(np.mean(episode_rewards[(-ep_mean_length - 1):-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean " + str(ep_mean_length) + " episode reward", mean_ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
# print("Checkpoint")
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > ep_mean_length and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_ep_reward))
with sess.as_default():
# print("Saving current state")
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
with sess.as_default():
# print("Loading old state")
U.load_state(model_file)
# for var in tf.global_variables():
# print('all variables: ' + var.op.name)
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
if path_for_save is not None:
act.save_with_sess(sess, path=path_for_save)
return act
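# --- Hedged usage sketch (added; not part of the original training code) ---
# Illustrates one way the ActWrapper returned above could be driven for greedy
# evaluation. Assumptions: a gym-style env, an active default TF session (e.g.
# inside `with sess.as_default():`), and the same act(...) calling convention used
# in the training loop above (obs batched with [None], update_eps=0. for greedy actions).
def _example_evaluate(act, env, episodes=10):
    total_reward = 0.0
    for _ in range(episodes):
        obs, done = env.reset(), False
        while not done:
            action = act(np.array(obs)[None], update_eps=0.)[0]
            obs, rew, done, _ = env.step(action)
            total_reward += rew
    return total_reward / episodes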
def print_debug_info(scope_old, scope_new, sess):
vars_in_scope_old = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_old)
print("Variables in scope: " + scope_old)
for v in vars_in_scope_old:
print(v)
vars_in_scope_new = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_new)
print("\nVariables in scope: " + scope_new)
for v in vars_in_scope_new:
print(v)
trainables = sess.run(tf.trainable_variables())
print("\nTrainable variables:")
for v in trainables:
print(v)
def print_old_and_new_weights(scope_old, scope_new, sess):
old_weights_name = scope_old + "/q_func/fully_connected/weights:0"
old_weights = [v for v in tf.global_variables() if v.name == old_weights_name][0]
print("old weights:")
print(old_weights)
old_value = sess.run(old_weights)
print(old_value)
new_weights_name = scope_new + "/q_func/fully_connected/weights:0"
new_weights = [v for v in tf.global_variables() if v.name == new_weights_name][0]
print("new weights:")
print(new_weights)
new_value = sess.run(new_weights)
print(new_value)
# copy_into_new_weights = new_weights.assign(old_weights)
# sess.run(copy_into_new_weights)
# new_value_updated = sess.run(new_weights)
# print("updated value of new weights:")
# print(new_value_updated)
def overwrite_new_net_with_old(scope_old, scope_new, sess):
net_name_suffixes = [
"eps:0",
"/q_func/fully_connected/weights:0",
"/q_func/fully_connected/biases:0",
"/q_func/fully_connected_1/weights:0",
"/q_func/fully_connected_1/biases:0",
"/q_func/fully_connected_2/weights:0",
"/q_func/fully_connected_2/biases:0"
]
for suffix in net_name_suffixes:
old_var_name = scope_old + suffix
new_var_name = scope_new + suffix
old_var = [v for v in tf.global_variables() if v.name == old_var_name][0]
new_var = [v for v in tf.global_variables() if v.name == new_var_name][0]
copy_var = new_var.assign(old_var)
sess.run(copy_var)
old_var_value = sess.run(old_var)
updated_new_var_value = sess.run(new_var)
        if not np.array_equal(old_var_value, updated_new_var_value):
            raise ValueError("Not equal: " + str(old_var_value) + "\n" +
                             str(updated_new_var_value))
def retrain_and_save(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.5,
exploration_initial_eps=0.3,
exploration_final_eps=0.03,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
ep_mean_length=100,
scope_old="deepq_train",
scope_new="deepq_train_retrained",
prefix_for_save=None,
save_count=4):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
buffer_size: int
size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_initial_eps: float
        initial value of random action probability
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns True, training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
if save_count < 1:
raise ValueError("save_count must be positive")
sess = tf.Session()
sess.__enter__()
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, _ = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise,
scope=scope_new
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from exploration_initial_eps.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=exploration_initial_eps,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
# overwrite q_func values
# with values from the inner "q_func_old" of old network,
# where q_func is a lambda around a network with fully_connected() and relu() parts.
# q_func_old has scope of scope_old, and q_func has scope of scope_new.
# print_debug_info(scope_old, scope_new, sess)
overwrite_new_net_with_old(scope_old, scope_new, sess)
# print_old_and_new_weights(scope_old, scope_new, sess)
update_target()
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
save_iter = 1
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between
# perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with
# eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration,
# Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t)
+ exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
with sess.as_default():
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
# print("Reset env")
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# print("Minimize error")
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
with sess.as_default():
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
with sess.as_default():
# print("update target")
update_target()
mean_ep_reward = round(np.mean(episode_rewards[(-ep_mean_length - 1):-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean " + str(ep_mean_length) + " episode reward", mean_ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
# print("Checkpoint")
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > ep_mean_length and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_ep_reward))
with sess.as_default():
# print("Saving current state")
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_ep_reward
            if prefix_for_save is not None and t > save_iter * (max_timesteps / save_count):
cur_save_path = prefix_for_save + "_r" + str(save_iter) + ".pkl"
act.save_with_sess(sess, path=cur_save_path)
save_iter += 1
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
with sess.as_default():
# print("Loading old state")
U.load_state(model_file)
# for var in tf.global_variables():
# print('all variables: ' + var.op.name)
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
if prefix_for_save is not None:
cur_save_path = prefix_for_save + "_r" + str(save_count) + ".pkl"
act.save_with_sess(sess, path=cur_save_path)
return act
def learn_retrain_and_save(env,
q_func,
lr=5e-4,
max_timesteps_init=100000,
buffer_size=50000,
exploration_fraction=0.5,
exploration_final_eps=0.03,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
ep_mean_length=100,
scope="deepq_train",
path_for_save=None,
retrain_exploration_initial_eps=0.3,
retrain_exploration_final_eps=0.03,
retrain_save_count=4,
max_timesteps_retrain=100000,
retrain_config_str=None,
prefix_for_save=None):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
    max_timesteps_init: int
        number of env steps to optimize for during the initial training phase
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns True, training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = tf.Session()
sess.__enter__()
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space_shape = env.observation_space.shape
def make_obs_ph(name):
return U.BatchInput(observation_space_shape, name=name)
act, train, update_target, _ = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise,
scope=scope
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps_init + max_timesteps_retrain
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps_init),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
model_file = os.path.join(td, "model")
for t in range(max_timesteps_init + max_timesteps_retrain):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
phase_t = t % max_timesteps_init
if not param_noise:
update_eps = exploration.value(phase_t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(phase_t) + exploration.value(phase_t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
with sess.as_default():
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
# print("Reset env")
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# print("Minimize error")
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
with sess.as_default():
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
with sess.as_default():
# print("update target")
update_target()
mean_ep_reward = round(np.mean(episode_rewards[(-ep_mean_length - 1):-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean " + str(ep_mean_length) + " episode reward", mean_ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(phase_t)))
logger.dump_tabular()
# print("Checkpoint")
# for var in tf.trainable_variables():
# print('normal variable: ' + var.op.name)
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > ep_mean_length and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_ep_reward))
with sess.as_default():
# print("Saving current state")
U.save_state(model_file)
saved_mean_reward = mean_ep_reward
if t == max_timesteps_init:
# change exploration curve
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps_retrain),
initial_p=retrain_exploration_initial_eps,
final_p=retrain_exploration_final_eps)
# change opponent
env.update_for_retrain(retrain_config_str)
# reset state as if new game
obs = env.reset()
reset = True
elif t - 1000 == max_timesteps_init:
print("saving trained net: " + path_for_save)
act.save_with_sess(sess, path=path_for_save)
elif t > max_timesteps_init and (t - max_timesteps_init) % (max_timesteps_retrain // retrain_save_count) == 0:
# save current network under new name
save_iter = (t - max_timesteps_init) // (max_timesteps_retrain // retrain_save_count)
cur_save_path = prefix_for_save + "_r" + str(save_iter) + ".pkl"
print("saving from retrain: " + cur_save_path)
act.save_with_sess(sess, path=cur_save_path)
return act
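# --- Hedged illustration (added; not part of the original code) ---
# With the defaults above (exploration_fraction=0.5, max_timesteps_init=100000,
# exploration_final_eps=0.03) and the baselines-style LinearSchedule assumed here,
# epsilon anneals linearly from 1.0 to 0.03 over the first 50000 steps and then
# stays at 0.03 until the retrain phase swaps in a new schedule:
#     exploration.value(0)     -> 1.0
#     exploration.value(25000) -> 0.515
#     exploration.value(50000) -> 0.03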
|
[
"masonwright14@gmail.com"
] |
masonwright14@gmail.com
|
69531891167d3e1b664371e3748feec5e264a3c5
|
2d9ae5e7493a983e0673f3ee2fb2ee357df38089
|
/wraling_data_with_python/chpt8/scr_wiki.py
|
1ff6ff3f1cb9586e7bbc2e89b253c0f71bd8fe0b
|
[] |
no_license
|
saksim/python_data_analysis
|
e836a8e0f6e08a232725d9a4e427aa2665ac4f46
|
e5db206aeb95f028f725a2854fd1fc6f52916d37
|
refs/heads/master
| 2021-01-18T15:53:26.097424
| 2017-08-25T17:08:18
| 2017-08-25T17:08:18
| 86,688,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# -*- coding: utf-8 -*-
from urllib import urlopen
from bs4 import BeautifulSoup
import re
import datetime
import random
import pymysql
conn = pymysql.connect(host='127.0.0.1', user='root', passwd='MYFyxy5hww21', database='python_test', port=28080,
charset='utf8')
cur = conn.cursor()
def create_table():
statement = "CREATE TABLE pages (id BIGINT(7) NOT NULL AUTO_INCREMENT,title VARCHAR(200),content VARCHAR(10000),created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,PRIMARY KEY(id));"
# print statement
cur.execute(statement)
cur.connection.commit()
def store(title, content):
cur.execute("INSERT INTO pages (title, content) VALUES (\"%s\",\"%s\")", (title,content))
cur.connection.commit()
def getLinks(articleurl):
html = urlopen("http://en.wikipedia.org/wiki" + articleurl)
bsobj = BeautifulSoup(html,"html5lib")
title = bsobj.find("h1").get_text()
for i in bsobj.find("div",{"id":"mw-content-text"}).find("p"):
content = i.get_text()
store(title,content)
return bsobj.find("div",{"id":"bodyContent"}).findall("a",href =re.compile("^(/wiki/)"))
links = getLinks("/Climate")
if __name__ == "__main__":
random.seed(datetime.datetime.now())
    # create_table()
try:
while len(links) > 0:
newarticle = links[random.randint(0,len(links)-1)].attrs["href"]
print(newarticle)
links = getLinks(newarticle)
finally:
cur.close()
conn.close()
|
[
"heweiwei0107@163.com"
] |
heweiwei0107@163.com
|
41102e0fbd612f25a5a9c254b74f9855d5ecc203
|
a4d577b74e80c79ac94e0ded13ff25f06aa3bc13
|
/mysite/mysite/urls.py
|
60ea7f134a8c840077228d29e48f344d82a7a5ac
|
[] |
no_license
|
lmdcma27/Progra-2-Practica1
|
1572ae02144ec7aad14a3946e5af8a28b86cff4b
|
f9b9943b45749844cbda746667b88146800f119c
|
refs/heads/master
| 2020-05-03T19:26:15.608056
| 2019-06-08T18:33:42
| 2019-06-08T18:33:42
| 178,783,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls'))
]
|
[
"lmdcma27@gmail.com"
] |
lmdcma27@gmail.com
|
1d988a601334f866cf0cfa29b4c28f8a8d0eb042
|
46afea2e3bef90b7fe21e6762c614ac98150df0a
|
/SMS19/wsgi.py
|
edea5478acd78f8cfc31ec2d6a0e340e6f4bc52a
|
[
"MIT"
] |
permissive
|
bitsacm/SMS19
|
7dca5dc3a1f1683850e9a7fd548252e876dff315
|
03148e03165e06e97196ed8221c19151a68db9ea
|
refs/heads/master
| 2021-06-17T02:18:21.049015
| 2019-04-15T10:16:38
| 2019-04-15T10:16:38
| 184,020,771
| 0
| 2
|
MIT
| 2021-03-20T19:04:11
| 2019-04-29T07:22:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for SMS19 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SMS19.settings')
application = get_wsgi_application()
|
[
"siddhantrkhandelwal@gmail.com"
] |
siddhantrkhandelwal@gmail.com
|
d91bbfd4fdd6e554f03a9e6e836d1a9c3766be73
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_service_tags_operations.py
|
f9bdf3fc48b69402169eb1941b8801212dcb620f
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,510
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations(object):
"""ServiceTagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ServiceTagsListResult"
"""Gets a list of service tag information resources.
:param location: The location that will be used as a reference for version (not as a filter
based on location, you will get the list of service tags with prefix details across all regions
but limited to the cloud that your subscription belongs to).
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceTagsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ServiceTagsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceTagsListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'} # type: ignore
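# --- Hedged usage sketch (added; not part of the generated SDK file) ---
# As the class docstring notes, this operations class is not instantiated directly;
# it is reached through a client object. The sketch below assumes azure-identity is
# installed and that the client's service_tags attribute exposes this 2020-04-01 operation.
def _example_list_service_tags(subscription_id, location="westeurope"):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    result = client.service_tags.list(location)
    return [tag.name for tag in result.values]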
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
bbf9ef3f31be84a570b427f39ccb042200d4cd05
|
42792862e02664907a137b2db84f6c4337ca5ac2
|
/main.py
|
dc1d5c9b407c5a0168036027187dbadcedaa5691
|
[] |
no_license
|
AlexeyBurkov/statisticLabs
|
87b4e07f02284d386b8ea6bc80bc163196588d04
|
7cdee9cb4d0f0fdeb0d6ca1c5ec608977cbdedea
|
refs/heads/master
| 2023-04-24T06:48:16.480240
| 2021-05-09T02:39:44
| 2021-05-09T02:39:44
| 358,225,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
import numpy as np
from lab1 import do_hist_research
from lab2 import do_characteristics_research
from lab3 import do_boxplot_research
from lab4 import do_func_plus_density_research
from lab5 import do_correlation_research, do_ellipse_research
if __name__ == '__main__':
do_correlation_research()
# do_ellipse_research()
|
[
"rubicks_cube@mail.ru"
] |
rubicks_cube@mail.ru
|
710d650df521be4558296200acfa411b41f16e57
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/0432_All_O`one_Data_Structure/solution.py
|
f9950ad44aecca27f66a9cb7b101ee26ce498043
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
'''
432. All O`one Data Structure
Level: Hard
https://leetcode.com/problems/all-oone-data-structure
'''
'''
Solution:
'''
class AllOne:
def __init__(self):
"""
Initialize your data structure here.
"""
def inc(self, key: str) -> None:
"""
Inserts a new key <Key> with value 1. Or increments an existing key by 1.
"""
def dec(self, key: str) -> None:
"""
Decrements an existing key by 1. If Key's value is 1, remove it from the data structure.
"""
def getMaxKey(self) -> str:
"""
Returns one of the keys with maximal value.
"""
def getMinKey(self) -> str:
"""
Returns one of the keys with Minimal value.
"""
# Your AllOne object will be instantiated and called as such:
# obj = AllOne()
# obj.inc(key)
# obj.dec(key)
# param_3 = obj.getMaxKey()
# param_4 = obj.getMinKey()
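# --- Hedged reference sketch (added; not part of the original skeleton) ---
# The skeleton above only documents the interface. The class below is a simple
# dictionary-based implementation of the same interface; getMaxKey/getMinKey are
# O(n) here, so it is a correctness sketch rather than the O(1) structure the
# problem ultimately asks for.
class AllOneSimple:
    def __init__(self):
        self.counts = {}
    def inc(self, key: str) -> None:
        self.counts[key] = self.counts.get(key, 0) + 1
    def dec(self, key: str) -> None:
        if self.counts.get(key, 0) <= 1:
            self.counts.pop(key, None)
        else:
            self.counts[key] -= 1
    def getMaxKey(self) -> str:
        return max(self.counts, key=self.counts.get) if self.counts else ""
    def getMinKey(self) -> str:
        return min(self.counts, key=self.counts.get) if self.counts else ""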
|
[
"benjaminhuanghuang@gmail.com"
] |
benjaminhuanghuang@gmail.com
|
c479330cbce640dd7b11335f7ba3f16c96e55c22
|
f61aa4d5791fc858e52db956f447cfd0af5182cf
|
/break_statement.py
|
7886f134ac68d97318181108c520437d6d9c7a19
|
[] |
no_license
|
AshutoshPanwar/Python_udemy_course
|
70abda4418c4532dd886a2b98c0bfb0bc8fbc138
|
7b4698f47a9a80b4cbe07e2334ccc6bc1427118c
|
refs/heads/master
| 2023-04-19T00:02:48.129265
| 2021-05-05T10:23:52
| 2021-05-05T10:23:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
l = [10, 54 ,2 ,61, 14]
n = int(input('Enter search key:'))
for i in l:
print(i,n)
if i == n:
print('found')
break
#continue statement
l = [10, 54 , 2, 61, 15]
for i in l:
if i % 2 != 0:
continue
print(i)
|
[
"ashupanwar1100@gmail.com"
] |
ashupanwar1100@gmail.com
|
ce94ad704754398daf3643ee705966752a50804f
|
7bbc83f3f84d7e5057cb04f6895082ab3e016e90
|
/ml/m22_pca3_cifar100.py
|
34b9db2a7cd8e68feed22b981d54296f0365123a
|
[] |
no_license
|
osy1223/bit_seoul
|
908f6adf007c0a7d0df2659b4fae75eb705acaea
|
b523d78c7b80d378a2d148b35466304f10bf4af4
|
refs/heads/master
| 2023-02-02T14:26:40.120989
| 2020-12-18T00:46:04
| 2020-12-18T00:46:04
| 311,279,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
import numpy as np
from tensorflow.keras.datasets import cifar100
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# 1. Data
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
print(x_train.shape, x_test.shape) # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(y_train.shape, y_test.shape) # (50000, 1) (10000, 1)
# 1.1 Data preprocessing
# append (concatenate the train and test data)
x = np.append(x_train, x_test, axis=0)
y = np.append(y_train, y_test, axis=0)
print("x.shape:", x.shape) #x.shape:(60000, 32, 32, 3)
# reshape
x = x.reshape(60000, 32*32*3)
print(x.shape) # (60000, 3072)
# PCA
pca = PCA()
pca.fit(x)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1  # d: the number of n_components we need
print("n_components:",d) # n_components: 202
pca = PCA(n_components=202)
x2d = pca.fit_transform((x))
print(x2d.shape) #(60000, 202)
pca_EVR = pca.explained_variance_ratio_
print(sum(pca_EVR)) # 1.0000000000000022
# OneHotEncoding
y = to_categorical(y)
# 1.2 train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, train_size=500)
print("split shape:", x_train.shape, x_test.shape) # (500, 3072) (59500, 3072)
# For a DNN on images, reshape first and then apply the scaler
# 1.3 Scaler
scaler = StandardScaler()
scaler.fit(x_train)  # fit first,
x_train = scaler.transform(x_train)  # then transform and keep the usable result
x_test = scaler.transform(x_test)  # then transform and keep the usable result
# 2. Model
model = Sequential()
model.add(Dense(10, input_shape=(x_train.shape[1],)))
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100))
model.add(Dropout(0.2))
model.add(Dense(300))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(3))
model.add(Dense(100, activation='softmax'))
# 3. Compile and train
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
es = EarlyStopping(
monitor='loss',
patience=20,
mode='auto',
verbose=2)
model.fit(x_train,y_train,
epochs=1000,
batch_size=128,
verbose=1,
validation_split=0.2,
callbacks=[es])
# 4. Evaluate and predict
loss,acc = model.evaluate(x_test, y_test, batch_size=128)
print("loss : ",loss)
print("acc : ",acc)
'''
cifar100 dnn
loss : 3.6389894485473633
acc : 0.14429999887943268
cumsum >= 0.95
n_components : 202
loss : 21.481054306030273
acc : 0.01484033651649952
cumsum >= 1
n_components: 3072
loss : 23.00354766845703
acc : 0.016857143491506577
'''
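# --- Hedged note and sketch (added; not part of the original script) ---
# The PCA-reduced array x2d computed above is not what train_test_split received:
# the split above works on the raw 3072-dimensional x, as the printed split shape shows.
# If training on the PCA-reduced features was the intent, the split would instead
# look like this sketch (an assumption about intent; not executed above):
def _split_pca_features(x2d, y, train_size=500):
    from sklearn.model_selection import train_test_split
    return train_test_split(x2d, y, train_size=train_size)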
|
[
"osu1223@gmail.com"
] |
osu1223@gmail.com
|
9f9f3712a05a0efffe7c2c99073775c38e808e62
|
a45e8ebc8030eeb080f0b2f483375173b05ccf60
|
/HRank/30days/day9.py
|
d74724f79f8801cea2a6df7426b08fd7e361f10f
|
[] |
no_license
|
santoshr1016/OpenCV
|
436098b8cba2a2a2e955181eab9be096c57cb1ec
|
69456ea144e1586106bba7bdd92369aebc8625e2
|
refs/heads/master
| 2021-06-04T14:51:45.034114
| 2016-09-01T12:34:03
| 2016-09-01T12:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
def factorial(n):
if n<=1:
return 1
else:
return n*factorial(n-1)
N = int(input())
print(factorial(N))
|
[
"santy1016@gmail.com"
] |
santy1016@gmail.com
|
34559726c4acdaebfc64114d524060dcad146149
|
d8f5bfee3f54d2d003575323c0ef305653fe77a2
|
/py/helpers/mongo_client.py
|
567044ef3bd07c215e47a7c96998135dad985477
|
[] |
no_license
|
aspotashev/stupid-ids
|
c8ab7abe546a95fd79637c6ba94e6393ae400f8b
|
978c995c7fc0be90d181bc098d36219da333bdd1
|
refs/heads/master
| 2021-01-10T14:00:59.065046
| 2017-05-16T21:22:24
| 2017-05-16T21:22:24
| 44,253,139
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import pymongo
class MongoClient(object):
def __init__(self):
connection = pymongo.MongoClient()
self.__db = connection['stupids_db']['template_parts']
def get_first_id(self, tp_hash):
rows = list(self.__db.find({'_id': tp_hash}))
assert len(rows) <= 1
if len(rows) == 0:
return None
elif len(rows) == 1:
return rows[0]['first_id']
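# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal example of how the helper above might be called, assuming a local MongoDB
# with the 'stupids_db' database and 'template_parts' collection shown in __init__.
# (The same lookup could also be written with find_one(), which returns the matching
# document or None directly.)
def _example_lookup(tp_hash):
    client = MongoClient()
    return client.get_first_id(tp_hash)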
|
[
"aspotashev@gmail.com"
] |
aspotashev@gmail.com
|
20fb69cd069a164cee603d069a7a5f6b096580b3
|
494834a657d7419ebd1757ee4adb9661396c8b98
|
/Face Mask Detection/face_mask.py
|
5c9dee64ce7773e510305a3ac5db5dfc6abe396d
|
[] |
no_license
|
AbhiGupta06/Project-face-mask-detection-
|
a0b05665590c50054e6475ccb69dc7113af2456e
|
4c81a7e187dbb30b8d51777e235823d62bd797e4
|
refs/heads/main
| 2023-03-13T02:04:00.668742
| 2021-02-26T07:52:37
| 2021-02-26T07:52:37
| 342,499,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,612
|
py
|
# -*- coding: utf-8 -*-
"""face_mask.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1vFrGES-YoQk2rS1NFoRYaY_-GCuQBxTN
**Let us first import all the necessary libraries we are going to need.**
"""
# from tensorflow.keras.preprocessing.image import ImageDataGenerator
# from tensorflow.keras.applications import MobileNetV2
# from tensorflow.keras.layers import AveragePooling2D
# from tensorflow.keras.layers import Dropout
# from tensorflow.keras.layers import Flatten
# from tensorflow.keras.layers import Dense
# from tensorflow.keras.layers import Input
# from tensorflow.keras.models import Model
# from tensorflow.keras.optimizers import Adam
# from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
# from tensorflow.keras.preprocessing.image import img_to_array
# from tensorflow.keras.preprocessing.image import load_img
# from tensorflow.keras.utils import to_categorical
# from sklearn.preprocessing import LabelBinarizer
# from sklearn.model_selection import train_test_split
# from imutils import paths
# import matplotlib.pyplot as plt
# import numpy as np
# import os
# """**The next step is to read all the images and assign them to some list. Here we get all the paths associated with these images and then label them accordingly. Remember our dataset is contained in two folders viz- with_masks and without_masks. So we can easily get the labels by extracting the folder name from the path. Also, we preprocess the image and resize it to 224x 224 dimensions.**"""
# imagePaths = list(paths.list_images('/content/drive/MyDrive/dataset'))
# data = []
# labels = []
# # loop over the image paths
# for imagePath in imagePaths:
# # extract the class label from the filename
# label = imagePath.split(os.path.sep)[-2]
# # load the input image (224x224) and preprocess it
# image = load_img(imagePath, target_size=(224, 224))
# image = img_to_array(image)
# image = preprocess_input(image)
# # update the data and labels lists, respectively
# data.append(image)
# labels.append(label)
# # convert the data and labels to NumPy arrays
# data = np.array(data, dtype="float32")
# labels = np.array(labels)
# baseModel = MobileNetV2(weights="imagenet", include_top=False,
# input_shape=(224, 224, 3))
# # construct the head of the model that will be placed on top of the
# # the base model
# headModel = baseModel.output
# headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
# headModel = Flatten(name="flatten")(headModel)
# headModel = Dense(128, activation="relu")(headModel)
# headModel = Dropout(0.5)(headModel)
# headModel = Dense(2, activation="softmax")(headModel)
# # place the head FC model on top of the base model (this will become
# # the actual model we will train)
# model = Model(inputs=baseModel.input, outputs=headModel)
# # loop over all layers in the base model and freeze them so they will
# # *not* be updated during the first training process
# for layer in baseModel.layers:
# layer.trainable = False
# lb = LabelBinarizer()
# labels = lb.fit_transform(labels)
# labels = to_categorical(labels)
# # partition the data into training and testing splits using 80% of
# # the data for training and the remaining 20% for testing
# (trainX, testX, trainY, testY) = train_test_split(data, labels,
# test_size=0.20, stratify=labels, random_state=42)
# # construct the training image generator for data augmentation
# aug = ImageDataGenerator(
# rotation_range=20,
# zoom_range=0.15,
# width_shift_range=0.2,
# height_shift_range=0.2,
# shear_range=0.15,
# horizontal_flip=True,
# fill_mode="nearest")
# INIT_LR = 1e-4
# EPOCHS = 20
# BS = 32
# print("[INFO] compiling model...")
# opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# model.compile(loss="binary_crossentropy", optimizer=opt,
# metrics=["accuracy"])
# # train the head of the network
# print("[INFO] training head...")
# H = model.fit(
# aug.flow(trainX, trainY, batch_size=BS),
# steps_per_epoch=len(trainX) // BS,
# validation_data=(testX, testY),
# validation_steps=len(testX) // BS,
# epochs=EPOCHS)
# N = EPOCHS
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
# plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
# plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
# plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
# plt.title("Training Loss and Accuracy")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
# plt.legend(loc="lower left")
# #To save the trained model
# model.save('mask_recog_ver2.h5')
import cv2
import os
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
import numpy as np
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
model = load_model("C:/Users/Pawan Bharti/Downloads/mask_recog_ver2 (1).h5")
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1,minNeighbors=5, minSize=(60, 60), flags=cv2.CASCADE_SCALE_IMAGE)
faces_list=[]
preds=[]
    for (x, y, w, h) in faces:
        face_frame = frame[y:y+h, x:x+w]
        face_frame = cv2.cvtColor(face_frame, cv2.COLOR_BGR2RGB)
        face_frame = cv2.resize(face_frame, (224, 224))
        face_frame = img_to_array(face_frame)
        face_frame = np.expand_dims(face_frame, axis=0)
        face_frame = preprocess_input(face_frame)
        faces_list.append(face_frame)
    if len(faces_list) > 0:
        # Batch all detected faces into one array so the model predicts them in a single call
        preds = model.predict(np.vstack(faces_list))
    for (x, y, w, h), pred in zip(faces, preds):
        (mask, withoutMask) = pred
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        cv2.putText(frame, label, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
AbhiGupta06.noreply@github.com
|
06f8c7e9b4817b1e660c155e751e1e20b9c1c96c
|
f1dbf2a4ddad50b53dacd2acf4071b12d393f232
|
/programming/basics/python/binary_search.py
|
916cd2ed15302f8a28e0a718b7f23b2cb80f6152
|
[] |
no_license
|
adikabintang/kolor-di-dinding
|
e25b6033c3cc7a5da53681ec7904eb6c015a18ca
|
7450002fc8f9b8c276b021be21b9cd368bc5200e
|
refs/heads/master
| 2021-07-05T10:36:04.616308
| 2021-01-04T16:32:09
| 2021-01-04T16:32:09
| 218,272,042
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
from typing import List
def bin_search(nums: List[int], target: int) -> int:
    left = 0
    right = len(nums)
    found_index = -1
    while right > left:
        # the midpoint must be computed from both bounds, not from their difference
        middle_index = (left + right) // 2
        if target < nums[middle_index]:
            right = middle_index
        elif target > nums[middle_index]:
            left = middle_index + 1
        else:
            found_index = middle_index
            break
    return found_index
nums = [1, 2, 3, 4, 5, 6, 7]
for i in nums:
print(bin_search(nums, i))
print(bin_search(nums, -7))
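# --- Hedged cross-check (added; not part of the original file) ---
# The standard library's bisect module gives a reference answer for sorted input,
# which makes it a convenient sanity check for bin_search:
import bisect
def bin_search_bisect(sorted_nums, target):
    i = bisect.bisect_left(sorted_nums, target)
    return i if i < len(sorted_nums) and sorted_nums[i] == target else -1
for value in nums:
    assert bin_search(nums, value) == bin_search_bisect(nums, value)
assert bin_search(nums, -7) == bin_search_bisect(nums, -7) == -1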
|
[
"adika.bintang@ui.ac.id"
] |
adika.bintang@ui.ac.id
|
7afe18193da612a4967d92db2639a1c5640c40bf
|
a6bdf7aa8f0a6282cc81e1acf97917dcd8821a7e
|
/module1.py
|
7f620c2f9cfa819d81da3a33c60119cb6cd413a9
|
[] |
no_license
|
SNBhushan/BASICS-OF-PYTHON
|
28f66a24c774d24075ccd7ebea19286869ce15c8
|
beb5df71a9ef128b8800d5059778413521dd53f4
|
refs/heads/main
| 2023-05-27T16:57:15.709339
| 2021-06-08T09:26:09
| 2021-06-08T09:26:09
| 374,953,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
import math
print(math.factorial(6))
|
[
"noreply@github.com"
] |
SNBhushan.noreply@github.com
|
5cdd4ba6e42de150b30b0b660ce41f49f79da705
|
1eef92b94fb2a1af46714fc2309adb1eafdbeeaf
|
/shop/urls.py
|
6f5feef1e3659ecc8deb6ee6bbd184d6ac117af2
|
[] |
no_license
|
Luxlorys/armeta
|
0201278ead2702868799863ed82a61576ca7cfec
|
8a8d8941efcd078ac950f2eb41502792757ca2db
|
refs/heads/master
| 2023-06-01T12:13:08.193469
| 2021-06-21T12:29:31
| 2021-06-21T12:29:31
| 375,623,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('', include('main.urls')),
path('admin/', admin.site.urls),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"bossbyrcev221@gmail.com"
] |
bossbyrcev221@gmail.com
|
9002d90dcbc6c3137048964b4c1bd3cd7d83fd19
|
cf0d80a7f8a7e89405f2b818af416651d5667298
|
/docker-mapnik-polar/render-polar-tiles-3411.py
|
8c026b48223573f82369e801e56140f0fa22b509
|
[] |
no_license
|
ingmapping/mapnik-stylesheets-polar
|
98ba249ab2be0128b5fa7a3471a40b6b90738c93
|
35e87e041d105f54d4d2baf1f03e4f42ba780c34
|
refs/heads/master
| 2020-03-21T12:47:13.604397
| 2018-07-06T12:22:34
| 2018-07-06T12:22:34
| 138,571,899
| 0
| 0
| null | 2018-06-25T09:21:01
| 2018-06-25T09:21:01
| null |
UTF-8
|
Python
| false
| false
| 10,201
|
py
|
#!/usr/bin/python
#
# Render tiles in a polar projection: EPSG 3411 (https://nsidc.org/data/atlas/epsg_3411.html)
# OriginX:-12400000 / OriginY: 12400000 / Resolution: 96875 / BBOX: 12400000, 12400000, 12400000, 12400000.
#
from optparse import OptionParser
import sys, os, multiprocessing
import Queue
try:
import mapnik
except:
import mapnik2 as mapnik
cairo_exists = True
try:
import cairo
except ImportError:
cairo_exists = False
def main():
style = os.path.dirname(os.path.abspath(__file__))+"/osm.xml"
dir = "tiles"
type = "png"
scale = 22800000
minzoom = 1
maxzoom = 6
threads = 1
context = 3
parser = OptionParser()
parser.add_option("-s", "--style", action="store", type="string", dest="style",
help="path to the mapnik stylesheet xml, defaults to: "+style)
parser.add_option("-d", "--dir", action="store", type="string", dest="dir",
help="path to the destination folder, defaults to "+type)
parser.add_option("-t", "--type", action="store", type="string", dest="type",
help="file type to render (png, png256, jpg), defaults to "+type)
parser.add_option("-z", "--minzoom", action="store", type="int", dest="minzoom",
help="minimum zoom level to render, defaults to "+str(minzoom))
parser.add_option("-Z", "--maxzoom", action="store", type="int", dest="maxzoom",
help="maximum zoom level to render, defaults to "+str(maxzoom))
parser.add_option("-T", "--threads", action="store", type="int", dest="threads",
help="number of threads to launch, defaults to "+str(threads))
parser.add_option("-i", "--only-interesting", action="store_true", dest="onlyinteresting",
help="only render around interesting places (buildings, peaks, islands, ...)")
parser.add_option("-c", "--only-interesting-context", action="store", type="int", dest="context",
help="when rendering tiles around interesting places, how many tiles around those places should be rendered?"+
"0 means that only the tile with the interesting feature will be rendered; "+
"1 means that the 8 surrounding tiles will be rendered for each zoom level, too; "+
"2 adds 24 extra tiles; 3 adds 48 extra tiles; 4 adds 80 extra tiles; "+
"defaults to "+str(context)+", which should fill the most screens")
parser.add_option("-l", "--only-interesting-list", action="store", type="string", dest="listfile",
help="write a GeoJSON-List of interesting places")
parser.add_option("-D", "--db", action="store", type="string", dest="dsn", default="",
help="database connection string used for finding interesting places")
parser.add_option("-e", "--skip-existing", action="store_true", dest="skipexisting",
help="skip existing tiles, only render missing")
(options, args) = parser.parse_args()
if options.style:
style = options.style
if options.dir:
dir = options.dir
if options.type:
type = options.type
if options.minzoom:
minzoom = options.minzoom
if options.maxzoom:
maxzoom = options.maxzoom
if options.threads:
threads = options.threads
if options.context != None:
context = options.context
queue = multiprocessing.JoinableQueue(32)
lock = multiprocessing.Lock()
renderers = {}
print "Starting %u render-threads" % (threads)
for i in range(threads):
renderer = RenderThread(i, queue, style, scale, dir, type, lock)
render_thread = multiprocessing.Process(target=renderer.run)
render_thread.start()
renderers[i] = render_thread
if options.onlyinteresting:
import psycopg2
tileset = set()
features = []
con = psycopg2.connect(options.dsn)
sql = """
SELECT 'point' AS type, osm_id, name, ST_X(way), ST_Y(way), ST_X(ST_Transform(way, 3411)), ST_Y(ST_Transform(way, 3411)) FROM ant_point
WHERE (place IS NOT NULL AND place IN ('hamlet', 'town', 'isolated_dwelling', 'cape', 'locality', 'island', 'islet'))
OR building IS NOT NULL
OR aeroway IS NOT NULL
OR ("natural" IS NOT NULL AND "natural" IN ('volcano', 'ridge', 'cliff', 'cape', 'peak', 'valley', 'bay'))
UNION ALL
SELECT 'line' AS type, osm_id, name, ST_X(ST_Centroid(way)), ST_Y(ST_Centroid(way)), ST_X(ST_Transform(ST_Centroid(way), 3411)), ST_Y(ST_Transform(ST_Centroid(way), 3411)) FROM ant_line
WHERE (place IS NOT NULL AND place IN ('hamlet', 'town', 'isolated_dwelling', 'cape', 'locality', 'island', 'islet'))
OR building IS NOT NULL
OR aeroway IS NOT NULL
UNION ALL
SELECT 'polygon' AS type, osm_id, name, ST_X(ST_Centroid(way)), ST_Y(ST_Centroid(way)), ST_X(ST_Transform(ST_Centroid(way), 3411)), ST_Y(ST_Transform(ST_Centroid(way), 3411)) FROM ant_polygon
WHERE (name IS NOT NULL AND place IS NOT NULL AND place IN ('hamlet', 'town', 'isolated_dwelling', 'cape', 'locality', 'island', 'islet'))
OR building IS NOT NULL
OR aeroway IS NOT NULL;
""";
cur = con.cursor()
cur.execute(sql)
lock.acquire()
print "found %u interesting nodes" % (cur.rowcount)
lock.release()
i = 0
for record in cur:
(obj_type, osm_id, name, lat, lng, xmeter, ymeter) = record
lock.acquire()
i += 1
print "found interesting %s %u of %u: #%u (%s)" % (obj_type, i, cur.rowcount, osm_id, name)
lock.release()
if(options.listfile):
features += ({
"type": "Feature",
"properties": {
"osm_id": osm_id,
"name": name
},
"geometry": {
"type": "Point",
"coordinates" : [ lat, lng ]
}
},)
for z in range(minzoom, maxzoom+1):
n = 2**z
n2 = n/2
tilesz = float(scale) / float(n)
xoff = float(xmeter) / tilesz
yoff = float(ymeter) / tilesz
x = int(xoff + n2)
y = int(n2 - yoff)
for xctx in range(-context, context+1):
for yctx in range(-context, context+1):
absx = x+xctx
absy = y+yctx
t = (z, absx, absy)
if absx >= 0 and absx < n and absy >= 0 and absy < n and not t in tileset:
queue.put(t)
tileset.add(t)
if(options.listfile):
import json
f = open(options.listfile, "w")
f.write(json.dumps({
"type": "FeatureCollection",
"features": features
}
))
f.close()
else:
for z in range(minzoom, maxzoom+1):
n = 2**z
for x in range(0, n):
for y in range(0, n):
if options.skipexisting and os.path.exists(dir + "/" + str(z) + "/" + str(x) + "/" + str(y) + "." + type):
continue
t = (z, x, y)
queue.put(t)
# Signal render threads to exit by sending empty request to queue
for i in range(threads):
queue.put(None)
# wait for pending rendering jobs to complete
queue.join()
for i in range(threads):
renderers[i].join()
class RenderThread:
def __init__(self, threadnum, queue, style, scale, dir, type, lock):
self.threadnum = threadnum
self.queue = queue
self.scale = scale
self.dir = dir
self.type = type
self.lock = lock
self.style = style
self.lock.acquire()
print "Thread #%u created" % (threadnum)
self.lock.release()
def run(self):
self.lock.acquire()
print "Thread #%u started" % (self.threadnum)
self.lock.release()
m = mapnik.Map(256,256)
mapnik.load_map(m, self.style, True)
if(m.buffer_size < 32):
m.buffer_size = 32
while True:
r = self.queue.get()
if (r == None):
self.queue.task_done()
self.lock.acquire()
print "Thread #%u: closing" % (self.threadnum)
self.lock.release()
break
else:
(z, x, y) = r
render_tile(m, z, x, y, self.scale, self.dir, self.type, self.lock, self.threadnum)
self.queue.task_done()
def render_tile(m, z, x, y, scale, dir, type, lock=None, threadnum=None):
n = 2**z
n2 = n/2
x2n = x-n2
y2n = (n-y-1)-n2
tilesize = float(scale) / float(n);
bbox = [
tilesize * x2n,
tilesize * y2n,
tilesize * (x2n+1),
tilesize * (y2n+1)
]
pdir = dir + "/" + str(z) + "/" + str(x)
if lock:
lock.acquire()
print "Thread #%u: z=%u x=%u y=%u -> (%f,%f,%f,%f)" % (threadnum, z, x, y, bbox[0], bbox[1], bbox[2], bbox[3])
if not os.path.exists(pdir):
os.makedirs(pdir)
lock.release()
else:
if not os.path.exists(pdir):
os.makedirs(pdir)
print "z=%u x=%u y=%u -> (%f,%f,%f,%f)" % (z, x, y, bbox[0], bbox[1], bbox[2], bbox[3])
if mapnik.Box2d:
e = mapnik.Box2d(*bbox)
else:
e = mapnik.Envelope(*bbox)
# zoom map to bounding box
m.zoom_to_box(e)
file = dir + "/" + str(z) + "/" + str(x) + "/" + str(y) + "." + type
s = mapnik.Image(256, 256)
mapnik.render(m, s)
view = s.view(0, 0, 256, 256)
view.save(file, type)
if __name__ == "__main__":
main()
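# --- Hedged worked example (added; not part of the original script) ---
# Illustration of the bbox arithmetic used in render_tile() above, assuming the
# default scale of 22800000 metres: at zoom z the scale is split into 2**z tiles
# per axis, and tile (x, y) is mapped symmetrically around the projection origin.
# For example, z=1, x=0, y=0 covers the top-left quadrant:
#   n = 2, tilesize = 22800000 / 2 = 11400000
#   x2n = 0 - 1 = -1, y2n = (2 - 0 - 1) - 1 = 0
#   bbox = (-11400000, 0, 0, 11400000)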
|
[
"noreply@github.com"
] |
ingmapping.noreply@github.com
|
5cb97df2409a65e5747704f8b77e52f8006b7bfd
|
9f95df6184ddc375d625a0655533f5cae3fc0fa4
|
/car/get_credentials.py
|
8022c4fdf356eb0640692f04e71940c3f9edb8f5
|
[
"BSD-3-Clause"
] |
permissive
|
stvhay/car
|
401721e03a2f49ee5aaec0dbab003e9776cadf6b
|
c4e1810d6fa8869e33fcc77ac85614fd05620498
|
refs/heads/master
| 2022-04-09T13:59:48.862986
| 2020-03-17T00:14:58
| 2020-03-17T00:14:58
| 238,547,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
"""Get credentials function."""
import getpass
import yaml
def get_credentials(args):
"""
Load credentials from file or arguments.
    :param args: parsed command-line arguments; ``args.username`` is used when set.
    :return: (username, password)
"""
try:
        with open('.credentials') as f:
            c = yaml.safe_load(f.read())
        password = c.get('password')
        if not password:
            password = getpass.getpass("Password: ")
        username = c.get('username')
        if not username:
            username = input("Username: ")
        if args.username:
            username = args.username
except IOError:
username = args.username
password = getpass.getpass()
return username, password
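# --- Hedged usage sketch (added; not part of the original module) ---
# get_credentials() only needs an object exposing a `username` attribute, so it can
# be driven from argparse, for example:
def _example_cli():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--username')
    return get_credentials(parser.parse_args())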
|
[
"hay.steve@gmail.com"
] |
hay.steve@gmail.com
|
44816a213498e7601004b6e75d6fd842b6107828
|
1d5a8f91ca705482ac0d0d196797f7d9f4c4c669
|
/pico-ups/picofu.py
|
e4beb24ab3c222425d08a1b43999e1544b3408d9
|
[] |
no_license
|
johkin/ohserver-install
|
a06c50e29be5992a22e927fa645c8336d8559879
|
58ee4be0e77d21db7b9f807f2d8f9c8eb8f764a9
|
refs/heads/master
| 2021-01-10T09:52:14.094275
| 2015-11-19T23:25:26
| 2015-11-19T23:25:26
| 46,526,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,360
|
py
|
#!/usr/bin/python
# -*- coding: iso8859_2 -*-
#===============================================================================
#
# USAGE: picofu.py -f <fw_file> [ -v | -h | -s | -p serial | --force ]
#
# DESCRIPTION:
# This script uploads firmware to the UPS PIco. The only mandatory input is the new UPS PIco firmware file.
#
# RETURN CODES:
# 0 - Successful update
# 1 - Failed to parse command line arguments
# 2 - Failed to establish communication with the UPS PIco
# 3 - Incompatible UPS PIco powering mode (DISABLED FOR NOW)
# 4 - Failed to validate firmware file
# 5 - Failed during the FW upload
#
# OPTIONS: ---
# REQUIREMENTS:
# python-serial
# python-smbus
# Proper HW setup and setup of the Pi to enable Serial/I2C communication based on the UPS PIco manual
# BUGS: ---
# NOTES: Updated for the UPS PIco by www.pimodules.com
# AUTHOR: Vit SAFAR <PIco@safar.info>
# VERSION: 1.4 adopted for UPS PIco December 2014 by PiModules
# CREATED: 2.6.2014
# REVISION:
# v1.0 16.4.2014 - Vit SAFAR
# - Initial release
# v1.1 17.4.2014 - Vit SAFAR
# - Added code documentation
# - Some speed-up optimisations
# v1.2 19.4.2014 - Vit SAFAR
# - Disabled the power detection, until automatic switch to bootloader mode is enabled
# v1.3 2.6.2014 - Vit SAFAR
# - Fixed communication issue by adding dummy ';HELLO' command
#
# TODO: - Detect FW version
# - Automatic switch to bootloader mode using @command when available
# - Automatically enable the I2C sw components in the Pi (load kernel modules) if not already done
# - Perform optimisation of the FW file to speed up the upload process
# - Make the switch to bootloader mode interactive for users who does not have the I2C interface available.
# - Show UPS PIco @status after firmware update
# - Detect progress of the factory reset, not just wait :)
# - Set UPS PIco RTC clock after factory reset to the system time
#
#===============================================================================
import sys
import time
import datetime
import os
import re
import getopt
class FWUpdate(object):
"""
    The only class performing the FW update.
    The class performs the following tasks:
    1) Check the command line arguments and validate the expected/required parameters
    2) Perform detection of the Pi powering scheme via I2C or Serial interface
3) Perform validation of the FW file
4) Verify the connectivity to UPS PIco bootloader is working
5) Perform FW update
6) Perform UPS PIco factory reset
"""
# running in verbose mode
verbose=False
# force the FW update by skipping prechecks
force=False
# skip validation of the FW
skip=False
# firmware file
filename=False
# default serial port
port='/dev/ttyAMA0'
    # serial connection established at bootloader level
seria_bloader=False
# status of the i2c serial feature
i2c=False
# detected i2c bus
i2c_bus=False
# default I2C port of UPS PIco control interface
i2c_port=0x69
# is Pi powered via Pi or not
power=False
# if power not via Pi USB and already warned about via Pi powering requirement
power_warned=False
def __init__(self):
# check if smbus module is deployed and load it if possible
try:
import smbus
self.i2c=True
self.smbus=smbus
except:
print 'WARNING: I2C support is missing. Please install smbus support for python to enable additional functionality! (sudo apt-get install python-smbus)'
self.i2c=False
# check if pyserial module is deployed and load it if possible
try:
import serial
self.serial=serial
except:
print 'ERROR: Serial support is missing. Please install pyserial support for python to enable additional functionality! (sudo apt-get install python-serial)'
sys.exit(2)
# parse command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], 'vhf:sp:', ['help', 'force' ])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
self.usage()
sys.exit(1)
for o, a in opts:
# look for verbose argument
if o =="-v":
self.verbose = True
# look for help argument
elif o in ("-h", "--help"):
self.usage()
sys.exit(1)
# look for fw filename argument
elif o == "-f":
self.filename = a
# Check if fw filename really exists
if not os.path.isfile(self.filename):
print 'ERROR: Input file "'+str(self.filename)+'" cannot be found! Make sure file exists and is readable.'
sys.exit(1)
# look for force argument
elif o == "--force":
self.force = True
# look for fw validation skip argument
elif o == "-s":
self.skip = True
# look for serial port definition argument
elif o == "-p":
self.port = a
if not os.path.exists(self.port):
print 'ERROR: Serial port "'+str(self.port)+'" cannot be found! No need to change this value in most of the cases!'
sys.exit(1)
# in case of unknown argument
else:
assert False, "ERROR: Unknown option"
sys.exit(1)
# Check if serial port device exists
if not os.path.exists(self.port):
print 'ERROR: Serial port "'+str(self.port)+'" cannot be found!'
sys.exit(1)
# Check if fw filename is defined
if not self.filename:
print 'ERROR: Firmware filename has to be defined! :)'
sys.exit(1)
# check the powering option is ok
####self.power_detect()
        # validate the provided firmware file
if not self.skip:
self.validate()
else:
if self.verbose: print 'WARNING: Skipping firmware validation'
# verify bootloader connectivity
self.serial_check()
# launch FW upload
self.fw_upload()
# Execute factory reset of UPS PIco
self.factory_reset()
"""
2) Detects the powering status of the Pi
a) Check the power status via I2C bus 0 and 1 (most common way to do it in the future?)
b) In case that no answer found (yes or no), check via serial port.
- We expect to have serial port in the bootloader mode at this time, so @command on serial interface is not available and it will fail in most of the cases
"""
def power_detect(self):
if self.verbose: print 'INFO: Detecting power setup'
# check if the system is powered via Pi USB connector
if self.i2c:
            # it's I2C, we expect something to go wrong :)
try:
if self.verbose: print 'INFO: Probing I2C bus 0'
# open connection to the first I2C bus (applicable mainly for the Rev.1 Pi boards)
bus = self.smbus.SMBus(0)
                # read the powering status byte (refer to the manual for the meaning)
                pwr=bus.read_byte_data(0x6a,0)
                # in case we got a valid response (0 is not a valid return value of this interface, so probably not connected :) )
if pwr>0:
self.i2c_bus=0
# if powered via Pi, than ok
if pwr==3:
                        if self.verbose: print 'INFO: (I2C bus 0) System is powered via the Pi USB port.'
self.power=True
# otherwise powered using unsupported mode...
# if forced to skip this check, lets do it :)
elif self.force:
print 'WARNING: (I2C-0) System is not powered via Pi USB port. There is a UPS PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
self.power_warned=True
else:
print 'ERROR: (I2C-0) System has to be powered via the Pi USB port. There is a PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
sys.exit(3)
except SystemExit as e:
sys.exit(e)
except:
pass
if not self.power:
try:
if self.verbose: print 'INFO: Probing I2C bus 1'
# open connection to the first I2C bus (applicable mainly for the Rev.2 Pi boards)
bus = self.smbus.SMBus(1)
                    # read the powering status byte (refer to the manual for the meaning)
                    pwr=bus.read_byte_data(0x6a,0)
                    # in case we got a valid response (0 is not a valid return value of this interface, so probably not connected :) )
if pwr>0:
self.i2c_bus=1
# if powered via Pi, than ok
if pwr==3:
if self.verbose: print 'INFO: (I2C bus 1) System is powered via the Pi USB port.'
self.power=True
# otherwise powered using unsupported mode...
# if forced to skip this check, lets do it :)
elif self.force:
print 'WARNING: (I2C-1) System is not powered via Pi USB port. There is a UPS PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
self.power_warned=True
else:
print 'ERROR: (I2C-1) System has to be powered via the Pi USB port. There is a UPS PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
sys.exit(3)
except SystemExit as e:
sys.exit(e)
except:
pass
        # in case power status not ok and we have not detected wrong power status already, check via Serial as a fallback method (even though it is expected to fail also due to the bootloader mode requirement)
if not self.power and not self.power_warned:
if self.verbose: print 'INFO: Probing serial port'
# Set up the connection to the UPS PIco
PIco= self.serial.Serial(port=self.port,baudrate=38400,timeout=0.005,rtscts=0,xonxoff=0)
# empty the input buffer
for line in PIco:
pass
# get status of power via serial from PIco
PIco.write('@PM\r')
# wait for the answer
time.sleep(0.5)
# for each line in the output buffer (there are some newlines returned)
for line in PIco:
# get rid of the newline characters
line=line.strip()
# is it the answer we are looking for? (yep, should be regexp...)
if line[:16] == 'Powering Source:':
# get the power source (yep, should be regexp...)
ret=line[16:20]
# in case it is RPI, everything is ok :)
if ret == 'RPI':
self.power=True
if self.verbose: print 'INFO: System is powered via the Pi USB port.'
# otherwise powered using unsupported mode...
# if forced to skip this check, lets do it :)
elif self.force:
if not self.power_warned:
print 'WARNING: (Serial) System is not powered via Pi USB port. There is a PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
self.power_warned=True
else:
print 'ERROR: (Serial) System has to be powered via the Pi USB port. There is a PIco reset after a FW update, that would perform hard reset of Pi! (use --force to disable this check)'
sys.exit(3)
# close the connection to PIco via serial
PIco.close()
#print 'pwr:',self.power,' pwrw:',self.power_warned,' pwr',self.power
# in case no power information gathered
if not self.power:
if self.force:
if not self.power_warned:
print 'WARNING: System powering mode not detected. There is a PIco reset after a FW update, that would perform hard reset of Pi! Use --force to disable this check.'
else:
                print 'ERROR: System powering mode not detected. The system has to be powered via the Pi USB port since there is a PIco reset after a FW update that would perform a hard reset of the Pi! Make a proper HW/Pi setup of the Serial interface or the PIco I2C interface to enable auto-detection. This can also happen when the PIco is already in bootloader mode (PIco RED LED lit). Use --force to disable this check.'
sys.exit(3)
"""
3) Check that there is a PIco bootloader connected to the other side of the serial interface :)
- Send dummy command and get the confirmation from the bootloader
"""
def serial_check(self):
print "Checking communication with bootloader:",
status=False
try:
# Set up the connection to the PIco
PIco = self.serial.Serial(port=self.port,baudrate=38400,timeout=0.005,rtscts=0,xonxoff=True)
# empty the input buffer
for line in PIco:
pass
# send dummy command
PIco.write(":020000040000FA\r")
except:
print "KO\nERROR: Unable to establish communication with PIco bootloader via port:",self.port,'Please verify that the serial port is availble.'
sys.exit(2)
try:
# set the wait iterations for the bootloader response
rcnt=1000
# loop and wait for the response
while rcnt>0:
# in case there is something waiting on the serial line
for resp in PIco:
                    # get rid of the newlines
resp=resp.strip()
# check if the response is the expected value or not :)
if ord(resp[0])==6:
print "OK"
status=True
rcnt=1
else:
print "KO\nERROR: Invalid response from PIco:",ord(resp[0])," Please retry the FW upload process."
sys.exit(2)
break
rcnt-=1
except:
print "KO\nERROR: Something wrong happened during verification of communication channel with PIco bootloader via port:",self.port,'Please verify that the serial port is availble and not used by some other application.'
sys.exit(2)
# in case communication not verified
if not status:
if self.force:
print "KO\nWARNING: Unable to verify communication with bootloader in PIco. Is the PIco in the bootloader mode? (Red LED lid on PIco)"
else:
print "KO\nERROR: Failed to establish communication with bootloader in PIco. Is the PIco in the bootloader mode? (Red LED lid on PIco)"
sys.exit(2)
# close the channel to PIco
PIco.close()
"""
4) Verify the content of the provided FW file by:
a) validating crc
b) validating format
c) validating passed data syntax
"""
def validate(self):
print "Validating firmware:",
valid=False
#count number of lines
lnum=1
# open the FW file
f = open(self.filename)
# for each file line
for line in f:
#static LEN ADDR1 ADDR2 TYPE DATA CKSUM
#: 04 05 00 00 50EF2EF0 9A
# parse the line
target = re.match( r"^:([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]*)([a-fA-F0-9]{2}).$", line, re.M|re.I|re.DOTALL)
# in case the data field does not have correct size
if len(target.group(5))%2!=0:
print "KO\nLine",lnum,': Invalid bytecode message!'
sys.exit(4)
            # get the CRC value from the record
crc1=int(line[-4:-1],16)
# calculate the CRC value of the data read
crc2=0
for i in range(1, len(line)-5, 2):
#print line[i:i+2]
crc2+=int(line[i:i+2],16)
# python cannot simulate byte overruns, so ugly math to be done
crc2%=256
crc2=255-crc2+1
crc2%=256
# validate the CRC :)
if crc1!=crc2:
print "KO\nLine",lnum,': Invalid bytecode checksum! Defined:', crc1,'Calculated:', crc2
sys.exit(4)
# in case that the done command is detected, than finish
if target.group(4)=='01':
valid=True
break
lnum+=1
# close the FW file
f.close()
if not valid:
print "KO\n Termination signature not found in the firmware file."
sys.exit(4)
else:
print 'OK'
"""
5) Upload the FW to PIco
"""
def fw_upload(self):
print "Uploading firmware: 0% ",
        # count the number of lines in the file for the progress bar
with open(self.filename) as f:
lnum=len(list(f))
# open the FW file
f = open(self.filename)
# Set up the connection to the PIco
PIco = self.serial.Serial(port=self.port,baudrate=38400,timeout=0.005,rtscts=0,xonxoff=True)
# empty the input buffer
for line in PIco:
pass
status=False
# send the data to PIco
PIco.write(";HELLO\r")
rcnt=100
# loop and wait for the response
while rcnt>0:
# in case there is something waiting on the serial line
for resp in PIco:
                # get rid of the newlines
resp=resp.strip()
# check if the response is the expected value or not :)
if ord(resp[0])==6:
#print "Response OK:",ord(resp)
status=True
rcnt=1
else:
print "KO\nERROR: Invalid status word from PIco (",ord(resp),') when processing initial line! Please retry the FW upload process.'
sys.exit(5)
break
rcnt-=1
if not status:
print "KO\nERROR: No status word from PIco revcieved when processing initial line! Please check possible warnings above and retry the FW upload process."
sys.exit(5)
# calculate 5% progress bar step
lnumx=lnum/100*5
# count the processed lines
lnum2=1
# for each line in the FW file
for line in f:
status=False
            # strip the \r\n and add only \r
line=line.strip()+"\r"
# send the data to PIco
PIco.write(line)
#print "Written:",line
# set the wait iterations for the bootloader response
rcnt=100
lrcnt=0
# loop and wait for the response
while rcnt>0:
# in case there is something waiting on the serial line
for resp in PIco:
                    # get rid of the newlines
resp=resp.strip()
# check if the response is the expected value or not :)
if ord(resp[0])==6:
#print "Response OK:",ord(resp)
#print "Waited:",rcnt
status=True
lrcnt=rcnt
rcnt=1
else:
print "KO\nERROR: Invalid status word from PIco (",ord(resp),') when processing line',lnum2,' Please retry the FW upload process.'
sys.exit(5)
break
rcnt-=1
if not status:
print "KO\nERROR: No status word from PIco revcieved when processing line",lnum2,' Please check possible warnings above and retry the FW upload process.'
sys.exit(5)
# in case that the done command is detected, than finish
if line[7:9]=='01':
break
lnum2+=1
            # show the update progress and print the percentage of the process from time to time
if lnum2%lnumx==0:
print ' '+str(round(float(100*lnum2/lnum)))+'% ',
else:
if lrcnt>80:
sys.stdout.write('.')
elif lrcnt>60:
sys.stdout.write(',')
elif lrcnt>40:
sys.stdout.write('i')
elif lrcnt>20:
sys.stdout.write('|')
else:
sys.stdout.write('!')
sys.stdout.flush()
print ' Done uploading...'
# close the FW file
f.close()
"""
6) Perform factory reset of the PIco
"""
def factory_reset(self):
#time.sleep(1)
print "Invoking factory reset of PIco..."
time.sleep(5)
status=False
# Set up the connection to the PIco
PIco = self.serial.Serial(port=self.port,baudrate=38400,timeout=0.005,rtscts=0,xonxoff=True)
# empty the input buffer
for line in PIco:
pass
# send factory reset command
PIco.write('@factory\r')
time.sleep(5)
# close the channel to PIco
PIco.close()
print 'ALL Done :) Ready to go...'
def usage(self):
print "\n",sys.argv[0],' -f <fw_file> [ -v | -h | --force | -s | -p serial | -b i2c_bus_number ]',"\n"
sys.exit(1)
if __name__ == "__main__":
FWUpdate()
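# Illustrative sketch (added, not part of the original script): validate() above
# checks Intel HEX style records of the form :LLAAAATTDD...CC, where the trailing
# CC byte is the two's complement of the sum of all preceding bytes. A minimal
# stand-alone checksum helper, assuming that record layout:
def intel_hex_checksum(record_hex):
    """record_hex: the hex characters after ':' excluding the final checksum byte."""
    byte_values = [int(record_hex[i:i+2], 16) for i in range(0, len(record_hex), 2)]
    return (-sum(byte_values)) & 0xFF

# The standard end-of-file record ":00000001FF" carries checksum 0xFF.
assert intel_hex_checksum("00000001") == 0xFF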
|
[
"johan.kindgren@acrend.se"
] |
johan.kindgren@acrend.se
|
e9c9f4f5309528d037dad01dd4af1472ed2e2ea5
|
49a9317592e84cac5e9cea791f33423bc2222462
|
/tests/test_full_stack.py
|
d8c4eb4b8b2c6f5785a5a298c558cb9acc6025fa
|
[
"Apache-2.0"
] |
permissive
|
juju-solutions/matrix
|
874291b867c500968facdad05a60bbc60db03f7d
|
60ff9290591f034dbf92a1cd9494c02d51f28929
|
refs/heads/master
| 2021-01-11T02:09:55.674968
| 2017-04-28T14:10:14
| 2017-04-28T14:10:14
| 70,099,250
| 8
| 6
| null | 2017-04-28T13:56:10
| 2016-10-05T21:02:43
|
Python
|
UTF-8
|
Python
| false
| false
| 479
|
py
|
from pathlib import Path
import pytest
import unittest
from matrix.main import main
from matrix.utils import new_event_loop
class TestFullStack(unittest.TestCase):
def test_stack(self):
controller = pytest.config.getoption('--controller')
if not controller:
raise unittest.SkipTest()
with new_event_loop():
bundle = Path(__file__).parent / 'basic_bundle'
main(['-c', controller, '-p', str(bundle), '-s', 'raw'])
|
[
"petevg@gmail.com"
] |
petevg@gmail.com
|
3ccb2810111cc7c17fff1237b94360a7db98e23c
|
a573577081d7d33b4f8f52a7e5cfc5878a82161c
|
/backend/General/linear_search.py
|
6108582343e7f1646d079d93e632600befd7675c
|
[] |
no_license
|
akshaygoregaonkar/Interview_prep
|
38d320aa92e0bfba5e6d8c36ad8f93bff6a11e97
|
abf6f2ffb969c7e1cd6c80cb79f41d4659de53ad
|
refs/heads/main
| 2023-06-18T00:47:51.102679
| 2021-07-19T06:51:30
| 2021-07-19T06:51:30
| 387,366,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
my_list=[1,2,4,5,6,7,8,1,2]
def linear_search(arr, num):
    # Scan every element; only report "not found" once the whole list is checked.
    for i in range(len(arr)):
        if num == arr[i]:
            return f'found at index {i}'
    return "not found"
if __name__ =='__main__':
print(linear_search(my_list,8))
|
[
"akshaygoregaonkar@gmail.com"
] |
akshaygoregaonkar@gmail.com
|
d776256b9c0888939a4ddb81ebaea252992d1067
|
be2105e01881d4806c78e79fa05b73133a7a1d0b
|
/ABC/core/storage/storage.py
|
9331481eea8884445d2595c60f9b615911b82a2f
|
[] |
no_license
|
git-alice/auto_bin_classification
|
8cbea0a5d356a0e041c92cc43f3f11ff68e9ac4b
|
0f07005cc99ca08437219a587d1a3d276503caab
|
refs/heads/master
| 2023-01-12T19:19:38.819864
| 2020-11-22T22:28:17
| 2020-11-22T22:28:17
| 313,606,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
import cloudpickle as pickle
from pathlib import Path
from typing import Dict, Any, List, Union
from ABC.core.storage.pickled import Pickled
class Storage:
root: Path = Path('./data/')
verbose: bool = True
_verbose_print_margin: str = '\t'
@classmethod
def set_root(cls, root: Union[str, Path]) -> None:
cls.root = Path(root)
@classmethod
def save(cls, item: Any, filename: Union[str, Path], sub_storage: Union[str, Path] = '', **kwargs: Dict[str, Any]) -> None:
        print('Saving:')
filename = Path(filename)
if cls.verbose:
            print(f'{cls._verbose_print_margin}Current directory: {Path.cwd()}')
            print(f'{cls._verbose_print_margin}Database root: {cls.root}')
            print(f'{cls._verbose_print_margin}Storage/sub-storage: {sub_storage or cls.root}')
cls.root.mkdir(parents=True, exist_ok=True)
if not filename.is_absolute():
(cls.root / sub_storage / filename).parent.mkdir(parents=True, exist_ok=True)
try:
filename = Path(f'{filename}.pickle')
pickled_item = Pickled(item=item)
pickled_item.save_data_hook(**kwargs)
with open(cls.root / sub_storage / filename, 'wb') as f:
pickle.dump(pickled_item, f)
except Exception as e:
            print(f'{cls._verbose_print_margin}The data type does not match the database.')
            print(f'{cls._verbose_print_margin}Object type: ', type(item))
            print(f'{cls._verbose_print_margin}Exception:', e)
@classmethod
def load(cls, filename: str, sub_storage: Union[str, Path] = '') -> Pickled:
        print('Loading:')
try:
filename = Path(f'{filename}.pickle')
            print(f'{cls._verbose_print_margin}Loading from: {cls.root / sub_storage / filename}') if cls.verbose else None
with open(cls.root / sub_storage / filename, 'rb') as f:
pickled = pickle.load(f)
pickled.load_data_hook()
return pickled.item
except FileNotFoundError:
            print(f'{cls._verbose_print_margin}The object is missing from the database.')
@classmethod
def get_storages(cls) -> List[str]:
return [p.stem for p in cls.root.glob('*') if p.is_dir()]
@classmethod
def get_all_names_from_storage(cls, storage: Union[str, Path] = '', ext: str = 'pickle') -> List[str]:
return [f.stem for f in cls.root.glob(f'{storage}/*.{ext}') if f.is_file()]
@classmethod
def load_all_from_storage(cls, storage: Union[str, Path] = '') -> List[Pickled]:
objs = [cls.load(f) for f in cls.get_all_names_from_storage(storage)]
return objs
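# Hypothetical usage sketch (added for illustration, not part of the original
# class), assuming the default ./data/ root and the pickle files shown above:
#
#   Storage.set_root('./data/')
#   Storage.save({'weights': [1, 2, 3]}, filename='model', sub_storage='experiments')
#   restored = Storage.load('model', sub_storage='experiments')
#   print(Storage.get_storages(), Storage.get_all_names_from_storage('experiments'))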
|
[
"art.oxbow@gmail.com"
] |
art.oxbow@gmail.com
|
08f1569e594d0509eedf93ee0f374cdca24efc59
|
59478c1c4146a49efbf3af2e86e0c6c76a7b6f47
|
/section16_DataAnalysis/IdentifyUniqueValueInSeries.py
|
80fe125b4bf7d1a3e032f062463e77c15135ff05
|
[] |
no_license
|
CRUZEAAKASH/PyCharmProjects
|
ac7c0cc49a0be07d25723401609dc09692908e68
|
4037ce9022d1fa76f4e6b906c34718942b3bc01c
|
refs/heads/master
| 2020-06-06T15:22:13.442484
| 2019-07-19T04:09:23
| 2019-07-19T04:09:23
| 192,776,045
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from pandas import Series
series = Series([100, 200, 300, 400, 500], index=[1, 1, 2, 2, 3])
print(series)
print(series.index.is_unique)
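# Added note (illustrative): with the duplicated index labels [1, 1, 2, 2, 3],
# is_unique prints False, and label-based selection returns a sub-Series rather
# than a scalar.
print(series.loc[1])          # two rows: 100 and 200
print(series.index.unique())  # the distinct labels: [1, 2, 3]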
|
[
"aakash.shinghal@gmail.com"
] |
aakash.shinghal@gmail.com
|
0c7a9e9338398106e5ed1e1b8552fce1ccd6c84b
|
97b9e22b3177975ab14e2559dd2ab4b3078dc54a
|
/NEAL/NEAL/settings.py
|
c967198a4c4c42d7652970280bf135d1f5dbfe64
|
[
"Apache-2.0"
] |
permissive
|
ankitshah009/Never_Ending_Learning_of_Sound
|
c969cbaa7827cf2349cf5212bbe112aed687bcd3
|
4f9425e8c16145def9e5d2fc9b5a397ad4c7d6ac
|
refs/heads/master
| 2021-01-21T19:19:13.586834
| 2015-03-28T13:15:16
| 2015-03-28T13:15:16
| 28,266,575
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
"""
Django settings for NEAL project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tcktrq3#jbe9^pity=zdaz%@2tg9-n0j^9p(+sao)pg)gyd^$e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'neal_main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'NEAL.urls'
WSGI_APPLICATION = 'NEAL.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(os.path.abspath(__file__), './static/')
STATIC_URL = '/static/'
MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(os.path.abspath(__file__), './../../media/')
#MEDIA_ROOT = '/home/rohan/Desktop/rohan/NITK/NEAL/media/'
|
[
"rohan.badlani@gmail.com"
] |
rohan.badlani@gmail.com
|
b870395af63b1f010c5ef303162cc4266d545fe5
|
47bbbb5819fb18274a2a1b48ead23d62f22f0480
|
/Infection_vs_Inflammation/Code/Process_Data_V2.py
|
8c814d9d58ad9d0c75a1163e09e732ffc3a95126
|
[
"MIT"
] |
permissive
|
MosabAboidrees/Machine-Learning-4-MRI
|
84df6def566d33d142f9e372183e3f4d2bf1ec39
|
973196063d69115048bfa97f213dd6ff0400f74d
|
refs/heads/master
| 2021-06-19T21:50:49.283163
| 2017-07-28T18:40:27
| 2017-07-28T18:40:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,314
|
py
|
# Import Modules as needed
import numpy as np
#import seaborn as sn
import pandas as pd
from mylocal_functions import *
import matplotlib.pyplot as plt
# ======== CEST============= #
# Make list of all CEST.txt files
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')
# create function to normalize data
def normalize_data(DataMatrix):
rows,cols = DataMatrix.shape
newData = np.zeros_like(DataMatrix)
for row in range(rows):
newData[row,:]=DataMatrix[row,:]/DataMatrix[row,8]
return newData
# extract CEST data as a 4 X 110 matrix
Z=np.zeros((4,110))
for names in CEST_list:
D=txt_2_array(names); #Convert txt file to array
Zn=normalize_data(D.T)
Z=np.concatenate((Z,Zn))
Z=Z[4::,9::]
# define offsets in ppm
a1=np.linspace(-55,-50,9); ppm=np.linspace(-8,8,101); full_ppm = np.concatenate((a1, ppm))
# Fit data to center it
CEST_centered=np.zeros_like(Z); rows,cols = CEST_centered.shape;
CEST_integral=np.zeros((rows,1))
for i in range(rows):
p=fit_L2_scale(ppm,Z[i,:])
L=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
CEST_centered[i,:]=L
CEST_integral[i,0]=np.sum(L)
# create tissue label
TissueLabel=np.zeros((rows,1))
for i in range(1,4):
TissueLabel[i::4]=i
CEST_integral_df= pd.DataFrame(data=CEST_integral,columns=["CEST_integral"]);
CEST_integral_df["Tissue"]=TissueLabel
Y=np.zeros((16,4))
for i in range(4):
df=CEST_integral_df[CEST_integral_df["Tissue"]==i]
Y[:,i]=df.CEST_integral.values;
# ======== T2 MSME============= #
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*S3*T2.txt')
T2_matrix=np.zeros( (len(T2_list),4) )
TR=np.linspace(.012,.012*12,12)
# Fit T2
for i in range(len(T2_list)):
YDataMatrix=txt_2_array(T2_list[i])
#Estimate T2
T2time=fitT2(TR,YDataMatrix)
T2_matrix[i,:]=T2time.T
#======== create violing plots ============= #
Tissues=["Infected","Healthy R","Sterile Infl.","Healthy K"]
fig = plt.figure(); ax = fig.add_subplot(111)
ax.set_xticks([1, 2, 3, 4])
ax.set_xticklabels(Tissues)
plt.violinplot(Y); plt.ylabel("CEST Integral")
# create violing plot
fig = plt.figure(); ax = fig.add_subplot(111)
ax.set_xticks([1, 2, 3, 4])
ax.set_xticklabels(Tissues)
plt.violinplot(T2_matrix); plt.ylabel("T2 time")
|
[
"cardenaj@email.arizona.edu"
] |
cardenaj@email.arizona.edu
|
e100368d8011cacb0b33df604df945f44851ff6d
|
4e82346452869030bd22c6c529fd8ce93ed9218b
|
/venv/bin/pip
|
faf00e9528fcca23fe1a046975f73e108eccfe7e
|
[] |
no_license
|
rhofvendahl/factor_graph
|
155df6a11ee6c5c1217c79f14e205bebf7c73455
|
2ab9d05eadeb13340adb03b1798859dd7df68a76
|
refs/heads/master
| 2020-04-08T01:43:58.466840
| 2018-11-24T06:05:29
| 2018-11-24T06:05:29
| 158,907,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
#!/home/russell/factor_graph/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rhofvendahl@gmail.com"
] |
rhofvendahl@gmail.com
|
|
09f9604f9c8ea22ee6f81a3c1d977d3508559134
|
f1738cd603e0b2e31143f4ebf7eba403402aecd6
|
/ucs/management/univention-directory-manager-modules/univention-cli-client
|
4d826e3ec396991d416322d0414d98e410fa4de7
|
[] |
no_license
|
m-narayan/smart
|
92f42bf90d7d2b24f61915fac8abab70dd8282bc
|
1a6765deafd8679079b64dcc35f91933d37cf2dd
|
refs/heads/master
| 2016-08-05T17:29:30.847382
| 2013-01-04T04:50:26
| 2013-01-04T04:50:26
| 7,079,786
| 8
| 6
| null | 2015-04-29T08:54:12
| 2012-12-09T14:56:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,125
|
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
#
# Univention Admin Modules
# the command line client program
#
# Copyright 2004-2012 Univention GmbH
#
# http://www.univention.de/
#
# All rights reserved.
#
# The source code of this program is made available
# under the terms of the GNU Affero General Public License version 3
# (GNU AGPL V3) as published by the Free Software Foundation.
#
# Binary versions of this program provided by Univention to you as
# well as other copyrighted, protected or trademarked materials like
# Logos, graphics, fonts, specific documentations and configurations,
# cryptographic keys etc. are subject to a license agreement between
# you and Univention and not subject to the GNU AGPL V3.
#
# In the case you use this program under the terms of the GNU AGPL V3,
# the program is provided in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License with the Debian GNU/Linux or Univention distribution in file
# /usr/share/common-licenses/AGPL-3; if not, see
# <http://www.gnu.org/licenses/>.
import locale
import socket
import time
import os
import sys
import string
import codecs
from univention.config_registry import ConfigRegistry
socket_dir='/tmp/admincli_%s/' % os.getuid()
socket_filename='sock'
socket_path=(socket_dir+socket_filename)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
cmd=''
data=''
output=[]
logfile=''
pos = 0
for arg in sys.argv:
pos += 1
if '--logfile' == arg[:9]:
if len(arg) > 10 and arg[9] == "=":
logfile = arg[10:]
else:
try:
logfile=sys.argv[pos]
except:
print "E: Option --logfile requires an argument"
sys.exit(1)
try:
s.connect(socket_path)
except:
pid=os.fork()
if not pid:
null = os.open('/dev/null', os.O_RDWR)
os.dup2(null, sys.stdin.fileno())
os.dup2(null, sys.stdout.fileno())
os.dup2(null, sys.stderr.fileno())
if len(logfile) >0:
os.execv('/usr/share/univention-directory-manager-tools/univention-cli-server', ['univention-cli-server', logfile])
else:
os.execv('/usr/share/univention-directory-manager-tools/univention-cli-server', ['univention-cli-server'])
else:
os.waitpid(pid, os.P_NOWAIT)
ucr = ConfigRegistry()
ucr.load()
socket_timeout = int(ucr.get('directory/manager/cmd/sockettimeout', '50'))
stime=int(time.time())
while not os.path.exists('%s' % socket_path):
time.sleep(0.1)
if int(time.time()) > stime+socket_timeout:
print 'E: Can`t find running daemon after %s Seconds. (No socketfile)' % socket_timeout
sys.exit(1)
connection_timeout=30 # this took a long time if getfqdn(host) was used in cli-server
stime=int(time.time())
socking=0
while socking == 0:
try:
s.connect(socket_path)
socking=1
except:
time.sleep(0.1)
if int(time.time()) > stime+connection_timeout:
print 'E: Can`t connect daemon after %s seconds.' % connection_timeout
sys.exit(1)
#sys.exit(1)
cmdfile=os.path.basename(sys.argv[0])
if cmdfile == 'univention-passwd':
pwd1='x'
pwd2='y'
while pwd1 != pwd2:
pwd1=raw_input('New password ')
pwd2=raw_input('Re-enter new password ')
if pwd1 != pwd2:
            print 'password mismatch'
sys.argv.append('--pwd')
sys.argv.append(pwd1)
s.send(repr(sys.argv)+'\0')
while 1:
buf = s.recv(1024)
if len(buf) <= 0:
print 'E: Daemon died.'
sys.exit(1)
elif buf[-1] == '\0':
buf = buf[0:-1]
data += buf
break
else:
data += buf
rc=0
output = eval(data)
s.close()
if cmdfile == 'univention-passwd':
for line in output:
if line.startswith('passwd error: '):
print line
if line == 'passwd error: password alreay used':
rc=1
elif line.startswith('passwd error: The password is to short'):
rc=2
else:
print line
else:
if output[-1] == "OPERATION FAILED":
rc = 3
output = output [:-1]
for i in output:
if type(i) is unicode:
print i.encode(locale.getpreferredencoding(), 'replace')
else:
print i
sys.exit(rc)
|
[
"kartik@debian.org"
] |
kartik@debian.org
|
|
485b3825671ed5a47eb13c5cc6185a30b8d8dace
|
8ff04ffaff45a852da164acff411b7eca2cc4039
|
/20.02.2017/4.9.5.py
|
63efcc8fd0a90d102def648a12797165cfacfbde
|
[] |
no_license
|
NguyenVietTrung/Techkids-C4E8
|
477c1b8dff70a871374340efe9343a41db7df3bb
|
e54257ce8c1407cde69d9dc93c69df881c7060ae
|
refs/heads/master
| 2021-01-19T09:52:13.944261
| 2017-03-24T17:06:15
| 2017-03-24T17:06:15
| 82,156,797
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
from turtle import *
bgcolor("lightgreen")
color("blue")
pensize(2)
speed(-1)
def ds1(n, side):
for i in range(n):
forward(side)
left(90)
side = side + 1
def ds2(n, side):
for i in range(n):
forward(side)
left(70)
side = side + 1
ds1(100, 1)
ds2(100, 1)
|
[
"trungnv.1710@gmail.com"
] |
trungnv.1710@gmail.com
|
be4ba72cbcb00b5350e68c0fc76a4d61c8bed511
|
1cf9771570cd18202efc94cf42ca93dafdb98a7f
|
/python/pan.py
|
dc948c820c0792bf1f916e71e2bb67fe8ec235aa
|
[] |
no_license
|
webis-de/python_deVel-01-email-authorship-forensics
|
6e95250f2711cd6d2f5b746f42cea6c22de98316
|
4f4725484199f012476f1eb0768a05f6d165478e
|
refs/heads/master
| 2021-08-24T01:55:31.384045
| 2017-12-07T14:57:14
| 2017-12-07T14:57:14
| 107,246,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
# Credits to: https://raw.githubusercontent.com/webis-de/savoy13-authorship-attribution/master/pan.py?token=AVfOvTqGcG9_-XTWbTq1SMnuw_-OfUWaks5aKUj1wA%3D%3D
from bs4 import BeautifulSoup
from collections import Counter
from heapq import nlargest
import re
class PAN_training:
def cleanup(self, path):
with open(path, 'r') as infile:
data = re.sub(r'<(?!\/?(text|training|author|body|NAME))', '', infile.read())
with open(path, 'w') as outfile:
outfile.write(data)
def __init__(self, xml_location):
self.cleanup(xml_location)
self.data = {}
with open(xml_location) as fp:
xml_soup = BeautifulSoup(fp, "xml")
root = xml_soup.training
for child in root.findAll('text'):
author = child.find('author').get('id')
text = child.body.text.lower() \
.replace("don't", "do not") \
.replace("doesn't", "does not") \
.replace("didn't", "did not") \
.replace("can't", "cannot") \
.replace("mustn't", "must not") \
.replace("won't", "will not") \
.replace("shouldn't", "should not") \
.replace("couldn't", "could not") \
.replace("wouldn't", "would not") \
.replace("haven't", "have not") \
.replace("hasn't", "has not") \
.replace("wasn't", "was not") \
.replace("'ll", " will")
wordlist = []
pointer = 0
for i, c in enumerate(text):
if not (65 <= ord(c) <= 90 or 97 <= ord(c) <= 122):
if i - pointer >= 1:
wordlist.append(text[pointer:i])
if c in ".,:;-!?'":
wordlist.append(c)
pointer = i + 1
# todocount=child.get("file")
self.data[author] = self.data.get(author, []) + [(wordlist, Counter(wordlist))]
# print(todocount)
def reduce_author_set(self, n=20): # TODO move to superclass
self.data = dict(nlargest(n, self.data.items(), key=lambda a: len(a[1])))
def total_freqs(self):
return sum((self.author_freqs(a) for a in self.data), Counter())
def total(self):
return sum(self.data.values(), [])
def author_freqs(self, author):
return sum((a[1] for a in self.data[author]), Counter())
if __name__ == "__main__":
p = PAN_training("raw/LargeTrain.xml")
p.reduce_author_set(20)
print("Number of articles by the", 20, "most active authors:", sum(len(p.data[v]) for v in p.data))
print(sum(p.total_freqs().values()))
print(p.total_freqs().most_common(200))
p.total()
|
[
"marcel.schliebs@sciencespo.fr"
] |
marcel.schliebs@sciencespo.fr
|
55ff97a0fcc757d4a27ac0c45b8f09b5d4eac8bb
|
c0809283f8747fb39a960a8f0d1ae08e90f738c5
|
/bezman_shop/accounts/decorators.py
|
aa2d89c58a5347458be84a42a0821c61f3f82914
|
[] |
no_license
|
zarina494/BEZMAN
|
df8570b0178603c3ad193e47e3eaa359358ef870
|
279bf93cdab40d795ac00e4fb4200b9bd89bff1d
|
refs/heads/main
| 2023-01-30T11:52:23.954619
| 2020-12-16T05:54:08
| 2020-12-16T05:54:08
| 317,106,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from django.shortcuts import redirect
def admin_only(function):
def wrap(request,*args,**kwargs):
if request.user.is_staff:
return function(request,*args,**kwargs)
else:
return redirect('products')
return wrap
|
[
"Sadykova-zarina@inbox.ru"
] |
Sadykova-zarina@inbox.ru
|
ffd5136b4c1fc3303bbea7946185e8061c4ec266
|
3b91490de7037e81092802b4e4dbbdc5c82d7764
|
/OP_ELM/elm.py
|
919af66ba238a03fef79927f1eb8734e346eb452
|
[] |
no_license
|
tylerwmarrs/elm-skin-segmentation
|
199dc20fef2528ba27fcd662ec34ee64fa6d4ff9
|
88bc06321c462c940b6fd21d86e2f5cad1dfbbfd
|
refs/heads/master
| 2021-01-11T16:30:09.480761
| 2016-12-10T18:41:36
| 2016-12-10T18:41:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,083
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 14:07:48 2013
@author: akusoka1
"""
import numpy as np
from numpy.linalg import inv
from scipy.optimize import minimize
from mrsr import mrsr
from slfn import SLFN
class ELMError(Exception):
def __init__(self, v):
self.val = v
def __str__(self):
return repr(self.val)
class ELM(object):
"""Implementation of ELM.
"""
# global variables
nn = None
op_max_samples = 5000 # limit of samples for mrsr
TOL = np.finfo(np.float32).eps * 50 # tolerance for linear solver
def __init__(self, inputs, outputs=0, mirror=0, lin=0, tanh=0):
"""Starts ELM with parameters.
X - input data OR elm parameters for running,
with loaded parameters no training possible
mirror - number of dimensions copied to hidden neurons,
copy first #mirror dimensions
lin - number of linear combination neurons
tanh - number of tanh neurons
"""
# load trained network
if isinstance(inputs, SLFN):
self.nn = inputs
# train a new elm
else:
# set input and output dimensions
if (mirror<1) and (lin<1) and (tanh<1):
raise ELMError("No neurons selected.")
self.nn = SLFN()
self.nn.d = inputs+1
self.nn.p = outputs
self.nn.k = max(mirror,0) + max(lin,0) + max(tanh,0)
self.nn.feats = np.arange(self.nn.d)
self.nn.W = self._gen_random_weights(mirror, lin, tanh) # random projection matrix
def _norm_X(self, X):
# check saliency
if len(X.shape) != 2:
raise ELMError("X must be 2-dim numpy array.")
if X.shape[1] < np.max(self.nn.feats):
raise ELMError("Wrong input dimension: %d expected, %d found"\
% (np.max(self.nn.feats), X.shape[1]))
        # add bias column
X = np.hstack((X, np.ones((X.shape[0],1)))) # add bias
X = X.take(self.nn.feats, axis=1)
return X
def _norm_Y(self, Y, X):
if len(Y.shape) == 1:
# reshape single output to 2-dim form
Y = np.reshape(Y, (-1,1))
if len(Y.shape) != 2:
raise ELMError("Y must be 1-dim or 2-dim numpy array.")
if Y.shape[0] != X.shape[0]:
raise ELMError("X and Y have different number of samples.")
if Y.shape[1] != self.nn.p:
raise ELMError("Wrong output dimension: %d expected, %d found"\
% (self.nn.p, Y.shape[1]))
return Y
def _gen_random_weights(self, mirror, lin, tanh):
"""Generate random projection matrix and mapping functions.
Identity function is 'None'.
"""
d = self.nn.d - 1 # without bias
W = []
self.nn.f = [] # reset features
# add mirrored neurons
if mirror > 0:
mirror = min(mirror, d) # limit to number of inputs
W.append(np.eye(d, mirror))
self.nn.f.extend([0]*mirror)
# add linear neurons
if lin > 0:
W.append(np.random.randn(d, lin))
self.nn.f.extend([0]*lin)
# add tanh neurons
if tanh > 0:
W.append(np.random.randn(d, tanh))
self.nn.f.extend([1]*tanh)
# add bias
self.nn.f = np.array(self.nn.f)
W = np.vstack((np.hstack(W), np.random.randn(1, self.nn.k)))
return W
def _press(self, H, Y, lmd=None):
"""According to Momo's article.
Extended case for multiple outputs, 'W' is 2-dimensional.
"""
# no lambda version of PRESS
if lmd is None:
return self._press_basic(H,Y)
X = H
N = X.shape[0]
U,S,V = np.linalg.svd(X, full_matrices=False)
A = np.dot(X, V.T)
B = np.dot(U.T, Y)
# function for optimization
def lmd_opt(lmd, S, A, B, U, N):
Sd = S**2 + lmd
C = A*(S/Sd)
P = np.dot(C, B)
D = np.ones((N,)) - np.einsum('ij,ji->i', C, U.T)
e = (Y - P) / D.reshape((-1,1))
MSE = np.mean(e**2)
return MSE
res = minimize(lmd_opt, lmd, args=(S,A,B,U,N), method="Powell")
if not res.success:
print "Lambda optimization failed: (using basic results)"
print res.message
MSE = lmd_opt(lmd, S, A, B, U, N)
self.nn.lmd = None
else:
lmd = res.x
MSE = res.fun
self.nn.lmd = lmd
return MSE
def _press_basic(self, H, Y):
"""According to Momo's article, fast version with no L2-regularization.
Extended case for multiple outputs, 'W' is 2-dimensional.
"""
X = H
N = X.shape[0]
C = inv(np.dot(X.T, X))
P = X.dot(C)
W = C.dot(X.T).dot(Y)
D = np.ones((N,)) - np.einsum('ij,ji->i', P, X.T)
e = (Y - X.dot(W)) / D.reshape((-1,1))
MSE = np.mean(e**2)
return MSE
def _op_core(self, X, Y, lmd=None, Kmax=0):
"""Core OP-ELM function, used in other methods.
"""
if Kmax == 0:
Kmax = self.nn.k
N = X.shape[0]
if N > self.op_max_samples:
idx = np.arange(N)
np.random.shuffle(idx)
idx = idx[:self.op_max_samples]
X = X.take(idx, axis=0)
Y = Y.take(idx, axis=0)
H = self.nn._project(X)
        # rank all neurons wrt their usefulness
rank = mrsr(Y, H, kmax=Kmax)
"""
tree-like close-to-optimal discrete function optimization
evaluate 3 middle points every time, then reduce search range twice
delta is distance between a <-> b1 <-> b2 <-> b3 <-> c
note that b3 <-> c may be larger than delta!
|-------------------------------------|
a0 b1 b2 b3 c0 < say smallest error at b3
|------------------|
a1 b1 b2 b3 c1
etc...
"""
a = 0
c = Kmax
b = c
E = np.ones((Kmax,))
while True:
delta = max((c-a)/4, 1)
# if delta==1, evaluate all points in range and exit
if delta < 2:
for i in range(a,b):
if E[i] == 1: # if we have not calculated this yet
Hi = H.take(rank[:i+1], axis=1)
E[i] = self._press(Hi, Y, lmd)
break
# check 3 middle points
for b in [a+delta, a+2*delta, a+3*delta]:
if E[b] == 1: # if we have not calculated this yet
Hi = H.take(rank[:b], axis=1)
E[b] = self._press(Hi, Y, lmd)
b = np.argmin(E)
a = b-delta
if b != (a+3*delta): # don't change the upper bound <Kmax>
c = b+delta
self.best_idx = rank[:np.argmin(E)+1] # indices of best neurons
return E.min()
def _update_nn(self):
"""Update ELM parameters using calculated best indices.
"""
best_idx = self.best_idx
self.nn.k = len(best_idx)
self.nn.W = self.nn.W.take(best_idx, axis=1)
self.nn.f = np.array([self.nn.f[i] for i in best_idx], dtype=np.int)
if len(self.nn.B) > 0:
self.nn.B = self.nn.B.take(best_idx, axis=0)
def _train_op(self, X, Y, Kmax=0):
"""Perform optimal pruning of ELM.
        Kmax - maximum number of neurons considered during pruning
"""
H = self.nn._project(X)
E_min = self._op_core(X,Y,Kmax=Kmax)
self._update_nn()
H_new = self.nn._project(X)
self.nn._solve(H_new, Y)
return E_min
def _train_trop(self, X, Y, Kmax=0):
"""Perform TR-optimized Optimal Pruning of ELM.
        Kmax - maximum number of neurons considered during pruning
"""
E_min = self._op_core(X, Y, lmd=1E-3, Kmax=Kmax)
self._update_nn()
H_new = self.nn._project(X)
self.nn._solve(H_new, Y)
return E_min
def _train_tr(self, X, Y, Kmax=0):
"""Tikhonov-regularized ELM.
"""
H = self.nn._project(X)
E = self._press(H, Y, 1E-5) # this finds optimal lambda
self.nn._solve(H, Y)
return E
def _train_basic(self, X, Y, Kmax=0):
"""Train a basic version of ELM.
"""
H = self.nn._project(X)
self.nn._solve(H, Y)
##############################################################################
def train(self, X, Y, method='none', Kmax=0):
"""Training wrapper.
"""
X = self._norm_X(X)
Y = self._norm_Y(Y,X)
methods = {"basic": self._train_basic,
"tr": self._train_tr,
"op": self._train_op,
"trop": self._train_trop}
E = methods[method.lower()](X,Y,Kmax)
return E
def get_nn(self):
return self.nn
def set_nn(self, nn):
self.nn = nn
def run(self, X, Y=None):
X = self._norm_X(X)
H = self.nn._project(X)
Yh = H.dot(self.nn.B)
if Y is None:
return Yh
else:
Y = self._norm_Y(Y,X)
E = self._press(H, Y, self.nn.lmd)
return Yh, E
def try1():
ins = 50
outs = 5
N = 1000
X = np.random.randn(N,ins)
Y = np.random.rand(N,outs)
elm = ELM(ins, outs, mirror=ins, lin=0, tanh=100)
elm.train(X, Y, method='op')
Yh = elm.run(X)
print "mse: ", np.mean((Y-Yh)**2)
print "Done!"
if __name__ == "__main__":
print "numpy version: ", np.__version__
try1()
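# Added note (illustrative, assuming ordinary least squares): _press_basic above
# relies on the standard leave-one-out identity for linear models,
#     e_i = (y_i - x_i W) / (1 - h_ii),   with  h_ii = x_i (X^T X)^{-1} x_i^T,
# so all LOO residuals come from a single fit instead of N refits. A minimal
# numerical restatement of that identity (kept as comments so the script's
# behaviour is unchanged):
#
#   X, Y = np.random.randn(20, 3), np.random.randn(20, 1)
#   C = inv(np.dot(X.T, X))
#   W = C.dot(X.T).dot(Y)
#   h = np.einsum('ij,ji->i', X.dot(C), X.T)
#   loo_residuals = (Y - X.dot(W)) / (1 - h).reshape((-1, 1))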
|
[
"noreply@github.com"
] |
tylerwmarrs.noreply@github.com
|
688f53121ca85a7233c64051dc740b97edb52db7
|
c4b618ae721abc13862c617f91c6ccf0f86fc01b
|
/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py
|
f1d5fd016160e72e2c73b888deb23d4dd724b8ba
|
[
"Apache-2.0"
] |
permissive
|
virtualopensystems/neutron
|
2c3938375d02e3b80a0a32640573e3ed0dffa11d
|
067acd95ab6042ca5d123342abd420a2a938acd2
|
refs/heads/master
| 2020-07-02T03:22:36.448097
| 2019-04-18T09:46:34
| 2019-04-18T09:46:34
| 22,715,796
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""embrane_lbaas_driver
Revision ID: 33dd0a9fa487
Revises: 19180cf98af6
Create Date: 2014-02-25 00:15:35.567111
"""
# revision identifiers, used by Alembic.
revision = '33dd0a9fa487'
down_revision = '19180cf98af6'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
u'embrane_pool_port',
sa.Column(u'pool_id', sa.String(length=36), nullable=False),
sa.Column(u'port_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'],
name=u'embrane_pool_port_ibfk_1'),
sa.ForeignKeyConstraint(['port_id'], [u'ports.id'],
name=u'embrane_pool_port_ibfk_2'),
sa.PrimaryKeyConstraint(u'pool_id'))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table(u'embrane_pool_port')
|
[
"ivar@embrane.com"
] |
ivar@embrane.com
|
402eeea77c85ca02c197b2159a413415b91cc1e2
|
776a40cd263156146da7d9efb425daa9a06f1de1
|
/manage.py
|
e7dde1f7f0f8e42e7c318c92ef0fbd32222b5063
|
[] |
no_license
|
VikkyyChavan/pdazure
|
5d5367785e17713466e409caf8eaa6a58106f48e
|
2fc91979f2b03a516d7ddc40de5b16fbb5b44861
|
refs/heads/master
| 2023-01-03T19:37:38.986642
| 2020-11-02T08:37:39
| 2020-11-02T08:37:39
| 309,304,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pddjango01.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"prafula.chavan@in.abb.com"
] |
prafula.chavan@in.abb.com
|
6b7aef28c6bdd7ea5e6a9d231eeed282eb8e4034
|
5ed4a18dda71dcfe082326a2737a3c4d92a14b02
|
/blog/models.py
|
86fad0f01f134e963368845bb7fece31fa8178a1
|
[] |
no_license
|
Shubham7567/blog-example
|
67b4f29981b4b0db08a635c13fbf65e9f61c41a8
|
8f1df3b391a5a499427d2ad858818f78b5c54e99
|
refs/heads/main
| 2023-04-09T00:15:14.318510
| 2021-04-23T04:37:37
| 2021-04-23T04:37:37
| 359,691,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
from django.db import models
from django.db.models.expressions import OrderBy
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from taggit.managers import TaggableManager
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager,self).get_queryset().filter(status = 'published')
class Post(models.Model):
STATUS_CHOICES = (
('draft','Draft'),
('published','Published')
)
title = models.CharField(max_length=250)
slug = models.SlugField(max_length=250,unique_for_date='publish')
author = models.ForeignKey(User,on_delete=models.CASCADE,related_name='blog_posts')
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=10,choices=STATUS_CHOICES)
tags = TaggableManager()
objects = models.Manager() #The default manager
published = PublishedManager() #our custom manager
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("blog:post-detail", args=[self.publish.year,self.publish.month,self.publish.day,self.slug])
class Comment(models.Model):
post = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='comments')
name = models.CharField(max_length=80)
email = models.EmailField()
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('created',)
def __str__(self):
return f"Comment by {self.name} on {self.post}"
|
[
"shubhamrathore7567@gmail.com"
] |
shubhamrathore7567@gmail.com
|
1a813f9881b069bb5007acf25da307c780bbc231
|
90914b7d84d69a86652e69d1ad72888af363367b
|
/sale_invoice_grouping/sale.py
|
2440c23b6557ffd7e215463fa95a4bb599b1af5b
|
[] |
no_license
|
emperadorxp1/TrytonModules
|
754a3ca92c0ac7b2db9165208b1bc5fda5fe4a73
|
33ef61752e1c5f490e7ed4ee8a3f0cff63a8fc89
|
refs/heads/master
| 2020-12-19T18:41:05.260174
| 2020-01-23T15:32:57
| 2020-01-23T15:32:57
| 235,815,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.pool import PoolMeta
from trytond.transaction import Transaction
__all__ = ['Sale']
class Sale(metaclass=PoolMeta):
__name__ = 'sale.sale'
@property
def invoice_grouping_method(self):
return self.party.sale_invoice_grouping_method
@property
def _invoice_grouping_fields(self):
return ('state', 'company', 'type', 'journal', 'party',
'invoice_address', 'currency', 'account', 'payment_term')
def _get_grouped_invoice_order(self):
"Returns the order clause used to find invoice that should be grouped"
return None
def _get_grouped_invoice_domain(self, invoice):
"Returns a domain that will find invoices that should be grouped"
Invoice = invoice.__class__
invoice_domain = [
('lines.origin', 'like', 'sale.line,%'),
]
defaults = Invoice.default_get(self._invoice_grouping_fields,
with_rec_name=False)
for field in self._invoice_grouping_fields:
invoice_domain.append(
(field, '=', getattr(invoice, field, defaults.get(field)))
)
return invoice_domain
def _get_invoice_sale(self):
transaction = Transaction()
context = transaction.context
invoice = super(Sale, self)._get_invoice_sale()
if (not context.get('skip_grouping', False)
and self.invoice_grouping_method):
with transaction.set_context(skip_grouping=True):
invoice = self._get_invoice_sale()
Invoice = invoice.__class__
domain = self._get_grouped_invoice_domain(invoice)
order = self._get_grouped_invoice_order()
grouped_invoices = Invoice.search(domain, order=order, limit=1)
if grouped_invoices:
invoice, = grouped_invoices
return invoice
|
[
"joxua.1995@gmail.com"
] |
joxua.1995@gmail.com
|
3ce5dbffdf45eda8cfd3efc1430f079848517ab7
|
2b62026b80f5f7ec9ccf179739baecd390ca4e05
|
/AuthApp/admin.py
|
0e19232a04fe17634a7236d8040224f222487679
|
[
"MIT"
] |
permissive
|
ashwani202/DjangoProject
|
8abb51ccb552683199d71163bf1fe4e05fa6c8b7
|
b97ca37ad023c0ecba62a3fd63cf1ad55dd7f65f
|
refs/heads/master
| 2023-05-27T21:20:43.002211
| 2021-06-09T11:50:39
| 2021-06-09T11:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
class MyUserAccountAdmin(admin.ModelAdmin):
list_display = ('email', 'first_name', 'last_name', 'created_on',)
search_fields = ['email', 'first_name', 'last_name']
list_filter = ('email',)
admin.site.register(MyUserAccount, MyUserAccountAdmin)
class EmailAdmin(admin.ModelAdmin):
list_display = ('email_id', 'subject', 'uuid', 'token', 'is_verify', 'sent_on', 'user')
search_fields = ['email_id', 'subject']
list_filter = ('email_id',)
admin.site.register(emailHandler, EmailAdmin)
# class FollowAdmin(admin.ModelAdmin):
# list_display = ('follower', 'following')
# search_fields = ['follower', 'following']
# list_filter = ('follower',)
#
#
# admin.site.register(Follow, FollowAdmin)
|
[
"48996332+creativeweb-aj@users.noreply.github.com"
] |
48996332+creativeweb-aj@users.noreply.github.com
|
8dea84c38579a1a042af9682a8b70c4e2f0de8a0
|
493b97dd77b094024188279a35de715716d6f63c
|
/main.py
|
62c66ffd894557e1dd69502f063e6e521791339b
|
[] |
no_license
|
ztyoung86/brain-hole
|
2b58f95b16a996c4dac27ca59508cc158e47b336
|
d27e9249c9c3dc4d6c3462f9a4c8f87cd3b31987
|
refs/heads/master
| 2022-12-04T23:20:40.547801
| 2022-11-28T03:42:05
| 2022-11-28T03:42:05
| 12,968,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
# -*- coding: utf-8 -*-
import jieba
import operator
jieba.enable_parallel(4)
words = {}
novel = open('lingyu.txt')
for line in novel.readlines():
for word in jieba.cut(line):
        if word in words:
            words[word] = words[word] + 1
        else:
            words[word] = 1
novel.close()
# sort words
sorted_words = sorted(words.items(), key=operator.itemgetter(1),reverse=True)
words = dict(sorted_words)
output = open('dict.txt','w')
out_str = ""
for word in sorted_words:
out_str = word[0]+" "+str(word[1])+"\n"
# print out_str,
output.write(out_str.encode("utf-8"))
output.close()
|
[
"albertyang@keep.edu.hk"
] |
albertyang@keep.edu.hk
|
62c18da37f3b5289cbc746707a3236dfb64489f3
|
5994b1b618d59ae057b66eb4e619c28bfe7ccf05
|
/books/learn-python-the-hard-way/ex_0/ex25.py
|
b18559041c9fb506533c8992241ce1a728eec1f9
|
[] |
no_license
|
NBR-hugh/python
|
3e2c821232c7b1f5e461fbffe209a0d8ba9084ca
|
68ce4140f5c8e39fc2582ee3c072ed5875f3e152
|
refs/heads/master
| 2021-01-13T15:47:58.057862
| 2017-08-17T08:40:41
| 2017-08-17T08:40:41
| 76,856,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after poping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
[
"574168029@qq.com"
] |
574168029@qq.com
|
148cbbaeaa8bac4d568f343d244063e18bc01502
|
0aa5749bffebd13598452a9ca589bd5eaadeb3e0
|
/usuario/admin.py
|
951e261d668bedd366325d8f3e053e182365e711
|
[] |
no_license
|
BrenBc/subastaonlineproyecto
|
f1711196a3afba7d67ee4320e09308a07af4bdcc
|
a21311b8f0db19cabb189ef59b9b174e4a613270
|
refs/heads/master
| 2023-01-11T08:32:44.963799
| 2020-11-14T11:26:40
| 2020-11-14T11:26:40
| 312,799,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
from django.contrib import admin
from . models import Banco, Cliente, Proveedor, Tipotarjeta, Venta, Tarjeta, Puja
from . models import *
# Register your models here.
admin.site.register(Banco)
admin.site.register(Cliente)
admin.site.register(Proveedor)
admin.site.register(Tipotarjeta)
admin.site.register(Venta)
admin.site.register(Tarjeta)
admin.site.register(Puja)
admin.site.register(Vehiculo)
admin.site.register(Fotografia)
admin.site.register(Categoria)
|
[
"bren_tn5@hotmail.es"
] |
bren_tn5@hotmail.es
|
370dc6f0e4efc80901dc654ec759b386b86d8c68
|
a30166ac71e4b1c1e07d67d07a07c99b12811005
|
/Topicos Especiais em Programação/rede_social/manage.py
|
a8297a1acdc2495c0a01a0ece10dbb175f394b53
|
[] |
no_license
|
WalterBluman/Tecnologo-ADS
|
3615422a36d4b3169f7534601c8bbc9abe25f1ef
|
cebf94e482fa0e348aeda0f66b34ca3f26a2aa27
|
refs/heads/master
| 2020-12-03T19:15:28.025905
| 2018-07-18T18:22:54
| 2018-07-18T18:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rede_social.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"sywrahgabriella@hotmail.com"
] |
sywrahgabriella@hotmail.com
|
94efcfe4d501d4833645593f4f7ae01f97cfe608
|
aad55a3fbb66d05408c69e032c9188dc870a3df2
|
/TestCase/__init__.py
|
26588af7267c2c2ccd51ca85644b8b8dfdd7b3ce
|
[] |
no_license
|
zjunbin/InternetThingsplatform
|
f2f86ca2e81a5edf732c677aa2e9e072eb784875
|
a8710f66f87fba0213fce62e3bd6483605a3ec8b
|
refs/heads/master
| 2020-05-24T05:59:43.446369
| 2019-07-05T09:16:56
| 2019-07-05T09:16:56
| 187,130,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
# coding: utf-8
# @time :2019/5/6 15:02
# @Author :zjunbin
# @Email :648060307@qq.com
# @File :__init__.py.py
|
[
"648260307@qq.com"
] |
648260307@qq.com
|
b3dcb41902a8b4ff489f8df77bc9a2de43e6826e
|
4a12ee57e670bfc574a1da1d6d61be1072d7c603
|
/interrogation/admin.py
|
d4f3790546036edff8b57d1687bc780181afc8ed
|
[] |
no_license
|
Ganin-Alexey/TZ
|
cab1639cc6595b7608c214faf78d04c52c425231
|
3f62391a470c0705d08435192fc650c3ab804ad8
|
refs/heads/master
| 2023-06-12T12:00:31.376963
| 2021-07-05T11:44:00
| 2021-07-05T11:44:00
| 382,894,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
from django.contrib import admin
from .models import Interrogation, Question, Answer, Choice
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 0
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
list_display = [field.attname for field in Question._meta.fields]
list_filter = ['type']
search_fields = ('text', 'type')
list_display_links = ['text', 'id']
list_editable = ['type']
inlines = [ChoiceInline]
class QuestionInline(admin.TabularInline):
model = Question
extra = 0
@admin.register(Interrogation)
class InterrogationAdmin(admin.ModelAdmin):
fields = ('title', 'description', 'is_active', 'start', 'stop')
list_display = ['title', 'description', 'is_active', 'start', 'stop']
list_filter = ('is_active', 'start', 'stop')
search_fields = ('title', 'description', 'is_active', 'start', 'stop')
readonly_fields = (
'start',
)
list_display_links = ('title', 'description')
list_editable = ['is_active']
inlines = [QuestionInline]
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
list_display = (
'user',
'question',
'choice',
)
list_filter = ('user',)
|
[
"anonim.minona.80@mail.ru"
] |
anonim.minona.80@mail.ru
|
8db5fccd818f11b5aa9b4aa8217eea91ebb4770c
|
4beaf162bb9dc4fb555d06777b64ad8c21b4c607
|
/api/models.py
|
11f57128718f6d11492188a975e44546fcf78222
|
[] |
no_license
|
fedotovdmitriy14/django_react_spotify
|
fd858fa8ff11a2a9bbf5516e443f08026d19e82b
|
6be94500a79f8b8eb7f4907a376bc580f91925cc
|
refs/heads/master
| 2023-08-23T00:33:16.329821
| 2021-10-24T14:26:59
| 2021-10-24T14:26:59
| 419,827,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
from django.db import models
import string
import random
def generate_unique_code():
length = 6
while True:
code = "".join(random.choices(string.ascii_uppercase, k=length))
if Room.objects.filter(code=code).count() == 0:
break
return code
class Room(models.Model):
code = models.CharField(max_length=8, default=generate_unique_code, unique=True)
host = models.CharField(max_length=50, unique=True)
guest_can_pause = models.BooleanField(null=False, default=False)
votes_to_skip = models.IntegerField(null=False, default=1)
created_at = models.DateTimeField(auto_now_add=True)
|
[
"fedotovdmitriy14@gmail"
] |
fedotovdmitriy14@gmail
|
3a41eb79f7221cfd5e494b7d0967592147d0d134
|
0560aee2a84c507d2e91cb712393b37a7cfd4117
|
/trunk/testing/multi-mechanize_1.010/projects/constellation/test_scripts/mech_browsers.py
|
93c701b32c74ceb152c97324e5f0252a5dd9a409
|
[] |
no_license
|
Mujj/Constellation
|
804df1190c7bf09c7498e0b6bcff247f5ae9c376
|
6bd9413cdd6bc835c0eddc48b1288b4132293cb6
|
refs/heads/master
| 2020-12-03T05:12:41.365567
| 2013-06-20T20:29:02
| 2013-06-20T20:29:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2010 Corey Goldberg (corey@goldb.org)
# License: GNU LGPLv3
#
# This file is part of Multi-Mechanize
import sys
import re
import mechanize
import cookielib
import json
import time
import random
import uuid
from datetime import date
from datetime import datetime
class Transaction(object):
def __init__(self):
self.custom_timers = {}
def run(self):
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
#br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
        # Follow refresh 0 but don't hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
#br.set_debug_http(True)
#br.set_debug_redirects(True)
#br.set_debug_responses(True)
domain = 'http://test.constellation.tv'
films = [ 4, 14, 15 ]
film = str(random.choice( films ))
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent', 'Mechanize Bot')]
# start homepage view
start_timer = time.time()
r = br.open(domain + '/')
r.read()
latency = time.time() - start_timer
self.custom_timers['Homepage'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end homepage
# start homepage service
start_timer = time.time()
r = br.open(domain + '/services/Screenings/upcoming')
r.read()
latency = time.time() - start_timer
self.custom_timers['Homepage Upcoming Service'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end homepage service
# start homepage select
start_timer = time.time()
today = date.today().strftime("%m/%d/%Y")
r = br.open(domain + '/services/Screenings/date?date='+today+'&film=null')
r.read()
latency = time.time() - start_timer
self.custom_timers['Homepage Upcoming Select'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end homepage select
# think-time
time.sleep(2)
# start filmpage view
start_timer = time.time()
r = br.open(domain + '/film/' + film)
r.read()
latency = time.time() - start_timer
self.custom_timers['Filmpage View'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end filmpage view
# start filmpage service
start_timer = time.time()
r = br.open(domain + '/services/Screenings/upcoming?film=' + film)
r.read()
latency = time.time() - start_timer
self.custom_timers['Filmpage Upcoming Service'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end filmpage service
# start filmpage select
start_timer = time.time()
today = date.today().strftime("%m/%d/%Y")
r = br.open(domain + '/services/Screenings/date?date='+today+'&film=' + film)
r.read()
latency = time.time() - start_timer
self.custom_timers['Filmpage Upcoming Select'] = latency
assert (r.code == 200), 'Bad HTTP Response'
# end filmpage select
# think-time
time.sleep(2)
if __name__ == '__main__':
trans = Transaction()
trans.run()
print trans.custom_timers
|
[
"root@development.motormeme.com"
] |
root@development.motormeme.com
|
36f361356f2ca7bb600caea0da4248477a0b47ce
|
ff398239187d3b911f4c87bc607ec3690dbfa8aa
|
/batch_state.py
|
ae28ba275031aae0fdaa105a72aa9095187efd31
|
[] |
no_license
|
xiefei0117/USDA-census-data-extraction-tool
|
95af513bc0b520ff5ab8c16767b87c2efa6ce99c
|
c8c95d2e32e1f88a97665d982d753fa7e03b6af5
|
refs/heads/main
| 2023-09-01T02:24:27.993687
| 2021-09-14T16:52:48
| 2021-09-14T16:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
"""
This code batch-processes all required data from the USDA census file. Users need to download the required state USDA file from the census and specify the state name. Example usage:
state_batch = Batch_Process_State("California", "06")
state_batch.run()
@author: Fei Xie
"""
import pandas as pd
from process_USDA_Chapter2_02282020 import Farm_County_Process
class Batch_Process_State:
def __init__(self, state_name, state_id):
self.state_name = state_name
self.state_USDA = Farm_County_Process(state_name+"_USDA.txt") #create a object for USDA processing
self.county_list = pd.read_csv(state_name+"_county.csv", dtype=object) #read county list for the target state
self.required_data_county_as_row = pd.read_csv("county_as_row.csv")
self.required_data_county_as_column = pd.read_csv("county_as_column.csv")
self.processed_data_county_as_row = self.required_data_county_as_row
self.processed_data_county_as_column = self.required_data_county_as_column
self.state_id = state_id;
def batch_process_county_as_row(self):
print("processing county as rows: ")
self.processed_data_county_as_row = self.required_data_county_as_row
for index, row in self.county_list.iterrows():
current_county = self.county_list.loc[index, 'CNTY_NAME']
print(current_county)
self.processed_data_county_as_row[current_county] = "NA"
for index2, row2 in self.processed_data_county_as_row.iterrows():
table_name = self.processed_data_county_as_row.loc[index2,"Table"]
column_num = int(self.processed_data_county_as_row.loc[index2,"Column"])
item_name = self.processed_data_county_as_row.loc[index2,"Item"]
self.processed_data_county_as_row.loc[index2,current_county] = self.state_USDA.retreve_value_county_as_row(
table_name, current_county, column_num, item_name)
self.processed_data_county_as_row.to_csv(r""+self.state_id+"_results_column_based_"+self.state_name + ".csv", index = None, header = True)
def batch_process_county_as_column(self):
print("processing county as columns: ")
self.processed_data_county_as_column = self.required_data_county_as_column
for index, row in self.county_list.iterrows():
current_county = self.county_list.loc[index, 'CNTY_NAME']
print(current_county)
self.processed_data_county_as_column[current_county] = "NA"
for index2, row2 in self.processed_data_county_as_column.iterrows():
table_name = self.processed_data_county_as_column.loc[index2,"Table"]
item_name = self.processed_data_county_as_column.loc[index2,"Item"]
unit_name = self.processed_data_county_as_column.loc[index2,"Unit"]
print(table_name)
print(item_name)
print(unit_name)
self.processed_data_county_as_column.loc[index2,current_county] = self.state_USDA.retreve_value_county_as_column(
table_name, current_county, item_name, unit_name)
self.processed_data_county_as_column.to_csv(r""+self.state_id+"_results_row_based_"+self.state_name + ".csv", index = None, header = True)
def run(self):
self.batch_process_county_as_row()
self.batch_process_county_as_column()
def main():
state_batch = Batch_Process_State("California", "06")
state_batch.run()
main()
|
[
"xiefei0117@gmail.com"
] |
xiefei0117@gmail.com
|
af489b1b3636ad41953b1e94d5c6585ab4073d41
|
1e79f9969a0705ecd6558e1169d201a0227e6c38
|
/PageObject/all_sites/RS/Oplata/pagePayRS.py
|
a5281f2541e06aa3533293d3e1b7a97af5344b83
|
[] |
no_license
|
mfcdnepr/Selenium_keyua
|
0d88be4160941b2279a3900999fc4b83f6d5d644
|
097f7a8ad7a026fc89ec532d8a570ce187f66757
|
refs/heads/master
| 2023-02-15T13:24:47.365241
| 2021-01-15T16:43:02
| 2021-01-15T16:43:02
| 313,406,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
def pagePayRS(browser,):
#Payment
browser.find_element_by_xpath('//*[@id="id_ccnum"]').send_keys('5169360004480400')
browser.find_element_by_xpath('//*[@id="id_ccexp_m"]/option[4]').click()
browser.find_element_by_xpath('//*[@id="id_ccexp_y"]/option[8]').click()
# browser.find_element_by_xpath('//*[@id="id_ccnum"]').send_keys('4731185617318578')
# browser.find_element_by_xpath('//*[@id="id_ccexp_m"]/option[2]').click()
# browser.find_element_by_xpath('//*[@id="id_ccexp_y"]/option[10]').click()
browser.find_element_by_xpath('//*[@id="id_addr1"]').send_keys('Address1')
browser.find_element_by_xpath('//*[@id="id_city"]').send_keys('Dnipro')
browser.find_element_by_xpath('//*[@id="id_zip"]').send_keys('49000')
browser.find_element_by_xpath('//*[@id="id_referrer"]').send_keys('gregrublev13899')
# browser.find_element_by_xpath('//*[@id="id_referrer"]').send_keys('deabec55-4e02-41ba-af2d-012f12f0df3b ')
browser.find_element_by_xpath('/html/body/main/div/div[2]/div[1]/div[2]/div/div[1]/form/button').click()
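# Minimal usage sketch (hypothetical; assumes a Selenium WebDriver instance that has
# already navigated to the site's payment form):
#   from selenium import webdriver
#   browser = webdriver.Chrome()
#   browser.get("https://example.test/checkout")  # placeholder URL
#   pagePayRS(browser)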
|
[
"zeleniyanton@gmail.com"
] |
zeleniyanton@gmail.com
|
6ef4f5dd5a38840b4bac64b24e2ef3679dea832b
|
10e5ecf13d2fa4e9e3d866fd42d68a99258d28b3
|
/tensor-note/tensor2.10/class5/tf_5_2_baseline.py
|
90bb3d3437ee463d29fbeef4a0b8757bdfd557ca
|
[] |
no_license
|
FictionDk/python-repo
|
2ba20bece0e900040833be305eb81157704533cf
|
41fa3a95d62f16d7cf632cfefb09226ec24f4e1a
|
refs/heads/master
| 2023-04-27T18:39:53.998065
| 2023-04-18T10:33:23
| 2023-04-18T10:33:23
| 91,284,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
# -*- coding: utf-8 -*-
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tf_5_1_data_set import DataSets
import tf_5_0_utils as tf_utils
class Baseline(Model):
def __init__(self):
super(Baseline, self).__init__()
        # Convolutional layer
self.c1 = Conv2D(filters=6, kernel_size=(5,5), padding='same')
        # Batch normalization (BN)
self.b1 = BatchNormalization()
        # Activation layer
self.a1 = Activation('relu')
        # Pooling layer
self.p1 = MaxPool2D(pool_size=(2,2), strides=2, padding='same')
        # Dropout layer
self.d1 = Dropout(0.2)
self.flatten = Flatten()
self.f1 = Dense(128, activation='relu')
self.d2 = Dropout(0.2)
self.f2 = Dense(10, activation='softmax')
def call(self, x):
        # Convolutional feature extractor: CBAPD (Conv-BN-Activation-Pool-Dropout)
x = self.c1(x)
x = self.b1(x)
x = self.a1(x)
x = self.p1(x)
x = self.d1(x)
x = self.flatten(x)
x = self.f1(x)
x = self.d2(x)
y = self.f2(x)
return y
dat = DataSets()
(x_train, y_train), (x_test, y_test) = dat.load_cifar10()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = Baseline()
history = tf_utils.model_train(model, "Baseline", x_train, y_train, x_test, y_test)
model.summary()
file = open('./weighs.txt', 'w')
for v in model.trainable_variables:
file.write(str(v.name) + '\n')
file.write(str(v.shape) + '\n')
file.write(str(v.numpy()) + '\n')
file.close()
tf_utils.history_show(history)
|
[
"ficito.d2k@gmail.com"
] |
ficito.d2k@gmail.com
|
b1f161576f69b29eea2c147ec803d1ac8dfb9f9d
|
a515800cfa247f52edfaebb2b94554a95278cef6
|
/load_weights.py
|
fc126d4045e7b8837d9ce6302ce51fec4a055caf
|
[
"Apache-2.0"
] |
permissive
|
lilun-cheng/vehicle_highway_tracking
|
5cc1111516ad64564b5074102e642c16b6189de3
|
99c9981c2d9f998d90df070d271b7b88f6dc0a35
|
refs/heads/master
| 2023-02-09T22:11:32.889245
| 2021-01-05T01:30:51
| 2021-01-05T01:30:51
| 326,847,720
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
from yolov3_tf2.models import YoloV3, YoloV3Tiny
from yolov3_tf2.utils import load_darknet_weights
flags.DEFINE_string('weights', 'weights/yolov3_custom_train_final.weights', 'path to weights file')
flags.DEFINE_string('output', 'weights/yolov3_car_truck.tf', 'path to output')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('num_classes', 2, 'number of classes in the model')
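# Example invocation (hypothetical paths; each flag falls back to the default above):
#   python load_weights.py --weights weights/yolov3_custom_train_final.weights \
#       --output weights/yolov3_car_truck.tf --num_classes 2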
def main(_argv):
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.summary()
logging.info('model created')
load_darknet_weights(yolo, FLAGS.weights, FLAGS.tiny)
logging.info('weights loaded')
img = np.random.random((1, 320, 320, 3)).astype(np.float32)
output = yolo(img)
logging.info('sanity check passed')
yolo.save_weights(FLAGS.output)
logging.info('weights saved')
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
[
"liluncheng@liluns-mbp-2.lan"
] |
liluncheng@liluns-mbp-2.lan
|
72196bb5c3dbf9d5373b0e4f6e101a46c1e6d59d
|
f2dc4d2c716971b09205ed7c81e4d792a3b892a3
|
/mask-rcnn/libraries/mcoco/coco.py
|
075d578316c16cce04da32e553133d3173c9e945
|
[
"Apache-2.0"
] |
permissive
|
pandamax/current-lane-drivable
|
c0412b5ee814e820b5a8673c11478f34d8718eb9
|
0727b101cec3d5663aa953209abf1f323b062a4f
|
refs/heads/master
| 2022-07-01T07:35:30.797585
| 2020-05-14T02:56:28
| 2020-05-14T02:56:28
| 262,209,978
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,502
|
py
|
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights. Also auto download COCO dataset
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
    # Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import sys
import time
import numpy as np
import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
class_map=None, return_coco=False, auto_download=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
"""
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
image_ids.extend(list(coco.getImgIds(catIds=[id])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def auto_download(self, dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
print("Downloading zipped annotations to " + annZipFile + " ...")
with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
print("Will use annotations in " + annFile)
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CocoDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CocoDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
if info["source"] == "coco":
return "http://cocodataset.org/#explore?id={}".format(info["id"])
else:
super(CocoDataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
    dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip MS-COCO files (default=False)',
type=bool)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = CocoConfig()
else:
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()[1]
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
model_path = args.model
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask RCNN paper.
dataset_train = CocoDataset()
dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
dataset_train.prepare()
# Validation dataset
dataset_val = CocoDataset()
dataset_val.load_coco(args.dataset, "minival", year=args.year, auto_download=args.download)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=120,
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=160,
layers='all',
augmentation=augmentation)
elif args.command == "evaluate":
# Validation dataset
dataset_val = CocoDataset()
coco = dataset_val.load_coco(args.dataset, "minival", year=args.year, return_coco=True, auto_download=args.download)
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.limit))
evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
# segmentation evaluate
evaluate_coco(model, dataset_val, coco, "segm", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command))
|
[
"noreply@github.com"
] |
pandamax.noreply@github.com
|
de2c917992ce5b7f0f60750a7cea903d616e6bf6
|
5076013d00eebdcdc74b3de358c3c59bd34847c0
|
/venv/Scripts/pip-script.py
|
4b8ba4f8b61f40c8f1d75a6975b48670a9766b18
|
[] |
no_license
|
arijit05saha/nopcommerceapp
|
ff1708440097d4c8cf608b26d9b247d7bfbb6caa
|
388b375cc9ac1ba2967d8099a412e0dbfb5f39db
|
refs/heads/master
| 2023-03-07T20:15:04.999737
| 2021-02-24T21:09:30
| 2021-02-24T21:09:30
| 342,031,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
#!C:\Users\Arijit\PycharmProjects\noCommerceApp\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"arijit05saha@gmail.com"
] |
arijit05saha@gmail.com
|
96caa9d7593ea74a948e4142a3d16877eaeb220f
|
48ea0bdd3985b76aacee3ab2c6f5334c08aa671b
|
/09day/3三角形.py
|
999372cd39064ff728ec1e114b964e949aed4835
|
[] |
no_license
|
caozongliang/1805
|
d19a7dfe82bdb5bd572aa774c0911b1cb50ad952
|
00ea195af5567a5998c3987ddceaa51134f1a95b
|
refs/heads/master
| 2020-03-19T20:58:07.789690
| 2018-06-25T09:31:03
| 2018-06-25T09:31:03
| 136,141,802
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
i = 1
while i < 6:
t = 0
while t < i:
print("*",end="")
t+=1
print("")
i+=1
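# Expected output: a left-aligned triangle of asterisks, one row per outer iteration:
# *
# **
# ***
# ****
# *****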
|
[
"3141360536@qq.com"
] |
3141360536@qq.com
|
264bd6975be7662b1e729d1229a5ecf0315961d0
|
4ff9529ec12b75a8453fdca7f36fc3d9e03e7615
|
/xcessiv/__init__.py
|
d02b77e2c1371871f43fafedb49a47daaf4c00b3
|
[
"Apache-2.0"
] |
permissive
|
ferplascencia/xcessiv
|
5f1d6feaaf4783e6c0c9c3dce45720b3cb4c1fa3
|
fc7df4e42ee51859fc84b05725f3512d85e71bf4
|
refs/heads/master
| 2021-01-01T06:16:26.094662
| 2017-07-13T09:55:47
| 2017-07-13T09:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
from __future__ import absolute_import, print_function, division, unicode_literals
from flask import Flask
__version__ = '0.5.0'
app = Flask(__name__, static_url_path='/static', static_folder='ui/build/static')
app.config.from_object('xcessiv.config')
import xcessiv.views
|
[
"reiichiro.s.nakano@gmail.com"
] |
reiichiro.s.nakano@gmail.com
|
9f476d12b85c867f987ddf81d7744fd5102dcca5
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/memory_stats/ops/gen_memory_stats_ops.py
|
c07803ef193dbda5354cef2be4381457f0307e96
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: memory_stats_ops.cc
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
def bytes_in_use(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"BytesInUse", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
else:
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"BytesInUse", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("BytesInUse")(None)
def bytes_limit(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"BytesLimit", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
else:
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"BytesLimit", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BytesLimit", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("BytesLimit")(None)
def max_bytes_in_use(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"MaxBytesInUse", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
else:
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"MaxBytesInUse", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"MaxBytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("MaxBytesInUse")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "BytesInUse"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
# op {
# name: "BytesLimit"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
# op {
# name: "MaxBytesInUse"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\n\030\n\nBytesInUse\032\007\n\003out\030\t\210\001\001\n\030\n\nBytesLimit\032\007\n\003out\030\t\210\001\001\n\033\n\rMaxBytesInUse\032\007\n\003out\030\t\210\001\001")
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
81e057f75dd9cb80d8baccb74262ea4edcf325b8
|
d9d6fbe56103b3372aba402d1472226820b8466b
|
/anagrafiche/myTests_Libro_cap06.py
|
7eb1b59b754c0f41f4e87d35b63659dd9979979a
|
[] |
no_license
|
ghiblin/wms2
|
673731f39ae8d82201acfd7e228bfc1cef6bf126
|
07a97e6136637830ca99ae0bb06d944755eab87c
|
refs/heads/master
| 2021-06-24T06:00:43.207129
| 2018-10-14T22:46:30
| 2018-10-14T22:46:30
| 214,431,330
| 0
| 0
| null | 2021-06-10T22:04:52
| 2019-10-11T12:30:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,389
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from lists.models import Item, List
from lists.views import home_page
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
found = resolve('/')
self.assertEqual(found.func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
expected_html = render_to_string('home.html')
self.assertEqual(response.content.decode(), expected_html)
class NewListTest(TestCase):
def test_saving_a_POST_request(self):
self.client.post(
'/lists/new',
data={'item_text': 'A new list item'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_POST(self):
response = self.client.post(
'/lists/new',
data={'item_text': 'A new list item'}
)
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (new_list.id,))
class NewItemTest(TestCase):
def test_can_save_a_POST_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post(
'/lists/%d/add_item' % (correct_list.id,),
data={'item_text': 'A new item for an existing list'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post(
'/lists/%d/add_item' % (correct_list.id,),
data={'item_text': 'A new item for an existing list'}
)
self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))
class ListViewTest(TestCase):
def test_uses_list_template(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertTemplateUsed(response, 'list.html')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertEqual(response.context['list'], correct_list)
def test_displays_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='itemey 1', list=correct_list)
Item.objects.create(text='itemey 2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='other list item 1', list=other_list)
Item.objects.create(text='other list item 2', list=other_list)
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertContains(response, 'itemey 1')
self.assertContains(response, 'itemey 2')
self.assertNotContains(response, 'other list item 1')
self.assertNotContains(response, 'other list item 2')
class ListAndItemModelsTest(TestCase):
def test_saving_and_retrieving_items(self):
list_ = List()
list_.save()
first_item = Item()
first_item.text = 'The first (ever) list item'
first_item.list = list_
first_item.save()
second_item = Item()
second_item.text = 'Item the second'
second_item.list = list_
second_item.save()
saved_list = List.objects.first()
self.assertEqual(saved_list, list_)
saved_items = Item.objects.all()
self.assertEqual(saved_items.count(), 2)
first_saved_item = saved_items[0]
second_saved_item = saved_items[1]
self.assertEqual(first_saved_item.text, 'The first (ever) list item')
self.assertEqual(first_saved_item.list, list_)
self.assertEqual(second_saved_item.text, 'Item the second')
self.assertEqual(second_saved_item.list, list_)
|
[
"sergio.morstabilini@studiogammasnc.it"
] |
sergio.morstabilini@studiogammasnc.it
|
86170857d96fa7d35c1f3d3d66444850e3e7b6e3
|
b75fa0885bc3ba3f153225fd3396aadef6c1f97e
|
/slides/pypyjs/lib-py3k/modules/test/test_site.py
|
ff1a5515931093585af46aef3c348e3a831a3f8f
|
[
"MIT"
] |
permissive
|
rfk/talk-pypyjs-what-how-why
|
e084303185167dbc9b704c3568e0c31d0a1f6885
|
1ab62ee32ff9495ae9313ec81e8ee2044212ea71
|
refs/heads/master
| 2016-09-06T05:27:09.800382
| 2015-04-10T03:12:07
| 2015-04-10T03:12:07
| 22,421,369
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,688
|
py
|
"""Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.support import run_unittest, TESTFN, EnvironmentVarGuard
from test.support import captured_stderr, check_impl_detail
import builtins
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
import site
else:
raise unittest.SkipTest("importation of site.py suppressed")
if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
"""Tests for helper functions.
"""
def setUp(self):
"""Save a copy of sys.path"""
self.sys_path = sys.path[:]
self.old_base = site.USER_BASE
self.old_site = site.USER_SITE
self.old_prefixes = site.PREFIXES
self.old_vars = copy(sysconfig._CONFIG_VARS)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.PREFIXES = self.old_prefixes
sysconfig._CONFIG_VARS = self.old_vars
def test_makepath(self):
        # Test that makepath() returns an absolute path for its first return value
# and a case-normalized version of the absolute path for its
# second value.
path_parts = ("Beginning", "End")
original_dir = os.path.join(*path_parts)
abs_dir, norm_dir = site.makepath(*path_parts)
self.assertEqual(os.path.abspath(original_dir), abs_dir)
if original_dir == os.path.normcase(original_dir):
self.assertEqual(abs_dir, norm_dir)
else:
self.assertEqual(os.path.normcase(abs_dir), norm_dir)
def test_init_pathinfo(self):
dir_set = site._init_pathinfo()
for entry in [site.makepath(path)[1] for path in sys.path
if path and os.path.isdir(path)]:
self.assertIn(entry, dir_set,
"%s from sys.path not found in set returned "
"by _init_pathinfo(): %s" % (entry, dir_set))
def pth_file_tests(self, pth_file):
"""Contain common code for testing results of reading a .pth file"""
self.assertIn(pth_file.imported, sys.modules,
"%s not in sys.modules" % pth_file.imported)
self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
self.assertFalse(os.path.exists(pth_file.bad_dir_path))
def test_addpackage(self):
# Make sure addpackage() imports if the line starts with 'import',
# adds directories to sys.path for any line in the file that is not a
# comment or import that is a valid directory name for where the .pth
# file resides; invalid directories are not added
pth_file = PthFile()
pth_file.cleanup(prep=True) # to make sure that nothing is
# pre-existing that shouldn't be
try:
pth_file.create()
site.addpackage(pth_file.base_dir, pth_file.filename, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
# Create a .pth file and return its (abspath, basename).
pth_dir = os.path.abspath(pth_dir)
pth_basename = pth_name + '.pth'
pth_fn = os.path.join(pth_dir, pth_basename)
pth_file = open(pth_fn, 'w', encoding='utf-8')
self.addCleanup(lambda: os.remove(pth_fn))
pth_file.write(contents)
pth_file.close()
return pth_dir, pth_basename
def test_addpackage_import_bad_syntax(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 1")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: the previous two should be independent checks so that the
# order doesn't matter. The next three could be a single check
# but my regex foo isn't good enough to write it.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), r'import bad\)syntax')
self.assertRegex(err_out.getvalue(), 'SyntaxError')
def test_addpackage_import_bad_exec(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 2")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), 'ImportError')
@unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
"error for file paths containing null characters")
def test_addpackage_import_bad_pth_file(self):
# Issue 5258
pth_dir, pth_fn = self.make_pth("abc\x00def\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 1")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), 'TypeError')
def test_addsitedir(self):
# Same tests for test_addpackage since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
@unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
"user-site (site.ENABLE_USER_SITE)")
def test_s_option(self):
usersite = site.USER_SITE
self.assertIn(usersite, sys.path)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 1)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
def test_getuserbase(self):
site.USER_BASE = None
user_base = site.getuserbase()
# the call sets site.USER_BASE
self.assertEqual(site.USER_BASE, user_base)
# let's set PYTHONUSERBASE and see if it uses it
site.USER_BASE = None
import sysconfig
sysconfig._CONFIG_VARS = None
with EnvironmentVarGuard() as environ:
environ['PYTHONUSERBASE'] = 'xoxo'
self.assertTrue(site.getuserbase().startswith('xoxo'),
site.getuserbase())
def test_getusersitepackages(self):
site.USER_SITE = None
site.USER_BASE = None
user_site = site.getusersitepackages()
# the call sets USER_BASE *and* USER_SITE
self.assertEqual(site.USER_SITE, user_site)
self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
def test_getsitepackages(self):
site.PREFIXES = ['xoxo']
dirs = site.getsitepackages()
if sys.platform in ('os2emx', 'riscos'):
self.assertEqual(len(dirs), 1)
wanted = os.path.join('xoxo', 'Lib', 'site-packages')
self.assertEqual(dirs[0], wanted)
elif '__pypy__' in sys.builtin_module_names:
self.assertEquals(len(dirs), 1)
wanted = os.path.join('xoxo', 'site-packages')
self.assertEquals(dirs[0], wanted)
elif (sys.platform == "darwin" and
sysconfig.get_config_var("PYTHONFRAMEWORK")):
# OS X framework builds
site.PREFIXES = ['Python.framework']
dirs = site.getsitepackages()
self.assertEqual(len(dirs), 3)
wanted = os.path.join('/Library',
sysconfig.get_config_var("PYTHONFRAMEWORK"),
sys.version[:3],
'site-packages')
self.assertEqual(dirs[2], wanted)
elif os.sep == '/':
            # OS X non-framework builds, Linux, FreeBSD, etc
self.assertEqual(len(dirs), 2)
wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
'site-packages')
self.assertEqual(dirs[0], wanted)
wanted = os.path.join('xoxo', 'lib', 'site-python')
self.assertEqual(dirs[1], wanted)
else:
# other platforms
self.assertEqual(len(dirs), 2)
self.assertEqual(dirs[0], 'xoxo')
wanted = os.path.join('xoxo', 'lib', 'site-packages')
self.assertEqual(dirs[1], wanted)
class PthFile(object):
"""Helper class for handling testing of .pth files"""
def __init__(self, filename_base=TESTFN, imported="time",
good_dirname="__testdir__", bad_dirname="__bad"):
"""Initialize instance variables"""
self.filename = filename_base + ".pth"
self.base_dir = os.path.abspath('')
self.file_path = os.path.join(self.base_dir, self.filename)
self.imported = imported
self.good_dirname = good_dirname
self.bad_dirname = bad_dirname
self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
def create(self):
"""Create a .pth file with a comment, blank lines, an ``import
<self.imported>``, a line with self.good_dirname, and a line with
self.bad_dirname.
Creation of the directory for self.good_dir_path (based off of
self.good_dirname) is also performed.
Make sure to call self.cleanup() to undo anything done by this method.
"""
FILE = open(self.file_path, 'w')
try:
print("#import @bad module name", file=FILE)
print("\n", file=FILE)
print("import %s" % self.imported, file=FILE)
print(self.good_dirname, file=FILE)
print(self.bad_dirname, file=FILE)
finally:
FILE.close()
os.mkdir(self.good_dir_path)
def cleanup(self, prep=False):
"""Make sure that the .pth file is deleted, self.imported is not in
sys.modules, and that both self.good_dirname and self.bad_dirname are
not existing directories."""
if os.path.exists(self.file_path):
os.remove(self.file_path)
if prep:
self.imported_module = sys.modules.get(self.imported)
if self.imported_module:
del sys.modules[self.imported]
else:
if self.imported_module:
sys.modules[self.imported] = self.imported_module
if os.path.exists(self.good_dir_path):
os.rmdir(self.good_dir_path)
if os.path.exists(self.bad_dir_path):
os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
"""Test side-effects from importing 'site'."""
def setUp(self):
"""Make a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
def test_abs_paths(self):
# Make sure all imported modules have their __file__ and __cached__
# attributes as absolute paths. Arranging to put the Lib directory on
# PYTHONPATH would cause the os module to have a relative path for
# __file__ if abs_paths() does not get run. sys and builtins (the
# only other modules imported before site.py runs) do not have
# __file__ or __cached__ because they are built-in.
parent = os.path.relpath(os.path.dirname(os.__file__))
env = os.environ.copy()
env['PYTHONPATH'] = parent
code = ('import os, sys',
# use ASCII to avoid locale issues with non-ASCII directories
'os_file = os.__file__.encode("ascii", "backslashreplace")',
r'sys.stdout.buffer.write(os_file + b"\n")',
'os_cached = os.__cached__.encode("ascii", "backslashreplace")',
r'sys.stdout.buffer.write(os_cached + b"\n")')
command = '\n'.join(code)
# First, prove that with -S (no 'import site'), the paths are
# relative.
proc = subprocess.Popen([sys.executable, '-S', '-c', command],
env=env,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0)
os__file__, os__cached__ = stdout.splitlines()[:2]
if check_impl_detail(cpython=True):
self.assertFalse(os.path.isabs(os__file__))
self.assertFalse(os.path.isabs(os__cached__))
# Now, with 'import site', it works.
proc = subprocess.Popen([sys.executable, '-c', command],
env=env,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0)
os__file__, os__cached__ = stdout.splitlines()[:2]
self.assertTrue(os.path.isabs(os__file__))
self.assertTrue(os.path.isabs(os__cached__))
def test_no_duplicate_paths(self):
# No duplicate paths should exist in sys.path
# Handled by removeduppaths()
site.removeduppaths()
seen_paths = set()
for path in sys.path:
self.assertNotIn(path, seen_paths)
seen_paths.add(path)
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
# XXX: implement
pass
def test_setting_quit(self):
# 'quit' and 'exit' should be injected into builtins
self.assertTrue(hasattr(builtins, "quit"))
self.assertTrue(hasattr(builtins, "exit"))
def test_setting_copyright(self):
# 'copyright' and 'credits' should be in builtins
self.assertTrue(hasattr(builtins, "copyright"))
self.assertTrue(hasattr(builtins, "credits"))
def test_setting_help(self):
# 'help' should be set in builtins
self.assertTrue(hasattr(builtins, "help"))
def test_aliasing_mbcs(self):
if sys.platform == "win32":
import locale
if locale.getdefaultlocale()[1].startswith('cp'):
for value in encodings.aliases.aliases.values():
if value == "mbcs":
break
else:
self.fail("did not alias mbcs")
def test_sitecustomize_executed(self):
# If sitecustomize is available, it should have been imported.
if "sitecustomize" not in sys.modules:
try:
import sitecustomize
except ImportError:
pass
else:
self.fail("sitecustomize not imported automatically")
def test_main():
run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
test_main()
|
[
"ryan@rfk.id.au"
] |
ryan@rfk.id.au
|
bb65a6f3ade0146e33605ebc9725287bcf876ccf
|
d028fe782122a52b2e8664296a3b7091016f4f8e
|
/reports/views.py
|
fb9fd75cf95647724dda4993490d1439c23156c5
|
[] |
no_license
|
uhexos/Online-Learning-Platform
|
74b44650c4cc7234becff208a8030cc339973602
|
364d6e2a00ce3800c88e8a83b616a9a341bea7a4
|
refs/heads/master
| 2022-11-13T06:22:13.809303
| 2020-07-05T00:39:06
| 2020-07-05T00:39:06
| 224,997,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
from django.shortcuts import render
from rest_framework import generics, permissions
from courses.models import EnrolledCourses
from .serializers import SalesSerializer
# Create your views here.
from django.db.models import Sum
class SalesList(generics.ListAPIView):
queryset = EnrolledCourses.objects.all()
serializer_class = SalesSerializer
def get_queryset(self):
return EnrolledCourses.objects.filter(course__owner=self.request.user)
|
[
"uhexos@gmail.com"
] |
uhexos@gmail.com
|
c662727a689e9edfd8365de13c7fc29fe0578ba0
|
1b6e60aa1b7b10ffc4f64f89fca9d33761d10764
|
/alembic/versions/7d92610a4f0d_create_post_table.py
|
1e133101adef7ac41e5323553f885498e2f642fa
|
[] |
no_license
|
epm157/fastapitutorial
|
2e9464df1f843c07447a7c58153b7bf8590bae81
|
56944bd8bb5402eed72a300fc603992a83d61c96
|
refs/heads/main
| 2023-08-27T08:13:04.850666
| 2021-11-15T06:51:45
| 2021-11-15T06:51:45
| 425,637,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
"""create post table
Revision ID: 7d92610a4f0d
Revises:
Create Date: 2021-11-07 17:44:45.124468
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7d92610a4f0d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('posts', sa.Column('id', sa.Integer(), nullable=False, primary_key=True), sa.Column('title', sa.String(), nullable=False),)
pass
def downgrade():
op.drop_table('posts')
pass
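# Not part of this revision file, but for context: a revision like this is usually
# applied with `alembic upgrade head` and reverted with `alembic downgrade -1`.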
|
[
"ehsan.poormohammady@gdata.de"
] |
ehsan.poormohammady@gdata.de
|
d32fa628d376417ab535348d4f96ac45c4116ea7
|
7b1af6defe4797d9d4a815c73f72965bd5cf0245
|
/python string formatting.py
|
4683b9c5a780b6aa9c6c9712cc6b3aecaea51cd1
|
[] |
no_license
|
sivathedev/python65
|
e60a6777a02dfd664aeac8a0f0b6173c79db9c02
|
d16848c9b7889d7b51a49814af02d01e9c8162b7
|
refs/heads/master
| 2023-02-23T21:05:54.456743
| 2021-01-26T14:02:14
| 2021-01-26T14:02:14
| 289,531,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
quantity = 3
itemno = 567
price = 49
myorder = "I want {} pieces of item number {} for {:.2f} dollars."
print(myorder.format(quantity, itemno, price))
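# For reference, with the values above this prints (illustration only):
# I want 3 pieces of item number 567 for 49.00 dollars.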
|
[
"noreply@github.com"
] |
sivathedev.noreply@github.com
|
9ca816dea7c494fb4a933882aa28ad760c09847e
|
29c70f2129b1f7882abb7858c556c0287a0d1a6b
|
/data.py
|
926b485a09b547d8062eb0a2d818941f393714e9
|
[] |
no_license
|
rakeshls/class103
|
379ccf8b77cc1589f919b2617a7deadac63397ed
|
34b2e0b3dbc31c75ee5984822679603bb93e1a3b
|
refs/heads/main
| 2023-05-24T01:24:22.772724
| 2021-06-09T13:26:49
| 2021-06-09T13:26:49
| 375,364,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import pandas as pd
import plotly.express as px
#data = [10,20,30,40,50]
#df = pd.DataFrame(data)
#print(df)
df = pd.read_csv('csvfiles/line_chart.csv')
fig = px.line(df,x="Year",y="Per capita income",color="Country",title="Per capita income")
fig.show()
|
[
"noreply@github.com"
] |
rakeshls.noreply@github.com
|
f322a1c72f7e1c96948330f94ad1172954476178
|
a081bd5057e7deb8c1596b673b4a35d719477e20
|
/cliente.py
|
1311b2a5ba40b559612cd0b2178abe1df21c74be
|
[] |
no_license
|
pablocandela/Practica08-Modelado20171
|
39e373b1ddc284d27b115d22a920ed35c47528bf
|
1f0d8290a735b44f96cd5b05e43d40498df612de
|
refs/heads/master
| 2021-01-24T11:28:21.170076
| 2016-10-07T01:49:20
| 2016-10-07T01:49:20
| 70,206,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import sys
from PyQt4 import QtGui,QtCore, uic
class Ventana(QtGui.QMainWindow):
def __init__(self):
super(Ventana, self).__init__()
uic.loadUi('cliente.ui', self)
header_horizontal = self.tableWidget.horizontalHeader()
header_horizontal.setResizeMode(QtGui.QHeaderView.Stretch)
header_vertical = self.tableWidget.verticalHeader()
header_vertical.setResizeMode(QtGui.QHeaderView.Stretch)
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ventana = Ventana()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"pabloantonio@ciencias.unam.mx"
] |
pabloantonio@ciencias.unam.mx
|
365c552603fdb1b0f4540f30a40ca52234d87850
|
f6f02db9ea3fe650c759c0873d92a2a42edb588d
|
/bracketed.py
|
a5c276ec87a3fb5e8c9d09d4d90a8c97e8e03edc
|
[] |
no_license
|
eclairss17/Python-test-cases-solving
|
f54e38b9f266bfdeea2431025744c2b95c4174bb
|
aeee800bad54dc41d1a6ff80329f7e169f2ad797
|
refs/heads/master
| 2021-02-18T06:26:34.084673
| 2020-03-05T13:30:53
| 2020-03-05T13:30:53
| 245,170,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# check that brackets are balanced and return True/False
PAIRINGS = {
'(': ')'
}
def bracketed(symbols):
    stack = []
    closing_symbols = set(PAIRINGS.values())
    for s in symbols:
        if s in PAIRINGS:
            stack.append(s)
            continue
        if s not in closing_symbols:
            continue  # ignore characters that are not brackets, e.g. letters and '+'
        try:
            expected_opening_symbol = stack.pop()
        except IndexError:  # too many closing symbols
            return False
        if s != PAIRINGS[expected_opening_symbol]:  # mismatch
            return False
    return len(stack) == 0  # false if too many opening symbols
print(bracketed('(a+b)'))
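# A few illustrative calls (assuming, as above, that non-bracket characters are ignored):
#   bracketed('(a+b)')   -> True
#   bracketed('((a+b)')  -> False   (unmatched opening symbol)
#   bracketed('a+b)')    -> False   (too many closing symbols)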
|
[
"td_1"
] |
td_1
|
8e00f6ef0dc2ae442b4838f710c9a49f9afad966
|
426596e98832bded86dbc7ea829cff98aaec520e
|
/mysite/news/migrations/0008_auto_20210112_1636.py
|
ed9da176ed6c7023caabf61949acaab83ee76d46
|
[] |
no_license
|
yaki771/yakiswork
|
8382458b23801e901be255dc6ec7146464a1c32b
|
679bb644ae119ba4fe87faa5eca3b128e70926b2
|
refs/heads/master
| 2023-02-15T23:01:29.422690
| 2021-01-12T15:24:50
| 2021-01-12T15:24:50
| 309,275,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
# Generated by Django 3.1.2 on 2021-01-12 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('news', '0007_交通工具'),
]
operations = [
migrations.AlterField(
model_name='homework',
name='交通工具',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.交通工具'),
),
]
|
[
"fqjxxlyq@163.com"
] |
fqjxxlyq@163.com
|
72dd8fbe932cd89098d3f68000ca5f2becadaee9
|
36548a42ad2e651d670d225c9b0d72fe3138dce2
|
/Exercise/4_Astrologer's_Stars.py
|
42f8f606c57db4a5c6a3b71afe97a654abda2c23
|
[] |
no_license
|
Parth731/Python-Tutorial
|
a46f2f1ed85211eb23567778f092c24c5752f62a
|
a8ab0effe31e8d186b20296de171bc6e964b627c
|
refs/heads/master
| 2023-01-11T19:55:14.979699
| 2020-11-03T08:58:57
| 2020-11-03T08:58:57
| 288,523,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# pattern printing
'''
input = integer n
Boolean = true/false
True, n=5
*
**
***
****
*****
False, n=5
*****
****
***
**
*
'''
n = int(input("How many rows do you want to print?\n"))
b = bool(int(input("Please enter 1 for True or 0 for False\n")))
print(b)
if b:
for x in range(n):
for y in range(x+1):
print("*",end="")
print()
else:
for x in range(n, 0, -1):
for y in range(x):
print("*", end="")
print()
|
[
"patelparth31795@gmail.com"
] |
patelparth31795@gmail.com
|
dfebb92b8ffa8311fe658092c45a2e4618fbedc4
|
0b26df6178c717f914356d17e5e78731b88e2de5
|
/session4/exercise2.py
|
112e877d297a076103c4209914bc9452d83e9b46
|
[] |
no_license
|
Jacquelinediaznieto/PythonCourse
|
a6509656dbfcc71ab642b50da304e9506517846f
|
623579ed924cc33ea6c44956f798759bffa8ff07
|
refs/heads/main
| 2023-06-11T11:02:50.563680
| 2021-07-01T07:45:08
| 2021-07-01T07:45:08
| 371,450,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#2a
def shopping_calculator(shopping_cost, discount_applicable):
if (discount_applicable == 'y'):
total_cost = shopping_cost * 0.9
return total_cost
elif(shopping_cost > 100):
total_cost = shopping_cost * 0.95
return total_cost
else:
total_cost = shopping_cost
return total_cost
total_cost = shopping_calculator(200, 'y')
print(f'The total cost is {total_cost}')
|
[
"Jacquelinediaznieto@hotmail.com"
] |
Jacquelinediaznieto@hotmail.com
|
9c67ad30e03acf740b3284a4d84c947ee548dd6a
|
395aa16d8d8a1e34445387d40a02488c6c94db95
|
/lex/RE2NFA.py
|
33169968295b4a023bed937b421047fc308fd5c5
|
[] |
no_license
|
1170300808/LexicalAnalysis
|
bf0a9827de2d4b158057e9b00e73197fe1d261c7
|
856e5f5899871e3db2f0ece10ee73ab0d21952f7
|
refs/heads/master
| 2021-05-22T00:43:46.971638
| 2020-04-04T03:46:13
| 2020-04-04T03:46:13
| 252,890,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,308
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/3/27 12:06
# @Author : WDW
# @File : RE2NFA.py
'''
Build an NFA using Thompson's construction: first parse the regular expression
into a syntax tree, then build the NFA bottom-up from that tree.

Example: ab*(a|b)*  ('+' below denotes concatenation; the code itself uses '`').
Its syntax tree has a concatenation at the root, with the concatenation of
'a' and 'b*' as the left part and '(a|b)*' as the right part.

The syntax tree is built with two stacks:
  s1 holds operators (+, *, ( ...), s2 holds the subtrees of parsed sub-expressions.
Scan the expression: operators go onto s1, ordinary characters become leaves on s2.
Precedence is () > * > + > |, and s1 must stay in increasing precedence; when a
lower-precedence operator arrives, pop and reduce until that holds, then push it.
A ')' keeps popping until the matching '(' is removed. '(' is special: it is
pushed directly, but is given the lowest precedence so that operators after it
can still be pushed. Every time an operator is popped, the subtree it produces
is pushed back onto s2.

Trace for a+b*+(a|b)*:
  read 'a':  s1:        s2: a
  read '+':  s1: +      s2: a
  read 'b':  s1: +      s2: a, b
  read '*':  '*' is unary and is handled immediately: the top subtree gets '*'
             as its new root                      -> s1: +     s2: a, b*
  read '+':  pop the pending '+' and build its subtree, then push the new '+'
                                                  -> s1: +     s2: a+b*
  read '(':  s1: +(     s2: a+b*
  read 'a':  s1: +(     s2: a+b*, a
  read '|':  s1: +(|    s2: a+b*, a
  read 'b':  s1: +(|    s2: a+b*, a, b
  read ')':  reduce operators until '('           -> s1: +     s2: a+b*, a|b
  read '*':  s1: +      s2: a+b*, (a|b)*
  end:       one '+' is left on s1; reducing it yields the tree for a+b*+(a|b)*.

This could be simplified, e.g. by producing the postfix form of the regex
directly, or by going straight from the syntax tree to a DFA; this is a fairly
crude implementation. Support for character classes such as [a-z] may be added
later; escape sequences such as \\ and \* are also still needed.
'''
priority = {'?': 2, '*': 2, '(': -1, ')': -1, '`': 1, '|': 0}
def getlevel(c):  # operator precedence of c
return priority[c]
def isop(c):
return c in {'*', '(', ')', '`', '|', '?'}
class treenode():
def __init__(self, val=None, left=None, right=None):
self.left = left
self.right = right
self.val = val
class tree():
    '''Syntax tree built from the (preprocessed) regular expression.'''
def __init__(self, regex):
print(regex)
self.rawexp = regex
self.regex = proprocess(regex)
self.s1 = []
self.s2 = []
def getTree(self):
l, i = len(self.regex), -1
while i < l - 1:
i += 1
c = self.regex[i]
# print(self.s1,self.s2,c)
if c == '\\':
i += 1
self.escape_char(self.regex[i])
continue
if isop(c):
if c == '(':
self.s1.append(c)
continue
                if c == ')':  # parentheses are special and need explicit handling
while self.s1[-1] != '(':
self.connect(self.s1.pop())
self.s1.pop()
continue
if not self.s1 or getlevel(c) > getlevel(self.s1[-1]):
self.s1.append(c)
else:
while self.s1 and getlevel(self.s1[-1]) >= getlevel(c):
self.connect(self.s1.pop())
self.s1.append(c)
else:
self.s2.append(treenode(c))
while self.s1:
c = self.s1.pop()
self.connect(c)
return self.s2.pop()
    def connect(self, c):  # reduce: build a node for operator c and attach its operand subtree(s)
newnode = treenode(c)
if c == '*' or c == '?':
oldnode = self.s2.pop()
newnode.left = oldnode
        else:  # binary operators: '|' and concatenation
n1 = self.s2.pop()
n2 = self.s2.pop()
newnode.left = n1
newnode.right = n2
self.s2.append(newnode)
    def escape_char(self, c):  # handle an escaped character
self.s2.append(treenode(c))
        # TODO: add support for character classes such as [a-z]
def proprocess(exp):
    # This may later be extended to support a richer, more flexible syntax.
    # When should an explicit concatenation ('`') be inserted? Between two sub-expressions:
    #   letter,   letter   -> insert it
    #   letter,   operator -> only before '(' (compare a* a| a) a? with a( )
    #   operator, letter   -> only after the unary operators '*' and '?' ( (a |a but *`a ?`a )
    #   operator, operator -> never ( *| *) |) () |* )( )
def kuohao(exp):
newexp = ""
for c in exp:
if c == '\\':
continue
exp = newexp
# kuohao(exp)
newexp = ""
lastchar = None
l, i = len(exp), -1
while i < l - 1:
i += 1
c = exp[i]
if lastchar == '\\':
lastchar = '\\' + c
newexp += c
continue
        if c == '\\':  # escape symbol; just pass it through for now
if lastchar is None:
newexp += c
else:
newexp += '`'
newexp += c
continue
if lastchar is None:
newexp += c
elif not isop(lastchar):
if c == '(' or not isop(c):
newexp += '`'
newexp += c
else:
if isop(c):
if c == '(':
newexp += '`'
newexp += c
else:
if lastchar == '*' or lastchar == '?':
newexp += '`'
newexp += c
lastchar = c
return newexp
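# Illustrative example of this preprocessing step (derived from the rules above):
# "ab*(a|b)*" becomes "a`b*`(a|b)*", i.e. '`' is inserted as an explicit
# concatenation operator between adjacent sub-expressions.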
'''Below: converting the syntax tree (AST) into an NFA.'''
class nfaNode():
    def __init__(self, isend=False):  # a node has at most two outgoing edges; incoming edges are either one or more epsilon edges, or a single non-epsilon edge
        self.epsilon = set()  # epsilon (ε) transitions: set of target state ids
        self.char = {}  # non-epsilon transitions: char -> target state id
        self.state = nfa.cnt  # state id
nfa.cnt += 1
self.isend = isend
nfa.pool.append(self)
class nfa():
cnt = 0
    pool = []  # pool of all created states
def __init__(self, startstate: nfaNode = None, endstate: nfaNode = None):
self.startstate = startstate
self.endstate = endstate
    def transit_table(self):  # print the transition table
        print("start state:", self.startstate.state, "accepting state:", self.endstate.state)
        for node in nfa.pool:
            print("state:", node.state, "epsilon transitions:", node.epsilon, "non-epsilon transitions:", node.char)
def constructNFA(tree: treenode):
c = tree.val
start = nfaNode()
end = nfaNode(isend=True)
if not tree.left and not tree.right:
start.char[c] = end.state
elif c == '*':
subnfa = constructNFA(tree.left)
subnfa.endstate.isend = False
subnfa.endstate.epsilon.add(subnfa.startstate.state)
start.epsilon.add(end.state)
start.epsilon.add(subnfa.startstate.state)
subnfa.endstate.epsilon.add(end.state)
    elif c == '?':  # zero-or-one: just add an extra epsilon edge that skips the sub-NFA
subnfa = constructNFA(tree.left)
subnfa.endstate.isend = False
subnfa.endstate.epsilon.add(end.state)
start.epsilon.add(end.state)
start.epsilon.add(subnfa.startstate.state)
elif c == '|':
subnfa1 = constructNFA(tree.left)
subnfa2 = constructNFA(tree.right)
subnfa1.endstate.isend = False
subnfa2.endstate.isend = False
start.epsilon.add(subnfa1.startstate.state)
start.epsilon.add(subnfa2.startstate.state)
subnfa1.endstate.epsilon.add(end.state)
subnfa2.endstate.epsilon.add(end.state)
    elif c == '`':  # note the operand order: the right subtree holds the earlier part of the expression
subnfa1 = constructNFA(tree.left)
subnfa2 = constructNFA(tree.right)
subnfa2.endstate.isend = False
subnfa2.endstate.epsilon.add(subnfa1.startstate.state)
start.epsilon.add(subnfa2.startstate.state)
subnfa1.endstate.epsilon.add(end.state)
return nfa(start, end)
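# Small illustration of the construction above: for the single-character regex "a",
# constructNFA creates two fresh states with one transition start --a--> end; for a
# concatenation node, the sub-NFA of the earlier operand is chained into the later
# one through an epsilon edge.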
def getnfa(re: str):
t = tree(re)
_t = t.getTree()
ret = constructNFA(_t)
# ret.endstate = [ret.endstate.state]
return ret
def main():
def houxu(t):
if not t:
return
houxu(t.right)
houxu(t.left)
print(t.val, end="")
re = '\\('
tre = tree(re)
# print(tre.regex)
t = tre.getTree()
print("begin"), houxu(t), print('')
mynfa = getnfa(re)
# my = constructNFA(t)
mynfa.transit_table()
print(mynfa.endstate.state)
if __name__ == "__main__":
main()
|
[
"40755640+1170300808@users.noreply.github.com"
] |
40755640+1170300808@users.noreply.github.com
|
d43dbdace5111dbc765bf9b16abcd3affd99e162
|
07a1d15b4ab9b34ae845a056f21ee5d407ec42da
|
/gx_gltf_type.py
|
5242828b7eca97cc6fa95c1c0662b11d16f04e64
|
[] |
no_license
|
andreytata/py_gen_util
|
d987ebb2c04aca244e99f1cae9f61eacf568b0aa
|
f7bf2877ab511de860febf7949acd788cae9aef8
|
refs/heads/master
| 2020-06-01T03:56:59.335305
| 2019-07-26T12:43:34
| 2019-07-26T12:43:34
| 190,624,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50,207
|
py
|
#!/usr/bin/env python
# coding: utf-8
"""GLTF Definitions (schema: http://json-schema.org/draft-04/schema)"""
# LESSONS/06 DIFFS: Tests delegated to python "unittest" framework
# Test code moved to test_gx_gltf_type.py
# VSCode unittest support must be enabled and configured
import inspect, pdb
from pprint import pprint
class Schema(object):
refs = { 'accessor.schema.json' : 'GxAccessor'
, 'animation.schema.json' : 'GxAnimation'
, 'bufferView.schema.json' : 'GxBufferView'
, 'buffer.schema.json' : 'GxBuffer'
, 'camera.schema.json' : 'GxCamera'
, 'image.schema.json' : 'GxImage'
, 'material.schema.json' : 'GxMaterial'
, 'mesh.schema.json' : 'GxMesh'
, 'node.schema.json' : 'GxNode'
, 'sampler.schema.json' : 'GxSampler'
, 'scene.schema.json' : 'GxScene'
, 'skin.schema.json' : 'GxSkin'
, 'texture.schema.json' : 'GxTexture'
, 'animation.channel.schema.json' : 'GxAnimationChannel'
, 'animation.sampler.schema.json' : 'GxAnimationSampler'
, 'mesh.primitive.schema.json' : 'GxMeshPrimitive'
, 'glTFid.schema.json' : 'GLTF_ID'
}
schema = {
}
    deps = { # when producing the C++ class, some members can have complex type definitions
}
@staticmethod
def any_of(src):
if list == type(src):
enum = []
enum_type = 'UNDEFINED'
for d in src:
if 'enum' in d:
enum.append(d['enum'][0])
elif 'type' in d:
enum_type = d['type']
if 'integer' == enum_type:
enum_type = 'int'
else:
enum_type = "<ERROR>"+repr(d)
return [ enum_type, enum ]
return repr(src)
    @classmethod  # decorator defines a method bound to the class object (not a class instance)
    def get_schema(cls):  # collect type info for each class property from the JSON-like class.schema
if not hasattr(cls, 'meta'):
meta = dict() # __class_name = cls.__name__ )
prop = cls.schema['properties']
prop = sorted([ (i, prop[i]) for i in prop ])
for p, defs in prop:
if 'type' in defs:
if 'integer' == defs['type']: meta[p] = {'type': 'int'}
elif 'string' == defs['type']: meta[p] = {'type': 'QString'}
elif 'boolean' == defs['type']: meta[p] = {'type': 'bool'}
elif 'array' == defs['type']:
item_type = defs['items']['type'] if 'type' in defs['items'] else defs['items']
if dict == type(item_type):
item_type = Schema.refs[item_type['$ref']]
meta[p] = {'type': ('GxArray', 'int', item_type)}
elif 'object' == defs['type']: meta[p] = {'type': defs['type']}
else: meta[p] = {'type': 'GxDict'}
elif 'allOf' in defs: meta[p] = {'type': ('int','allOf')}
elif 'anyOf' in defs: meta[p] = {'type': (cls.any_of(defs['anyOf']),'anyOf')}
else:
if 'extensions' == p: continue
elif 'extras' == p: continue
elif 'name' == p: meta[p] = {'type': 'QString'}
else: meta[p] = {'type': defs }
setattr(cls,'meta',meta)
return getattr(cls, 'meta')
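# A rough usage sketch (derived from the method above, shown for orientation only):
# Scene.get_schema() walks Scene.schema['properties'] once, caches the result on the
# class as 'meta', and returns something like
#   {'name': {'type': 'QString'}, 'nodes': {'type': ('GxArray', 'int', 'GLTF_ID')}}
# since 'extensions' and 'extras' are skipped.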
class Image(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Image",
"type": "object",
"description": "Image data used to create a texture. Image can be referenced by URI or `bufferView` index. `mimeType` is required in the latter case.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"uri": {
"type": "string",
"description": "The uri of the image.",
"format": "uriref",
"gltf_detailedDescription": "The uri of the image. Relative paths are relative to the .gltf file. Instead of referencing an external file, the uri can also be a data-uri. The image format must be jpg or png.",
"gltf_uriType": "image"
},
"mimeType": {
"anyOf": [
{
"enum": [ "image/jpeg" ]
},
{
"enum": [ "image/png" ]
},
{
"type": "string"
}
],
"description": "The image's MIME type. Required if `bufferView` is defined."
},
"bufferView": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the bufferView that contains the image. Use this instead of the image's uri property."
},
"name": { },
"extensions": { },
"extras": { }
},
"dependencies": {
"bufferView": [ "mimeType" ]
},
"oneOf": [
{ "required": [ "uri" ] },
{ "required": [ "bufferView" ] }
]
}
class Skin(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Skin",
"type": "object",
"description": "Joints and matrices defining a skin.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"inverseBindMatrices": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the accessor containing the floating-point 4x4 inverse-bind matrices. The default is that each matrix is a 4x4 identity matrix, which implies that inverse-bind matrices were pre-applied."
},
"skeleton": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the node used as a skeleton root.",
"gltf_detailedDescription": "The index of the node used as a skeleton root. The node must be the closest common root of the joints hierarchy or a direct or indirect parent node of the closest common root."
},
"joints": {
"type": "array",
"description": "Indices of skeleton nodes, used as joints in this skin.",
"items": {
"$ref": "glTFid.schema.json"
},
"uniqueItems": True,
"minItems": 1,
"gltf_detailedDescription": "Indices of skeleton nodes, used as joints in this skin. The array length must be the same as the `count` property of the `inverseBindMatrices` accessor (when defined)."
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "joints" ]
}
class Scene(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Scene",
"type": "object",
"description": "The root nodes of a scene.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"nodes": {
"type": "array",
"description": "The indices of each root node.",
"items": {
"$ref": "glTFid.schema.json"
},
"uniqueItems": True,
"minItems": 1
},
"name": { },
"extensions": { },
"extras": { }
}
}
class Gltf(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "glTF",
"type": "object",
"description": "The root object for a glTF asset.",
"allOf": [ { "$ref": "glTFProperty.schema.json" } ],
"properties": {
# "extensionsUsed": {
# "type": "array",
# "description": "Names of glTF extensions used somewhere in this asset.",
# "items": {
# "type": "string"
# },
# "uniqueItems": True,
# "minItems": 1
# },
# "extensionsRequired": {
# "type": "array",
# "description": "Names of glTF extensions required to properly load this asset.",
# "items": {
# "type": "string"
# },
# "uniqueItems": True,
# "minItems": 1
# },
"accessors": {
"type": "array",
"description": "An array of accessors.",
"items": {
"$ref": "accessor.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of accessors. An accessor is a typed view into a bufferView."
},
"animations": {
"type": "array",
"description": "An array of keyframe animations.",
"items": {
"$ref": "animation.schema.json"
},
"minItems": 1
},
"asset": {
"allOf": [ { "$ref": "asset.schema.json" } ],
"description": "Metadata about the glTF asset."
},
"buffers": {
"type": "array",
"description": "An array of buffers.",
"items": {
"$ref": "buffer.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of buffers. A buffer points to binary geometry, animation, or skins."
},
"bufferViews": {
"type": "array",
"description": "An array of bufferViews.",
"items": {
"$ref": "bufferView.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of bufferViews. A bufferView is a view into a buffer generally representing a subset of the buffer."
},
"cameras": {
"type": "array",
"description": "An array of cameras.",
"items": {
"$ref": "camera.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of cameras. A camera defines a projection matrix."
},
"images": {
"type": "array",
"description": "An array of images.",
"items": {
"$ref": "image.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of images. An image defines data used to create a texture."
},
"materials": {
"type": "array",
"description": "An array of materials.",
"items": {
"$ref": "material.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of materials. A material defines the appearance of a primitive."
},
"meshes": {
"type": "array",
"description": "An array of meshes.",
"items": {
"$ref": "mesh.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of meshes. A mesh is a set of primitives to be rendered."
},
"nodes": {
"type": "array",
"description": "An array of nodes.",
"items": {
"$ref": "node.schema.json"
},
"minItems": 1
},
"samplers": {
"type": "array",
"description": "An array of samplers.",
"items": {
"$ref": "sampler.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of samplers. A sampler contains properties for texture filtering and wrapping modes."
},
"scene": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the default scene."
},
"scenes": {
"type": "array",
"description": "An array of scenes.",
"items": {
"$ref": "scene.schema.json"
},
"minItems": 1
},
"skins": {
"type": "array",
"description": "An array of skins.",
"items": {
"$ref": "skin.schema.json"
},
"minItems": 1,
"gltf_detailedDescription": "An array of skins. A skin is defined by joints and matrices."
},
"textures": {
"type": "array",
"description": "An array of textures.",
"items": {
"$ref": "texture.schema.json"
},
"minItems": 1
},
"extensions": { },
"extras": { }
},
"dependencies": {
"scene": [ "scenes" ]
},
"required": [ "asset" ]
}
class Sampler(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Sampler",
"type": "object",
"description": "Texture sampler properties for filtering and wrapping modes.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"magFilter": {
"description": "Magnification filter.",
"gltf_detailedDescription": "Magnification filter. Valid values correspond to WebGL enums: `9728` (NEAREST) and `9729` (LINEAR).",
"gltf_webgl": "`texParameterf()` with pname equal to TEXTURE_MAG_FILTER",
"anyOf": [
{
"enum": [ 9728 ],
"description": "NEAREST",
"type": "integer"
},
{
"enum": [ 9729 ],
"description": "LINEAR",
"type": "integer"
},
{
"type": "integer"
}
]
},
"minFilter": {
"description": "Minification filter.",
"gltf_detailedDescription": "Minification filter. All valid values correspond to WebGL enums.",
"gltf_webgl": "`texParameterf()` with pname equal to TEXTURE_MIN_FILTER",
"anyOf": [
{
"enum": [ 9728 ],
"description": "NEAREST",
"type": "integer"
},
{
"enum": [ 9729 ],
"description": "LINEAR",
"type": "integer"
},
{
"enum": [ 9984 ],
"description": "NEAREST_MIPMAP_NEAREST",
"type": "integer"
},
{
"enum": [ 9985 ],
"description": "LINEAR_MIPMAP_NEAREST",
"type": "integer"
},
{
"enum": [ 9986 ],
"description": "NEAREST_MIPMAP_LINEAR",
"type": "integer"
},
{
"enum": [ 9987 ],
"description": "LINEAR_MIPMAP_LINEAR",
"type": "integer"
},
{
"type": "integer"
}
]
},
"wrapS": {
"description": "s wrapping mode.",
"default": 10497,
"gltf_detailedDescription": "S (U) wrapping mode. All valid values correspond to WebGL enums.",
"gltf_webgl": "`texParameterf()` with pname equal to TEXTURE_WRAP_S",
"anyOf": [
{
"enum": [ 33071 ],
"description": "CLAMP_TO_EDGE",
"type": "integer"
},
{
"enum": [ 33648 ],
"description": "MIRRORED_REPEAT",
"type": "integer"
},
{
"enum": [ 10497 ],
"description": "REPEAT",
"type": "integer"
},
{
"type": "integer"
}
]
},
"wrapT": {
"description": "t wrapping mode.",
"default": 10497,
"gltf_detailedDescription": "T (V) wrapping mode. All valid values correspond to WebGL enums.",
"gltf_webgl": "`texParameterf()` with pname equal to TEXTURE_WRAP_T",
"anyOf": [
{
"enum": [ 33071 ],
"description": "CLAMP_TO_EDGE",
"type": "integer"
},
{
"enum": [ 33648 ],
"description": "MIRRORED_REPEAT",
"type": "integer"
},
{
"enum": [ 10497 ],
"description": "REPEAT",
"type": "integer"
},
{
"type": "integer"
}
]
},
"name": { },
"extensions": { },
"extras": { }
},
"gltf_webgl": "`texParameterf()`"
}
class Texture(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Texture",
"type": "object",
"description": "A texture and its sampler.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"sampler": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the sampler used by this texture. When undefined, a sampler with repeat wrapping and auto filtering should be used."
},
"source": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the image used by this texture. When undefined, it is expected that an extension or other mechanism will supply an alternate texture source, otherwise behavior is undefined."
},
"name": { },
"extensions": { },
"extras": { }
},
"gltf_webgl": "`createTexture()`, `deleteTexture()`, `bindTexture()`, `texImage2D()`, and `texParameterf()`"
}
class Material(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Material",
"type": "object",
"description": "The material appearance of a primitive.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"name": { },
"extensions": { },
"extras": { },
"pbrMetallicRoughness": {
"allOf": [ { "$ref": "material.pbrMetallicRoughness.schema.json" } ],
"description": "A set of parameter values that are used to define the metallic-roughness material model from Physically-Based Rendering (PBR) methodology. When not specified, all the default values of `pbrMetallicRoughness` apply."
},
"normalTexture": {
"allOf": [ { "$ref": "material.normalTextureInfo.schema.json" } ],
"description": "The normal map texture.",
"gltf_detailedDescription": "A tangent space normal map. The texture contains RGB components in linear space. Each texel represents the XYZ components of a normal vector in tangent space. Red [0 to 255] maps to X [-1 to 1]. Green [0 to 255] maps to Y [-1 to 1]. Blue [128 to 255] maps to Z [1/255 to 1]. The normal vectors use OpenGL conventions where +X is right and +Y is up. +Z points toward the viewer. In GLSL, this vector would be unpacked like so: `float3 normalVector = tex2D(<sampled normal map texture value>, texCoord) * 2 - 1`. Client implementations should normalize the normal vectors before using them in lighting equations."
},
"occlusionTexture": {
"allOf": [ { "$ref": "material.occlusionTextureInfo.schema.json" } ],
"description": "The occlusion map texture.",
"gltf_detailedDescription": "The occlusion map texture. The occlusion values are sampled from the R channel. Higher values indicate areas that should receive full indirect lighting and lower values indicate no indirect lighting. These values are linear. If other channels are present (GBA), they are ignored for occlusion calculations."
},
"emissiveTexture": {
"allOf": [ { "$ref": "textureInfo.schema.json" } ],
"description": "The emissive map texture.",
"gltf_detailedDescription": "The emissive map controls the color and intensity of the light being emitted by the material. This texture contains RGB components in sRGB color space. If a fourth component (A) is present, it is ignored."
},
"emissiveFactor": {
"type": "array",
"items": {
"type": "number",
"minimum": 0.0,
"maximum": 1.0
},
"minItems": 3,
"maxItems": 3,
"default": [ 0.0, 0.0, 0.0 ],
"description": "The emissive color of the material.",
"gltf_detailedDescription": "The RGB components of the emissive color of the material. These values are linear. If an emissiveTexture is specified, this value is multiplied with the texel values."
},
"alphaMode": {
"default": "OPAQUE",
"description": "The alpha rendering mode of the material.",
"gltf_detailedDescription": "The material's alpha rendering mode enumeration specifying the interpretation of the alpha value of the main factor and texture.",
"anyOf": [
{
"enum": [ "OPAQUE" ],
"description": "The alpha value is ignored and the rendered output is fully opaque."
},
{
"enum": [ "MASK" ],
"description": "The rendered output is either fully opaque or fully transparent depending on the alpha value and the specified alpha cutoff value."
},
{
"enum": [ "BLEND" ],
"description": "The alpha value is used to composite the source and destination areas. The rendered output is combined with the background using the normal painting operation (i.e. the Porter and Duff over operator)."
},
{
"type": "string"
}
]
},
"alphaCutoff": {
"type": "number",
"minimum": 0.0,
"default": 0.5,
"description": "The alpha cutoff value of the material.",
"gltf_detailedDescription": "Specifies the cutoff threshold when in `MASK` mode. If the alpha value is greater than or equal to this value then it is rendered as fully opaque, otherwise, it is rendered as fully transparent. A value greater than 1.0 will render the entire material as fully transparent. This value is ignored for other modes."
},
"doubleSided": {
"type": "boolean",
"default": False,
"description": "Specifies whether the material is double sided.",
"gltf_detailedDescription": "Specifies whether the material is double sided. When this value is false, back-face culling is enabled. When this value is true, back-face culling is disabled and double sided lighting is enabled. The back-face must have its normals reversed before the lighting equation is evaluated."
}
},
"dependencies" : {
"alphaCutoff" : ["alphaMode"]
}
}
class Node(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Node",
"type": "object",
"description": "A node in the node hierarchy. When the node contains `skin`, all `mesh.primitives` must contain `JOINTS_0` and `WEIGHTS_0` attributes. A node can have either a `matrix` or any combination of `translation`/`rotation`/`scale` (TRS) properties. TRS properties are converted to matrices and postmultiplied in the `T * R * S` order to compose the transformation matrix; first the scale is applied to the vertices, then the rotation, and then the translation. If none are provided, the transform is the identity. When a node is targeted for animation (referenced by an animation.channel.target), only TRS properties may be present; `matrix` will not be present.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"camera": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the camera referenced by this node."
},
"children": {
"type": "array",
"description": "The indices of this node's children.",
"items": {
"$ref": "glTFid.schema.json"
},
"uniqueItems": True,
"minItems": 1
},
"skin": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the skin referenced by this node.",
"gltf_detailedDescription": "The index of the skin referenced by this node. When a skin is referenced by a node within a scene, all joints used by the skin must belong to the same scene."
},
"matrix": {
"type": "array",
"description": "A floating-point 4x4 transformation matrix stored in column-major order.",
"items": {
"type": "number"
},
"minItems": 16,
"maxItems": 16,
"default": [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0 ],
"gltf_detailedDescription": "A floating-point 4x4 transformation matrix stored in column-major order.",
"gltf_webgl": "`uniformMatrix4fv()` with the transpose parameter equal to false"
},
"mesh": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the mesh in this node."
},
"rotation": {
"type": "array",
"description": "The node's unit quaternion rotation in the order (x, y, z, w), where w is the scalar.",
"items": {
"type": "number",
"minimum": -1.0,
"maximum": 1.0
},
"minItems": 4,
"maxItems": 4,
"default": [ 0.0, 0.0, 0.0, 1.0 ]
},
"scale": {
"type": "array",
"description": "The node's non-uniform scale, given as the scaling factors along the x, y, and z axes.",
"items": {
"type": "number"
},
"minItems": 3,
"maxItems": 3,
"default": [ 1.0, 1.0, 1.0 ]
},
"translation": {
"type": "array",
"description": "The node's translation along the x, y, and z axes.",
"items": {
"type": "number"
},
"minItems": 3,
"maxItems": 3,
"default": [ 0.0, 0.0, 0.0 ]
},
"weights": {
"type": "array",
"description": "The weights of the instantiated Morph Target. Number of elements must match number of Morph Targets of used mesh.",
"minItems": 1,
"items": {
"type": "number"
}
},
"name": { },
"extensions": { },
"extras": { }
},
"dependencies": {
"weights": [ "mesh" ],
"skin": [ "mesh" ]
},
"not": {
"anyOf": [
{ "required": [ "matrix", "translation" ] },
{ "required": [ "matrix", "rotation" ] },
{ "required": [ "matrix", "scale" ] }
]
}
}
class MeshPrimitive(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Mesh Primitive",
"type": "object",
"description": "Geometry to be rendered with the given material.",
"allOf": [ { "$ref": "glTFProperty.schema.json" } ],
"properties": {
"attributes": {
"type": "object",
"description": "A dictionary object, where each key corresponds to mesh attribute semantic and each value is the index of the accessor containing attribute's data.",
"minProperties": 1,
"additionalProperties": {
"$ref": "glTFid.schema.json"
}
},
"indices": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the accessor that contains the indices.",
"gltf_detailedDescription": "The index of the accessor that contains mesh indices. When this is not defined, the primitives should be rendered without indices using `drawArrays()`. When defined, the accessor must contain indices: the `bufferView` referenced by the accessor should have a `target` equal to 34963 (ELEMENT_ARRAY_BUFFER); `componentType` must be 5121 (UNSIGNED_BYTE), 5123 (UNSIGNED_SHORT) or 5125 (UNSIGNED_INT), the latter may require enabling additional hardware support; `type` must be `\"SCALAR\"`. For triangle primitives, the front face has a counter-clockwise (CCW) winding order. Values of the index accessor must not include the maximum value for the given component type, which triggers primitive restart in several graphics APIs and would require client implementations to rebuild the index buffer. Primitive restart values are disallowed and all index values must refer to actual vertices. As a result, the index accessor's values must not exceed the following maxima: BYTE `< 255`, UNSIGNED_SHORT `< 65535`, UNSIGNED_INT `< 4294967295`."
},
"material": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the material to apply to this primitive when rendering."
},
"mode": {
"description": "The type of primitives to render.",
"default": 4,
"gltf_detailedDescription": "The type of primitives to render. All valid values correspond to WebGL enums.",
"anyOf": [
{
"enum": [ 0 ],
"description": "POINTS",
"type": "integer"
},
{
"enum": [ 1 ],
"description": "LINES",
"type": "integer"
},
{
"enum": [ 2 ],
"description": "LINE_LOOP",
"type": "integer"
},
{
"enum": [ 3 ],
"description": "LINE_STRIP",
"type": "integer"
},
{
"enum": [ 4 ],
"description": "TRIANGLES",
"type": "integer"
},
{
"enum": [ 5 ],
"description": "TRIANGLE_STRIP",
"type": "integer"
},
{
"enum": [ 6 ],
"description": "TRIANGLE_FAN",
"type": "integer"
},
{
"type": "integer"
}
]
},
"targets": {
"type": "array",
"description": "An array of Morph Targets, each Morph Target is a dictionary mapping attributes (only `POSITION`, `NORMAL`, and `TANGENT` supported) to their deviations in the Morph Target.",
"items": {
"type": "object",
"minProperties": 1,
"additionalProperties": {
"$ref": "glTFid.schema.json"
},
"description": "A dictionary object specifying attributes displacements in a Morph Target, where each key corresponds to one of the three supported attribute semantic (`POSITION`, `NORMAL`, or `TANGENT`) and each value is the index of the accessor containing the attribute displacements' data."
},
"minItems": 1
},
"extensions": { },
"extras": { }
},
"gltf_webgl": "`drawElements()` and `drawArrays()`",
"required": [ "attributes" ]
}
class Mesh(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Mesh",
"type": "object",
"description": "A set of primitives to be rendered. A node can contain one mesh. A node's transform places the mesh in the scene.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"primitives": {
"type": "array",
"description": "An array of primitives, each defining geometry to be rendered with a material.",
"items": {
"$ref": "mesh.primitive.schema.json"
},
"minItems": 1
},
"weights": {
"type": "array",
"description": "Array of weights to be applied to the Morph Targets.",
"items": {
"type": "number"
},
"minItems": 1
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "primitives" ]
}
class Animation(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Animation",
"type": "object",
"description": "A keyframe animation.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"channels": {
"type": "array",
"description": "An array of channels, each of which targets an animation's sampler at a node's property. Different channels of the same animation can't have equal targets.",
"items": {
"$ref": "animation.channel.schema.json"
},
"minItems": 1
},
"samplers": {
"type": "array",
"description": "An array of samplers that combines input and output accessors with an interpolation algorithm to define a keyframe graph (but not its target).",
"items": {
"$ref": "animation.sampler.schema.json"
},
"minItems": 1
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "channels", "samplers" ]
}
class Buffer(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Buffer",
"type": "object",
"description": "A buffer points to binary geometry, animation, or skins.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"uri": {
"type": "string",
"description": "The uri of the buffer.",
"format": "uriref",
"gltf_detailedDescription": "The uri of the buffer. Relative paths are relative to the .gltf file. Instead of referencing an external file, the uri can also be a data-uri.",
"gltf_uriType": "application"
},
"byteLength": {
"type": "integer",
"description": "The length of the buffer in bytes.",
"minimum": 1
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "byteLength" ]
}
class BufferView(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Buffer View",
"type": "object",
"description": "A view into a buffer generally representing a subset of the buffer.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"buffer": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the buffer."
},
"byteOffset": {
"type": "integer",
"description": "The offset into the buffer in bytes.",
"minimum": 0,
"default": 0
},
"byteLength": {
"type": "integer",
"description": "The total byte length of the buffer view.",
"minimum": 1
},
"byteStride": {
"type": "integer",
"description": "The stride, in bytes.",
"minimum": 4,
"maximum": 252,
"multipleOf": 4,
"gltf_detailedDescription": "The stride, in bytes, between vertex attributes. When this is not defined, data is tightly packed. When two or more accessors use the same bufferView, this field must be defined.",
"gltf_webgl": "`vertexAttribPointer()` stride parameter"
},
"target": {
"description": "The target that the GPU buffer should be bound to.",
"gltf_webgl": "`bindBuffer()`",
"anyOf": [
{
"enum": [ 34962 ],
"description": "ARRAY_BUFFER",
"type": "integer"
},
{
"enum": [ 34963 ],
"description": "ELEMENT_ARRAY_BUFFER",
"type": "integer"
},
{
"type": "integer"
}
]
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "buffer", "byteLength" ]
}
class Camera(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Camera",
"type": "object",
"description": "A camera's projection. A node can reference a camera to apply a transform to place the camera in the scene.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"orthographic": {
"allOf": [ { "$ref": "camera.orthographic.schema.json" } ],
"description": "An orthographic camera containing properties to create an orthographic projection matrix."
},
"perspective": {
"allOf": [ { "$ref": "camera.perspective.schema.json" } ],
"description": "A perspective camera containing properties to create a perspective projection matrix."
},
"type": {
"description": "Specifies if the camera uses a perspective or orthographic projection.",
"gltf_detailedDescription": "Specifies if the camera uses a perspective or orthographic projection. Based on this, either the camera's `perspective` or `orthographic` property will be defined.",
"anyOf": [
{
"enum": [ "perspective" ]
},
{
"enum": [ "orthographic" ]
},
{
"type": "string"
}
]
},
"name": { },
"extensions": { },
"extras": { }
},
"required": [ "type" ],
"not": {
"required": [ "perspective", "orthographic" ]
}
}
class Accessor(Schema):
schema = {
"$schema": "http://json-schema.org/draft-04/schema",
"title": "Accessor",
"type": "object",
"description": "A typed view into a bufferView. A bufferView contains raw binary data. An accessor provides a typed view into a bufferView or a subset of a bufferView similar to how WebGL's `vertexAttribPointer()` defines an attribute in a buffer.",
"allOf": [ { "$ref": "glTFChildOfRootProperty.schema.json" } ],
"properties": {
"bufferView": {
"allOf": [ { "$ref": "glTFid.schema.json" } ],
"description": "The index of the bufferView.",
"gltf_detailedDescription": "The index of the bufferView. When not defined, accessor must be initialized with zeros; `sparse` property or extensions could override zeros with actual values."
},
"byteOffset": {
"type": "integer",
"description": "The offset relative to the start of the bufferView in bytes.",
"minimum": 0,
"default": 0,
"gltf_detailedDescription": "The offset relative to the start of the bufferView in bytes. This must be a multiple of the size of the component datatype.",
"gltf_webgl": "`vertexAttribPointer()` offset parameter"
},
"componentType": {
"description": "The datatype of components in the attribute.",
"gltf_detailedDescription": "The datatype of components in the attribute. All valid values correspond to WebGL enums. The corresponding typed arrays are `Int8Array`, `Uint8Array`, `Int16Array`, `Uint16Array`, `Uint32Array`, and `Float32Array`, respectively. 5125 (UNSIGNED_INT) is only allowed when the accessor contains indices, i.e., the accessor is only referenced by `primitive.indices`.",
"gltf_webgl": "`vertexAttribPointer()` type parameter",
"anyOf": [
{
"enum": [ 5120 ],
"description": "BYTE",
"type": "integer"
},
{
"enum": [ 5121 ],
"description": "UNSIGNED_BYTE",
"type": "integer"
},
{
"enum": [ 5122 ],
"description": "SHORT",
"type": "integer"
},
{
"enum": [ 5123 ],
"description": "UNSIGNED_SHORT",
"type": "integer"
},
{
"enum": [ 5125 ],
"description": "UNSIGNED_INT",
"type": "integer"
},
{
"enum": [ 5126 ],
"description": "FLOAT",
"type": "integer"
},
{
"type": "integer"
}
]
},
"normalized": {
"type": "boolean",
"description": "Specifies whether integer data values should be normalized.",
"default": False,
"gltf_detailedDescription": "Specifies whether integer data values should be normalized (`true`) to [0, 1] (for unsigned types) or [-1, 1] (for signed types), or converted directly (`false`) when they are accessed. This property is defined only for accessors that contain vertex attributes or animation output data.",
"gltf_webgl": "`vertexAttribPointer()` normalized parameter"
},
"count": {
"type": "integer",
"description": "The number of attributes referenced by this accessor.",
"minimum": 1,
"gltf_detailedDescription": "The number of attributes referenced by this accessor, not to be confused with the number of bytes or number of components."
},
"type": {
"description": "Specifies if the attribute is a scalar, vector, or matrix.",
"anyOf": [
{
"enum": [ "SCALAR" ]
},
{
"enum": [ "VEC2" ]
},
{
"enum": [ "VEC3" ]
},
{
"enum": [ "VEC4" ]
},
{
"enum": [ "MAT2" ]
},
{
"enum": [ "MAT3" ]
},
{
"enum": [ "MAT4" ]
},
{
"type": "string"
}
]
},
"max": {
"type": "array",
"description": "Maximum value of each component in this attribute.",
"items": {
"type": "number"
},
"minItems": 1,
"maxItems": 16,
"gltf_detailedDescription": "Maximum value of each component in this attribute. Array elements must be treated as having the same data type as accessor's `componentType`. Both min and max arrays have the same length. The length is determined by the value of the type property; it can be 1, 2, 3, 4, 9, or 16.\n\n`normalized` property has no effect on array values: they always correspond to the actual values stored in the buffer. When accessor is sparse, this property must contain max values of accessor data with sparse substitution applied."
},
"min": {
"type": "array",
"description": "Minimum value of each component in this attribute.",
"items": {
"type": "number"
},
"minItems": 1,
"maxItems": 16,
"gltf_detailedDescription": "Minimum value of each component in this attribute. Array elements must be treated as having the same data type as accessor's `componentType`. Both min and max arrays have the same length. The length is determined by the value of the type property; it can be 1, 2, 3, 4, 9, or 16.\n\n`normalized` property has no effect on array values: they always correspond to the actual values stored in the buffer. When accessor is sparse, this property must contain min values of accessor data with sparse substitution applied."
},
"sparse": {
"allOf": [ { "$ref": "accessor.sparse.schema.json" } ],
"description": "Sparse storage of attributes that deviate from their initialization value."
},
"name": { },
"extensions": { },
"extras": { }
},
"dependencies": {
"byteOffset": [ "bufferView" ]
},
"required": [ "componentType", "count", "type" ]
}
def get_schema_based_list(vars_dict):
res = list()
names = vars_dict.keys()
for n in names:
o = vars_dict[n]
if inspect.isclass(o) and issubclass(o, Schema):
if o == Schema:
continue
res.append(o)
return res
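# Illustrative call (an assumption about how this helper is meant to be driven):
# get_schema_based_list(vars()) at module level collects every Schema subclass defined
# above (Image, Skin, Scene, Gltf, ...), skipping the Schema base class itself.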
|
[
"andreytata@ex.ua"
] |
andreytata@ex.ua
|
7013dae4b744ed9423390c43b9f1563544a5bd17
|
5f3fff9854b6e2c52b7dc28c4854387867a5f761
|
/GDog/681. Next Closest Time.py
|
81064a5757776daa0c9ddbe7cd0e00af16edf3af
|
[] |
no_license
|
taochenlei/leetcode_algorithm
|
444cc2204676fd703ae5f0f976fabd74868c2c98
|
b1a1d965ea99586e03fd975afca8815cd47a3c0f
|
refs/heads/master
| 2020-06-06T10:20:40.231897
| 2019-04-28T11:55:21
| 2019-04-28T11:55:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
class Solution:
def nextClosestTime(self, time: str) -> str:
h, m = time.split(":")
origin = set(time)
i = int(h)
j = int(m)
while True:
j += 1
if j == 60:
j = 0
i += 1
if i == 24:
i = 0
str_i = str(i) if i > 9 else "0" + str(i)
str_j = str(j) if j > 9 else "0" + str(j)
cur_time = str_i + ":" + str_j
if set(cur_time).issubset(origin):
return cur_time
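# Illustrative results of the brute-force minute-by-minute search above:
#   Solution().nextClosestTime("19:34") -> "19:39"
#   Solution().nextClosestTime("23:59") -> "22:22"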
|
[
"noreply@github.com"
] |
taochenlei.noreply@github.com
|
2801eeab4a6e20f5849681320dd45c0e879000c8
|
09eb1bf36a025af39dd10d28cf6fe94bacd4ce7f
|
/tests/test_core.py
|
b8abdc61c59b14e2863a3f3fc34caf0b26e54780
|
[
"MIT"
] |
permissive
|
ExpertsTreinamentos/iron-erp
|
a538c34e96e8ed04142033815debe4cfc2d82d56
|
ca6d018eea8daef98d87b676beca371de92e2df2
|
refs/heads/master
| 2020-04-08T20:15:29.085835
| 2018-12-05T21:40:06
| 2018-12-05T21:40:06
| 159,691,047
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
from datetime import date
from django.db.utils import IntegrityError
from django.utils import timezone
import pytest
pytestmark = pytest.mark.django_db
from iron.core import models
def test_cadastros_basicos():
curso = models.Curso.objects.create(nome='Curso', carga_horaria=20)
prof = models.Professor.objects.create(nome='Professor')
turma = models.Turma.objects.create(
curso = curso,
professor = prof,
data_inicio = date(2000,1,1),
vagas = 10,
)
aluno1 = models.Aluno.objects.create(nome='Aluno 1')
inscricao1 = models.Inscricao.objects.create(
turma = turma,
aluno = aluno1,
data_entrada = timezone.now().date(),
)
aluno2 = models.Aluno.objects.create(nome='Aluno 2')
inscricao2 = models.Inscricao.objects.create(
turma = turma,
aluno = aluno2,
data_entrada = timezone.now().date(),
)
# consultas
assert turma == curso.turmas.all()[0]
assert turma == prof.turmas.all()[0]
assert turma.inscricoes.count() == 2
assert turma == models.Turma.objects.filter(inscricoes__aluno=aluno1).get()
def test_cadastro_aluno():
aluno1 = models.Aluno.objects.create(nome='Aluno 1')
aluno2 = models.Aluno.objects.create(nome='Aluno 2')
aluno3 = models.Aluno.objects.create(nome='Aluno 3', cpf='1234')
with pytest.raises(IntegrityError):
aluno4 = models.Aluno.objects.create(nome='Aluno 4', cpf='1234')
|
[
"david@kwast.net"
] |
david@kwast.net
|
106b8ec454d21701a83da39b2bcf05c80452ce3d
|
7fb9d4e18e915e4be50a4f387586eee01c2c6d74
|
/venv/bin/flask
|
ce2b74cc4df4fa54c21d939cea2b4831346732ff
|
[] |
no_license
|
Dezynre/project19
|
08825d583f340e0b394495f6549092dd717c1e99
|
22f51ab0157100e65818b89914936f9f42157337
|
refs/heads/master
| 2023-01-02T20:06:45.783947
| 2020-10-31T08:17:48
| 2020-10-31T08:17:48
| 308,834,896
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
#!/home/super-user/Desktop/WEB_PROJECTS/PUBLIC_BLOG/venv/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"dezynre254@gmail.com"
] |
dezynre254@gmail.com
|
|
bc0d107eda0134dbc9d7e3850953956f9a76fa90
|
acc45babeb8c7d49b4845b24b3be5b55c4dff2ba
|
/motif_mark.py
|
9f4bae9e93a6de06dcb5a7876c1eb6089374ea6d
|
[] |
no_license
|
ryanjdecourcy/motif-mark
|
3cacce10213685d97dff4647a28e4e697d33cbee
|
e2bf4716fc5243d5b86c92f51a2c36dfcf232188
|
refs/heads/main
| 2023-03-13T17:19:08.439642
| 2021-03-06T03:54:01
| 2021-03-06T03:54:01
| 335,823,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,901
|
py
|
#!/usr/bin/env python
# Importing modules
# in bash terminal run "pip install pycairo"
import cairo
import re
import argparse
# Setting up argparse inputs
parser = argparse.ArgumentParser(description="'-fa' to specify fasta file; '-m' to specify motif mark file")
parser.add_argument("-fa", "--fasta_file", help="input file with fasta sequences", required = True)
parser.add_argument("-m", "--motif_mark_file", help="input file with motif marks", required = True)
args = parser.parse_args()
fasta_input = args.fasta_file
motif_input = args.motif_mark_file
###############################################################
################# BUILDING MOTIF CONVERSION FUNCTION BELOW
def motif_conversion(amotif):
reg_building = ''
for x in range(len(amotif)):
if (len(vIUPAC[amotif[x]])) == 1:
reg_building += (vIUPAC[amotif[x]][0])
elif (len(vIUPAC[amotif[x]])) > 1:
reg_building += "["
for y in range(len(vIUPAC[amotif[x]])):
reg_building += (vIUPAC[amotif[x]][y])
reg_building += "]"
return reg_building
########### END OF MOTIF CONVERSION FUNCTION
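# Illustrative example (using the vIUPAC table defined below): the motif "ygcy"
# converts to the regular expression "[ct]gc[ct]", i.e. each ambiguous IUPAC
# symbol expands to a character class of the bases it can stand for.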
###############################################################
######## Start of exon_placer function
def exon_placer(aread):
exon_regex = "[A-Z]+"
test_exon_indices = [match.start() for match in re.finditer(exon_regex, aread)]
test_exon = re.findall(exon_regex, aread)
anexon_reg_found = re.search(exon_regex, aread)
anexon = anexon_reg_found[0]
exon_start_indices = [match.start() for match in re.finditer(exon_regex, aread)]
exon_start = exon_start_indices[0]
exon_end = exon_start + len(anexon)
# list to output: the exon sequence, start position and end position
read_exon_start_end = [anexon, exon_start, exon_end]
return read_exon_start_end
# so exon_placer fn() outputs a list of 3 elements:
# the exon found, the start position and the end position
###### End of exon_placer function
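# Illustrative example with a hypothetical read: for "atgcGATTACAtgca" the single
# uppercase (exon) stretch is "GATTACA", so exon_placer returns ["GATTACA", 4, 11]
# (the exon sequence, its start index, and its end index).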
#################################################################
# Dict to translate ambiguous IUPAC nt symbols
# Credit to wikipedia: https://en.wikipedia.org/wiki/Nucleic_acid_notation)
# Although the dictionary was expanded to include lowercase nt symbols
vIUPAC = {
"A":["A" ],
"C":[ "C" ],
"G":[ "G" ],
"T":[ "T"],
"U":[ "U"],
"W":["A", "T"],
"S":[ "C","G" ],
"M":["A","C" ],
"K":[ "G","T"],
"R":["A", "G", ],
"Y":[ "C", "T"],
"B":[ "C","G","T"],
"D":["A", "G","T"],
"H":["A","C", "T"],
"V":["A","C","G", ],
"N":["A","C","G","T"],
"Z":[ ],
"a":["a" ],
"c":[ "c" ],
"g":[ "g" ],
"t":[ "t"],
"u":[ "u"],
"w":["a", "t"],
"s":[ "c","g" ],
"m":["a","c" ],
"k":[ "g","t"],
"r":["a", "g", ],
"y":[ "c", "t"],
"b":[ "c","g","t"],
"d":["a", "g","t"],
"h":["a","c", "t"],
"v":["a","c","g", ],
"n":["a","c","g","t"],
"z":[ ],
}
########################################################################
# Building list of motifs from input file
motif_list = []
with open(motif_input, "r") as mo:
for line in mo:
motif_list.append(line.replace('\n',''))
########################################################################
########## USE MOTIF CONV. FN() TO MAKE DICTIONARY OF REGEX
motif_regex_dict = {}
for m in range(len(motif_list)):
motif_regex_dict[motif_list[m]] = motif_conversion(motif_list[m])
# motif_regex_dict has keys with original motifs (with ambiguous IUPAC symbols)
# and values with their corresponding reg. expressions
# built with motif_conversion() fn previously defined
########################################################################
# Creating read and header lists to work from
current_read_list = []
all_reads = []
all_read_headers = []
with open(fasta_input, "r") as fa:
for line in fa:
if ">" not in line:
current_read_list.append(line.replace('\n',''))
else:
all_reads.append(''.join(current_read_list))
current_read_list = []
all_read_headers.append(line.replace('\n',''))
all_reads.append(''.join(current_read_list))
# removing first (empty) read
all_reads.pop(0)
# Get list of just the gene symbols of the headers from the FASTA header lines
header_list = [x.split()[0][1:] for x in all_read_headers]
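# Example (hypothetical header line): ">INSR chr19:7150261-7150808" contributes
# "INSR" to header_list (the text after ">" up to the first whitespace).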
##########################################################
##########################################################
# OUTPUT TO GRAPHIC AND LOOPING THROUGH ALL READS
# FOR PROCESSING STARTS BELOW HERE
##########################################################
# Setting up cairo surface
num_reads = len(all_reads)
width, height = 1000, (500 * num_reads) + 200
# Setting up output to write to "<fasta input filename>.svg"
surface = cairo.SVGSurface( ("%s.svg" % (fasta_input)), width, height)
ctx = cairo.Context(surface)
# Adding text to annotate motif output
# Setting color, size and style of font
ctx.set_source_rgb(0, 0, 0)
ctx.set_font_size(15)
ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
# Getting dimensions of font output for centering
(x, y, width, height, dx, dy) = ctx.text_extents("Longer vertical bars indicate start of motifs for discerning ambiguous overlapping sites")
# Positioning and outputting text
ctx.move_to( (500 - (width / 2)), ( (500 * num_reads) + 100 - height))
ctx.show_text("Longer vertical bars indicate start of motifs for discerning ambiguous overlapping sites")
# Looping through all the FASTA records, using variable rec (for record)
for rec in range(len(all_reads)):
aread = all_reads[rec]
aheader = all_read_headers[rec]
###############################################################
# Getting indices from reads - where motifs start
# Adding to empty (temporary) dictionary
aread_motif_indices = {}
# Looping through motif_list to check through each motif - adding all to temporary dictionary
for x in range(len(motif_list)):
current_motif_indices = [match.start() for match in re.finditer(motif_regex_dict[motif_list[x]], aread)]
aread_motif_indices[motif_list[x]] = current_motif_indices
# Above outputs "aread_motif_indices" dictionary with lists as indices of original motifs
# empty lists indicate that the motif was not found in the read at all
###################################################################
########### CREATING GRAPHIC WITH PYCAIRO BELOW
########### Starting with defining functions
    #
    # Note:
    # All positions and sizes in the graphic output are scaled to the 1000-pixel
    # canvas width, in proportion to positions along the actual sequence
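    # Example: on the 1000-pixel-wide canvas, a motif starting at base 250 of a
    # 500-base read is drawn at x = (250 / 500) * 1000 = 500 pixels.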
x = 0
    #####################################################
# Defining color functions
def intron_exon_col():
ctx.set_source_rgb(0.541176470588235, 0.772549019607843, 1.0)
def red():
ctx.set_source_rgb((247/255), (35/255), (35/255))
def orange():
ctx.set_source_rgb((255/255), (140/255), 0)
def yellow():
ctx.set_source_rgb((255/255), (234/255), 0)
def green():
ctx.set_source_rgb((77/255), (255/255), 0)
def indigo():
ctx.set_source_rgb((85/255), (43/255), (117/255))
def violet():
ctx.set_source_rgb((252/255), (50/255), (209/255))
def dark_green():
ctx.set_source_rgb((46/255), (102/255), (62/255))
def navy_blue():
ctx.set_source_rgb((10/255), (92/255), (170/255))
def burgundy():
ctx.set_source_rgb((128/255), (60/255), (60/255))
def purple():
ctx.set_source_rgb((158/255), (21/255), (142/255))
def black():
ctx.set_source_rgb(0, 0, 0)
######################################################
### Setting up combination function to iterate through color-picking
def col_fn_list(x):
if x == 0:
intron_exon_col()
elif x == 1:
red()
elif x == 2:
orange()
elif x == 3:
yellow()
elif x == 4:
green()
elif x == 5:
indigo()
elif x == 6:
violet()
elif x == 7:
dark_green()
elif x == 8:
navy_blue()
elif x == 9:
burgundy()
elif x == 10:
purple()
elif x == 11:
black()
#########################################################
# col_fn_list() function chooses color based on number - pulls from previously defined color functions
# 0: for intron/exon
# 1 - 10 for colors of motifs
# 11: black
######################################################
### Drawing intron to overlay exon on
# Variables for placement
# Intron dimensions
intron_tl_x = 0
intron_tl_y = 150 + (rec * 500)
intron_width = 1000
intron_height = 10
# Drawing intron
col_fn_list(0)
ctx.rectangle(intron_tl_x, intron_tl_y, intron_width, intron_height)
ctx.fill()
# Getting positions for exon
# test_exon_placer_output is a list containing the following: exon sequence, start and end (order-specific)
test_exon_placer_output = (exon_placer(aread))
start_pct = (test_exon_placer_output[1] / len(aread)) * 1000
end_pct = ( (test_exon_placer_output[2] - test_exon_placer_output[1]) / len(aread) ) * 1000
# Exon
# Variables for placement
y_exon = 135 + (rec * 500)
vert_len_exon = 40
# Drawing exon
col_fn_list(0)
ctx.rectangle(start_pct, y_exon, end_pct, vert_len_exon)
ctx.fill()
######################################################
### Adding header of read to top of output
# Variables for placement
# header_x_placement is picked based of text_extents, to center text in output
header_y_placement = 25 + (rec * 500)
# Making header text - only need gene name -
# splitting on whitespace and leaving out the leading ">" character
gene_name = aheader.split()[0][1:]
# Setting color, size, style of font
col_fn_list(11)
ctx.set_font_size(20)
ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
# Getting dimensions of text in order to place with respect to center in .svg graphic output
(x, y, width, height, dx, dy) = ctx.text_extents("Gene Symbol: " + gene_name)
# Placing and outputting text
ctx.move_to( (500 - (width / 2)), header_y_placement)
ctx.show_text("Gene Symbol: " + gene_name)
######################################################
### Making legend of different motif colors (max of 10)
### Building working_motif_list
    # List containing, for each motif found in this read:
    # the motif string and a corresponding list of its occurrence positions,
    # built from the aread_motif_indices dictionary above -
    # a list is used instead of a dict to guarantee ordering (dict ordering varies across Python versions)
working_motif_list = []
for (k, v) in aread_motif_indices.items():
if len(v) > 0:
alist = [k, v]
working_motif_list.append(alist)
#####################################################################
# Making motif legend
# Variables for placement
legend_text_x = 50
legend_text_y = 280 + (rec * 500)
# Color, size, style of font
col_fn_list(11)
ctx.set_font_size(15)
ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
# Positioning and outputting font
ctx.move_to(legend_text_x, legend_text_y)
ctx.show_text("Motif Legend")
# Making motif legend outline
# based on length of working_motif_list
# how many motifs are found in this sequence
# Setting dimensions of outline
left = 50
right = 175
top = 290 + (rec * 500)
bottom = top + (len(working_motif_list) * 25 )
# Setting color, line width, choosing path and drawing to .svg output
col_fn_list(11)
ctx.set_line_width(1)
ctx.move_to(left, top)
ctx.line_to(right, top)
ctx.line_to(right, bottom)
ctx.line_to(left, bottom)
ctx.line_to(left, top)
ctx.close_path()
ctx.stroke()
###########################
# Setting variable x for further use positioning individual genes in output graphic
x = 0 + (rec * 500)
# Setting loop - variable "m" for: 1 motif, and associated information
for m in range(len(working_motif_list)):
# Setting color; moving through list of colors in pre-defined swatch
col_fn_list(m + 1)
motif_len = len(working_motif_list[m][0])
curr_motif_width = (motif_len / len(aread)) * 1000
# Inner loop to move through each of the occurrences of finding a motif
for p in range(len(working_motif_list[m][1])):
# Setting position for motif start and scaling to size of graphic output
raw_posn = working_motif_list[m][1][p]
curr_motif_posn = (raw_posn / len(aread) ) * 1000
# Setting dimensions for motif
y_top_main = 130 + (rec * 500)
y_main_length = 50
y_top_starting = 100 + (rec * 500)
y_starting_length = 110
# Drawing motifs, composed of two parts:
# Main portion - the entire length of the motif is represented by this rectangle
ctx.rectangle(curr_motif_posn, y_top_main, curr_motif_width, y_main_length)
# "Starting" portion - the exact start of the motif is marked by this thin "line" (also a rectangle)
ctx.rectangle(curr_motif_posn, y_top_starting, 1, y_starting_length)
# Making swatches of color (squares) in the key legend,
# to annotate motif colors with sequences from input
# Variables for placement
legend_col_x = 60
# Drawing squares to output
ctx.rectangle(legend_col_x, (300 + x), 10, 10)
ctx.fill()
# Annotating legend with motif sequences:
# Variables for placement
motif_seqs_x = 85
# Choosing color, size and style of font
col_fn_list(11)
ctx.set_font_size(10)
ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
# Writing out individual motifs to their corresponding color swatches in the legend
# Moving through system in a loop
ctx.move_to(motif_seqs_x, (310 + x) )
ctx.show_text(working_motif_list[m][0])
# Incrementing x (used in placement)
        x += 15

# Finalize the SVG surface so the drawing is fully flushed to the output file
surface.finish()
|
[
"ryanjdecourcy@gmail.com"
] |
ryanjdecourcy@gmail.com
|
707db000c00d3186f450c3d7d0744a961b6c3e59
|
d9ca319514ffe757ab36e87597f562b81153aefe
|
/estimator/TrainValTensorBoard.py
|
3028fd4759f4b4a693b510da10656d7c3076d1a2
|
[] |
no_license
|
josecyc/PUBG
|
0f9ebde4dcc147ab52db0d237aa084121b898e5b
|
1b3cd74e101e6d56fd4099313a9e4dede91d193a
|
refs/heads/master
| 2020-04-27T22:24:18.404145
| 2019-03-11T18:15:41
| 2019-03-11T18:15:41
| 174,735,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
import os
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.python.eager import context
from time import gmtime, strftime
class TrainValTensorBoard(TensorBoard):
def __init__(self, log_dir='./logs', **kwargs):
st = strftime("training %a_%d_%b_%Y_%H:%M", gmtime())
sv = strftime("validation %a_%d_%b_%Y_%H:%M", gmtime())
self.val_log_dir = os.path.join(log_dir, sv)
training_log_dir = os.path.join(log_dir, st)
super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
def set_model(self, model):
if context.executing_eagerly():
self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
else:
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TrainValTensorBoard, self).set_model(model)
def _write_custom_summaries(self, step, logs=None):
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if 'val_' in k}
if context.executing_eagerly():
with self.val_writer.as_default(), tf.contrib.summary.always_record_summaries():
for name, value in val_logs.items():
tf.contrib.summary.scalar(name, value.item(), step=step)
else:
for name, value in val_logs.items():
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, step)
self.val_writer.flush()
        logs = {k: v for k, v in logs.items() if 'val_' not in k}
super(TrainValTensorBoard, self)._write_custom_summaries(step, logs)
def on_train_end(self, logs=None):
super(TrainValTensorBoard, self).on_train_end(logs)
self.val_writer.close()
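# Minimal usage sketch (assumes a compiled tf.keras `model` and training/validation
# arrays already exist; the names below are placeholders, not defined in this file):
#
#   model.fit(x_train, y_train,
#             validation_data=(x_val, y_val),
#             epochs=10,
#             callbacks=[TrainValTensorBoard(log_dir='./logs')])
#
# Training metrics are written under "./logs/training ...", while validation metrics
# (with the "val_" prefix stripped) go to "./logs/validation ...", so both series can
# be overlaid on the same TensorBoard charts.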
|
[
"jcruz-y-@e1z4r13p9.42.us.org"
] |
jcruz-y-@e1z4r13p9.42.us.org
|
9caa5ee05bebb29025bf46c0402d861d527d04ca
|
76ee0f818060fa568b00b3eebf8ab65743820069
|
/polls/views.py
|
f955bb01b6b83665363d93246ee1383a1250e46a
|
[] |
no_license
|
seal031/DjangoProject
|
b592685616d7511dccd5a9826e08e1fa67655d3c
|
7c21affaf3a670a8e3b0a8823382cef8f89db492
|
refs/heads/master
| 2021-08-15T22:44:21.198212
| 2017-11-18T13:54:00
| 2017-11-18T13:54:00
| 111,088,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from .models import Question
from django.shortcuts import get_object_or_404, render
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'polls/index.html', context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
response = "You're looking at the results of question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id)
|
[
"87749356@qq.com"
] |
87749356@qq.com
|
8bdbcae864b577faa345cd9b564655bdc7663ecc
|
8fe3e891cbe304b1a0261dbcc3cb95e4ecbddd90
|
/bettron/soccer/views.py
|
36f51755b19c5c89efb4e3ec1d5e5ba1b032eb2b
|
[] |
no_license
|
jtorresr1/BETTRON
|
e1be746b0fbb6f855c86fad5793c453519a59574
|
2e7d8b93945b391952e0714ca03085389d75f02e
|
refs/heads/master
| 2020-08-08T05:39:38.023916
| 2019-10-08T19:39:27
| 2019-10-08T19:39:27
| 213,737,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,665
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.db.models import Count
import csv
from soccer.models import Ligas,Partidos,Equipos
from soccer.forms import *
from soccer.utils import *
import os
import time
from datetime import datetime,timedelta
from django.template.loader import get_template
from selenium import webdriver
def index(request):
return render(request, 'soccer/index.html')
def error(request):
return render(request,'soccer/error.html')
def apuestas(request):
context = {}
if request.method == 'POST':
Nacion = request.POST['Nacion']
league = request.POST['Liga']
HomeTeam = request.POST['HomeTeam']
AwayTeam = request.POST['AwayTeam']
if Equipos.objects.filter(Nombre_Equipo=HomeTeam).count() != 1 or Equipos.objects.filter(
Nombre_Equipo=AwayTeam).count() != 1:
return redirect('futbol:error')
probabilidadlocal, probabilidadvisita = promedio_goles(Nacion, league, HomeTeam, AwayTeam)
if probabilidadlocal == 0:
return HttpResponse("no tiene partidos previos para evaluar")
evaluaciongoles = 8
probloc, probvis = probabilidad_goles(probabilidadlocal, probabilidadvisita, evaluaciongoles)
context = context_goles(evaluaciongoles, probloc, probvis)
context['local_team'] = HomeTeam
context['away_team'] = AwayTeam
context['local'] = probloc
context['visita'] = probvis
return render(request, 'soccer/result.html', context)
else:
league_form = RegPais()
return render(request, 'soccer/apuestas.html', {'league_form':league_form})
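# Illustrative POST payload this view expects (field names taken from the request.POST
# lookups above; the values are hypothetical):
#   {"Nacion": "Spain", "Liga": "La Liga", "HomeTeam": "Barcelona", "AwayTeam": "Sevilla"}
# promedio_goles(), probabilidad_goles() and context_goles() are imported from soccer.utils (not shown here).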
def match_all(request):
today = datetime.now()
date_from_match = today.strftime("%Y-%m-%d")
lugar = '/home/jaime/partidos/' + date_from_match
if not os.path.exists(lugar):
os.mkdir(lugar)
Nacion = request.POST['Nacion']
league = request.POST['Liga']
HomeTeam = request.POST['HomeTeam']
AwayTeam = request.POST['AwayTeam']
if Equipos.objects.filter(Nombre_Equipo=HomeTeam).count() != 1 or Equipos.objects.filter(
Nombre_Equipo=AwayTeam).count() != 1:
return redirect('futbol:form_general')
probabilidadlocal, probabilidadvisita = promedio_goles(Nacion, league, HomeTeam, AwayTeam)
if probabilidadlocal == 0:
return redirect('futbol:form_general')
evaluaciongoles = 8
probloc, probvis = probabilidad_goles(probabilidadlocal, probabilidadvisita, evaluaciongoles)
context = context_goles(evaluaciongoles, probloc, probvis)
context['local_team'] = HomeTeam
context['away_team'] = AwayTeam
context['local'] = probloc
context['visita'] = probvis
template = get_template('forpdf.html')
html = template.render(context)
pdf = render_to_pdf('forpdf.html', context)
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
filename = str(HomeTeam) + "vs" + str(AwayTeam) + ".pdf"
content = "inline; filename="+filename
content = "attachment; filename="+filename
response['Content-Disposition'] = content
return response
return redirect('futbol:index')
def newmatch(request):
actualizar_csv()
context = {}
with open('/home/jaime/PycharmProjects/Test1/Test/partidos_ayer.csv', 'r') as files:
data = csv.reader(files)
for row in data:
nac = row[0].strip()
league = row[1].strip()
testeador = Ligas.objects.filter(Nacion=nac, Liga=league).count()
if testeador == 0:
for i in range(2):
if testeador == 1 or len(league.split("-")) == 1:
break
if len(league.split("-")) == 2:
leag, extra = league.split("-")
else:
leag, extra, extra1 = league.split("-")
leag = leag.strip()
league = leag
testeador = Ligas.objects.filter(Nacion=nac, Liga=league).count()
if testeador == 0:
continue
re = Ligas.objects.get(Nacion=nac, Liga=league)
value = Partidos(Cod_Liga=re, HomeTeam=row[2], AwayTeam=row[3], date_match=row[4],
GoalsHome=int(row[5]), GoalsAway=int(row[6]), Result=row[7])
try:
value.save()
except:
context["error"] = "Fallo"
context["exito"] = "Funciona"
return render(request, 'soccer/add_ligas.html', context)
def add_ligas(request):
context = {}
a = 1
with open('/home/jaime/PycharmProjects/Test1/Test/test1.csv', 'r') as files:
data = csv.reader(files)
for i in range(1):
next(data)
for row in data:
value = Ligas(Nacion=row[0], Liga=row[1])
try:
value.save()
except:
context["error"] = "Fallo"
a=0
break
if a:
context["exito"] = "Funciona"
return render(request,'soccer/add_ligas.html', context)
def add_matches(request):
context = {}
with open('/home/jaime/PycharmProjects/Test1/Test/partidos.csv', 'r') as files:
data = csv.reader(files)
for row in data:
codigo_league = int(row[0])
re = Ligas.objects.get(Codigo_liga=codigo_league)
print (str(row))
fecha = row[3]
dia, mes, anho = fecha.split(".")
dateoriginal = anho + "-" + mes + "-" + dia
value = Partidos(Cod_Liga=re, HomeTeam=row[1], AwayTeam=row[2], date_match=dateoriginal, GoalsHome=int(row[4]), GoalsAway=int(row[5]), Result=row[6])
try:
value.save()
except:
context["error"] = "Fallo"
context["exito"] = "Funciona"
return render(request, 'soccer/add_ligas.html', context)
def actualizar_matches(request):
with open('/home/jaime/PycharmProjects/Test1/Test/partidos_actualizador.csv', 'r') as files:
data = csv.reader(files)
for row in data:
codigo_league = int(row[0])
re = Ligas.objects.get(Codigo_liga=codigo_league)
print(str(row))
fecha = row[3]
dia, mes, anho = fecha.split(".")
dateoriginal = anho + "-" + mes + "-" + dia
value = Partidos(Cod_Liga=re, HomeTeam=row[1], AwayTeam=row[2], date_match=dateoriginal,
GoalsHome=int(row[4]), GoalsAway=int(row[5]), Result=row[6])
try:
value.save()
except:
return redirect('futbol:error')
return redirect('futbol:index')
def add_teams(request):
context = {}
with open('/home/jaime/PycharmProjects/Test1/Test/equipos.csv', 'r') as files:
data = csv.reader(files)
for row in data:
value = Equipos(Nombre_Equipo=row[0])
try:
value.save()
except:
context["error"] = "Se repitio"
context["exito"] = "Funciona"
return render(request, 'soccer/add_ligas.html', context)
def general(request):
if not os.path.exists('/home/jaime/PycharmProjects/Test1/Test/partidos_tomorrow.csv'):
get_match_tomorrow()
return redirect('futbol:form_general')
def formulario_general(request):
league_form = Checking()
return render(request, 'soccer/complete_form.html', {'league_form': league_form})
def download_pdfs(request):
manipulate()
return redirect('futbol:index')
|
[
"jaimetr97@gmail.com"
] |
jaimetr97@gmail.com
|
775808f38e23723aac6429aafe7a2baf9dfd7c3b
|
b6e31b6160efd3209badacf194b748dfbca3b2df
|
/blog/tests.py
|
99aea8f491c8cd1c815d91ccede6965e6a2fcc57
|
[] |
no_license
|
AntonChernov/wonderslab_test
|
ef68f86a4c5b4f8de9973c48ad0c18a22f3ffc8a
|
3b394f9830244d7e469cf3b8585f27fc62524e6d
|
refs/heads/master
| 2021-05-07T17:23:32.851068
| 2017-11-01T05:51:29
| 2017-11-01T05:51:29
| 108,728,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,704
|
py
|
from django.contrib.auth import get_user_model
from django.test import Client
from django.test import TestCase
# Create your tests here.
from django.urls import reverse_lazy
from blog.models import Post
class PostTests(TestCase):
def setUp(self):
self.user1 = get_user_model().objects.create(
username='test1',
email='test1@gmail.com',
first_name='Billy',
last_name='Dodson'
)
self.user1.set_password('test1pass')
self.user2 = get_user_model().objects.create(
username='test2',
email='test2@gmail.com',
first_name='Jilian',
last_name='Sommers'
)
self.user2.set_password('test2pass')
Post.objects.bulk_create(
[
Post(author=self.user1, title='Some title 1', text='some text 1'),
Post(author=self.user2, title='Some title 2', text='some text 2'),
Post(author=self.user2, title='Some title 3', text='some text 3'),
Post(author=self.user1, title='Some title 4', text='some text 4'),
Post(author=self.user1, title='Some title 5', text='some text 5'),
Post(author=self.user2, title='Some title 6', text='some text 6'),
]
)
self.client = Client()
def test_login(self):
response = self.client.post(reverse_lazy('login'), data={
'username': self.user1.username,
'password': 'test1pass'
})
self.assertEqual(response.status_code, 200)
    def test_show_post_authenticate_user(self):
user = self.client.force_login(user=self.user1)
response = self.client.get(reverse_lazy('user_posts', kwargs={'pk': self.user1.id}))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse_lazy('user_posts', kwargs={'pk': 12}))
self.assertEqual(response.status_code, 404)
self.client.logout()
def test_redirect_to_login(self):
response = self.client.get(reverse_lazy('user_posts', kwargs={'pk': self.user1.id}), follow=True)
last_url, status_code = response.redirect_chain[-1]
self.assertEqual(last_url, reverse_lazy('login') + '?next=/blog/users/1/')
self.assertEqual(status_code, 302)
def test_change_if_not_post_owner(self):
user = self.client.post(reverse_lazy('login'), {'username': self.user1.username,
'password': 'test1pass'})
response = self.client.post(reverse_lazy('update_post', kwargs={'pk': 3}), follow=True)
print(response)
self.assertEqual(response.status_code, 200)
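# Sketch of the blog.models.Post model these tests assume (not part of this file;
# the field types are assumptions based on how the fields are used above):
#
#   class Post(models.Model):
#       author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#       title = models.CharField(max_length=255)
#       text = models.TextField()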
|
[
"anton.chernov@steelkiwi.com"
] |
anton.chernov@steelkiwi.com
|
70ee1afcd2e3855551589e7899518ff75e94ec41
|
656def2ca5c0bd959b31b98cdbc53fea3420b2dc
|
/Python3.7-VideoComposition/src/tencentcloud/cbs/v20170312/models.py
|
5b4e27df779ac6484fcc48eafcc445c9ede68e0e
|
[] |
no_license
|
tencentyun/serverless-demo
|
120271b96f8f960b6125c9d1481a5d8fe56165ae
|
4c324bb186c460fe78252f0ca5c28132a8bce6c9
|
refs/heads/master
| 2023-08-25T17:07:04.959745
| 2023-08-25T08:10:49
| 2023-08-25T08:10:49
| 281,120,881
| 94
| 119
| null | 2023-08-31T06:34:36
| 2020-07-20T13:15:46
| null |
UTF-8
|
Python
| false
| false
| 128,684
|
py
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class ApplySnapshotRequest(AbstractModel):
"""ApplySnapshot请求参数结构体
"""
def __init__(self):
r"""
:param SnapshotId: 快照ID, 可通过[DescribeSnapshots](/document/product/362/15647)查询。
:type SnapshotId: str
:param DiskId: 快照原云硬盘ID,可通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param AutoStopInstance: 回滚前是否执行自动关机
:type AutoStopInstance: bool
:param AutoStartInstance: 回滚完成后是否自动开机
:type AutoStartInstance: bool
"""
self.SnapshotId = None
self.DiskId = None
self.AutoStopInstance = None
self.AutoStartInstance = None
def _deserialize(self, params):
self.SnapshotId = params.get("SnapshotId")
self.DiskId = params.get("DiskId")
self.AutoStopInstance = params.get("AutoStopInstance")
self.AutoStartInstance = params.get("AutoStartInstance")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
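# Illustrative deserialization sketch (the IDs are placeholders):
#   req = ApplySnapshotRequest()
#   req._deserialize({"SnapshotId": "snap-xxxxxxxx", "DiskId": "disk-xxxxxxxx"})
#   # req.SnapshotId == "snap-xxxxxxxx"; any key that does not match an attribute
#   # would trigger the "fileds are useless" warning above.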
class ApplySnapshotResponse(AbstractModel):
"""ApplySnapshot返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AttachDetail(AbstractModel):
"""描述一个实例已挂载和可挂载数据盘的数量。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param AttachedDiskCount: 实例已挂载数据盘的数量。
:type AttachedDiskCount: int
:param MaxAttachCount: 实例最大可挂载数据盘的数量。
:type MaxAttachCount: int
"""
self.InstanceId = None
self.AttachedDiskCount = None
self.MaxAttachCount = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AttachedDiskCount = params.get("AttachedDiskCount")
self.MaxAttachCount = params.get("MaxAttachCount")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AttachDisksRequest(AbstractModel):
"""AttachDisks请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 云服务器实例ID。云盘将被挂载到此云服务器上,通过[DescribeInstances](/document/product/213/15728)接口查询。
:type InstanceId: str
:param DiskIds: 将要被挂载的弹性云盘ID。通过[DescribeDisks](/document/product/362/16315)接口查询。单次最多可挂载10块弹性云盘。
:type DiskIds: list of str
:param DeleteWithInstance: 可选参数,不传该参数则仅执行挂载操作。传入`True`时,会在挂载成功后将云硬盘设置为随云主机销毁模式,仅对按量计费云硬盘有效。
:type DeleteWithInstance: bool
:param AttachMode: 可选参数,用于控制云盘挂载时使用的挂载模式,目前仅对黑石裸金属机型有效。取值范围:<br><li>PF<br><li>VF
:type AttachMode: str
"""
self.InstanceId = None
self.DiskIds = None
self.DeleteWithInstance = None
self.AttachMode = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.DiskIds = params.get("DiskIds")
self.DeleteWithInstance = params.get("DeleteWithInstance")
self.AttachMode = params.get("AttachMode")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AttachDisksResponse(AbstractModel):
"""AttachDisks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AutoMountConfiguration(AbstractModel):
"""自动初始化、挂载云盘时指定配置。
"""
def __init__(self):
r"""
:param InstanceId: 要挂载到的实例ID。
:type InstanceId: list of str
:param MountPoint: 子机内的挂载点。
:type MountPoint: list of str
:param FileSystemType: 文件系统类型,支持的有 ext4、xfs。
:type FileSystemType: str
"""
self.InstanceId = None
self.MountPoint = None
self.FileSystemType = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.MountPoint = params.get("MountPoint")
self.FileSystemType = params.get("FileSystemType")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AutoSnapshotPolicy(AbstractModel):
"""描述了定期快照策略的详细信息
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyId: 定期快照策略ID。
:type AutoSnapshotPolicyId: str
:param AutoSnapshotPolicyName: 定期快照策略名称。
:type AutoSnapshotPolicyName: str
:param AutoSnapshotPolicyState: 定期快照策略的状态。取值范围:<br><li>NORMAL:正常<br><li>ISOLATED:已隔离。
:type AutoSnapshotPolicyState: str
:param IsActivated: 定期快照策略是否激活。
:type IsActivated: bool
:param IsPermanent: 使用该定期快照策略创建出来的快照是否永久保留。
:type IsPermanent: bool
:param RetentionDays: 使用该定期快照策略创建出来的快照保留天数。
:type RetentionDays: int
:param CreateTime: 定期快照策略的创建时间。
:type CreateTime: str
:param NextTriggerTime: 定期快照下次触发的时间。
:type NextTriggerTime: str
:param Policy: 定期快照的执行策略。
:type Policy: list of Policy
:param DiskIdSet: 已绑定当前定期快照策略的云盘ID列表。
:type DiskIdSet: list of str
"""
self.AutoSnapshotPolicyId = None
self.AutoSnapshotPolicyName = None
self.AutoSnapshotPolicyState = None
self.IsActivated = None
self.IsPermanent = None
self.RetentionDays = None
self.CreateTime = None
self.NextTriggerTime = None
self.Policy = None
self.DiskIdSet = None
def _deserialize(self, params):
self.AutoSnapshotPolicyId = params.get("AutoSnapshotPolicyId")
self.AutoSnapshotPolicyName = params.get("AutoSnapshotPolicyName")
self.AutoSnapshotPolicyState = params.get("AutoSnapshotPolicyState")
self.IsActivated = params.get("IsActivated")
self.IsPermanent = params.get("IsPermanent")
self.RetentionDays = params.get("RetentionDays")
self.CreateTime = params.get("CreateTime")
self.NextTriggerTime = params.get("NextTriggerTime")
if params.get("Policy") is not None:
self.Policy = []
for item in params.get("Policy"):
obj = Policy()
obj._deserialize(item)
self.Policy.append(obj)
self.DiskIdSet = params.get("DiskIdSet")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class BindAutoSnapshotPolicyRequest(AbstractModel):
"""BindAutoSnapshotPolicy请求参数结构体
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyId: 要绑定的定期快照策略ID。
:type AutoSnapshotPolicyId: str
:param DiskIds: 要绑定的云硬盘ID列表,一次请求最多绑定80块云盘。
:type DiskIds: list of str
"""
self.AutoSnapshotPolicyId = None
self.DiskIds = None
def _deserialize(self, params):
self.AutoSnapshotPolicyId = params.get("AutoSnapshotPolicyId")
self.DiskIds = params.get("DiskIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class BindAutoSnapshotPolicyResponse(AbstractModel):
"""BindAutoSnapshotPolicy返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Cdc(AbstractModel):
"""描述独享集群的详细信息。
"""
def __init__(self):
r"""
:param CageId: 独享集群围笼ID。
注意:此字段可能返回 null,表示取不到有效值。
:type CageId: str
:param CdcState: 独享集群状态。取值范围:<br><li>NORMAL:正常;<br><li>CLOSED:关闭,此时将不可使用该独享集群创建新的云硬盘;<br><li>FAULT:独享集群状态异常,此时独享集群将不可操作,腾讯云运维团队将会及时修复该集群;<br><li>ISOLATED:因未及时续费导致独享集群被隔离,此时将不可使用该独享集群创建新的云硬盘,对应的云硬盘也将不可操作。
:type CdcState: str
:param Zone: 独享集群所属的[可用区](/document/product/213/15753#ZoneInfo)ID。
注意:此字段可能返回 null,表示取不到有效值。
:type Zone: str
:param CdcName: 独享集群实例名称。
:type CdcName: str
:param CdcResource: 独享集群的资源容量大小。
注意:此字段可能返回 null,表示取不到有效值。
:type CdcResource: :class:`tencentcloud.cbs.v20170312.models.CdcSize`
:param CdcId: 独享集群实例id。
:type CdcId: str
:param DiskType: 独享集群类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘集群<br><li>CLOUD_PREMIUM:表示高性能云硬盘集群<br><li>CLOUD_SSD:SSD表示SSD云硬盘集群。
:type DiskType: str
:param ExpiredTime: 独享集群到期时间。
:type ExpiredTime: str
"""
self.CageId = None
self.CdcState = None
self.Zone = None
self.CdcName = None
self.CdcResource = None
self.CdcId = None
self.DiskType = None
self.ExpiredTime = None
def _deserialize(self, params):
self.CageId = params.get("CageId")
self.CdcState = params.get("CdcState")
self.Zone = params.get("Zone")
self.CdcName = params.get("CdcName")
if params.get("CdcResource") is not None:
self.CdcResource = CdcSize()
self.CdcResource._deserialize(params.get("CdcResource"))
self.CdcId = params.get("CdcId")
self.DiskType = params.get("DiskType")
self.ExpiredTime = params.get("ExpiredTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CdcSize(AbstractModel):
"""显示独享集群的大小
"""
def __init__(self):
r"""
:param DiskAavilable: 独享集群的可用容量大小,单位GiB
:type DiskAavilable: int
:param DiskTotal: 独享集群的总容量大小,单位GiB
:type DiskTotal: int
"""
self.DiskAavilable = None
self.DiskTotal = None
def _deserialize(self, params):
self.DiskAavilable = params.get("DiskAavilable")
self.DiskTotal = params.get("DiskTotal")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateAutoSnapshotPolicyRequest(AbstractModel):
"""CreateAutoSnapshotPolicy请求参数结构体
"""
def __init__(self):
r"""
:param Policy: 定期快照的执行策略。
:type Policy: list of Policy
:param AutoSnapshotPolicyName: 要创建的定期快照策略名。不传则默认为“未命名”。最大长度不能超60个字节。
:type AutoSnapshotPolicyName: str
:param IsActivated: 是否激活定期快照策略,FALSE表示未激活,TRUE表示激活,默认为TRUE。
:type IsActivated: bool
:param IsPermanent: 通过该定期快照策略创建的快照是否永久保留。FALSE表示非永久保留,TRUE表示永久保留,默认为FALSE。
:type IsPermanent: bool
:param RetentionDays: 通过该定期快照策略创建的快照保留天数,默认保留7天。如果指定本参数,则IsPermanent入参不可指定为TRUE,否则会产生冲突。
:type RetentionDays: int
:param DryRun: 是否创建定期快照的执行策略。TRUE表示只需获取首次开始备份的时间,不实际创建定期快照策略,FALSE表示创建,默认为FALSE。
:type DryRun: bool
"""
self.Policy = None
self.AutoSnapshotPolicyName = None
self.IsActivated = None
self.IsPermanent = None
self.RetentionDays = None
self.DryRun = None
def _deserialize(self, params):
if params.get("Policy") is not None:
self.Policy = []
for item in params.get("Policy"):
obj = Policy()
obj._deserialize(item)
self.Policy.append(obj)
self.AutoSnapshotPolicyName = params.get("AutoSnapshotPolicyName")
self.IsActivated = params.get("IsActivated")
self.IsPermanent = params.get("IsPermanent")
self.RetentionDays = params.get("RetentionDays")
self.DryRun = params.get("DryRun")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateAutoSnapshotPolicyResponse(AbstractModel):
"""CreateAutoSnapshotPolicy返回参数结构体
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyId: 新创建的定期快照策略ID。
:type AutoSnapshotPolicyId: str
:param NextTriggerTime: 首次开始备份的时间。
:type NextTriggerTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AutoSnapshotPolicyId = None
self.NextTriggerTime = None
self.RequestId = None
def _deserialize(self, params):
self.AutoSnapshotPolicyId = params.get("AutoSnapshotPolicyId")
self.NextTriggerTime = params.get("NextTriggerTime")
self.RequestId = params.get("RequestId")
class CreateDisksRequest(AbstractModel):
"""CreateDisks请求参数结构体
"""
def __init__(self):
r"""
:param Placement: 实例所在的位置。通过该参数可以指定实例所属可用区,所属项目。若不指定项目,将在默认项目下进行创建。
:type Placement: :class:`tencentcloud.cbs.v20170312.models.Placement`
:param DiskChargeType: 云硬盘计费类型。<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:按小时后付费<br><li>CDCPAID:独享集群付费<br>各类型价格请参考云硬盘[价格总览](/document/product/362/2413)。
:type DiskChargeType: str
:param DiskType: 硬盘介质类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘<br><li>CLOUD_HSSD:表示增强型SSD云硬盘<br><li>CLOUD_TSSD:表示极速型SSD云硬盘。
:type DiskType: str
:param DiskName: 云盘显示名称。不传则默认为“未命名”。最大长度不能超60个字节。
:type DiskName: str
:param Tags: 云盘绑定的标签。
:type Tags: list of Tag
:param SnapshotId: 快照ID,如果传入则根据此快照创建云硬盘,快照类型必须为数据盘快照,可通过[DescribeSnapshots](/document/product/362/15647)接口查询快照,见输出参数DiskUsage解释。
:type SnapshotId: str
:param DiskCount: 创建云硬盘数量,不传则默认为1。单次请求最多可创建的云盘数有限制,具体参见[云硬盘使用限制](https://cloud.tencent.com/doc/product/362/5145)。
:type DiskCount: int
:param ThroughputPerformance: 可选参数。使用此参数可给云硬盘购买额外的性能。<br>当前仅支持极速型云盘(CLOUD_TSSD)和增强型SSD云硬盘(CLOUD_HSSD)
:type ThroughputPerformance: int
:param DiskSize: 云硬盘大小,单位为GB。<br><li>如果传入`SnapshotId`则可不传`DiskSize`,此时新建云盘的大小为快照大小<br><li>如果传入`SnapshotId`同时传入`DiskSize`,则云盘大小必须大于或等于快照大小<br><li>云盘大小取值范围参见云硬盘[产品分类](/document/product/362/2353)的说明。
:type DiskSize: int
:param Shareable: 可选参数,默认为False。传入True时,云盘将创建为共享型云盘。
:type Shareable: bool
:param ClientToken: 用于保证请求幂等性的字符串。该字符串由客户生成,需保证不同请求之间唯一,最大值不超过64个ASCII字符。若不指定该参数,则无法保证请求的幂等性。
:type ClientToken: str
:param Encrypt: 传入该参数用于创建加密云盘,取值固定为ENCRYPT。
:type Encrypt: str
:param DiskChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数指定包年包月云盘的购买时长、是否设置自动续费等属性。<br>创建预付费云盘该参数必传,创建按小时后付费云盘无需传该参数。
:type DiskChargePrepaid: :class:`tencentcloud.cbs.v20170312.models.DiskChargePrepaid`
:param DeleteSnapshot: 销毁云盘时删除关联的非永久保留快照。0 表示非永久快照不随云盘销毁而销毁,1表示非永久快照随云盘销毁而销毁,默认取0。快照是否永久保留可以通过DescribeSnapshots接口返回的快照详情的IsPermanent字段来判断,true表示永久快照,false表示非永久快照。
:type DeleteSnapshot: int
:param AutoMountConfiguration: 创建云盘时指定自动挂载并初始化该数据盘。
:type AutoMountConfiguration: :class:`tencentcloud.cbs.v20170312.models.AutoMountConfiguration`
"""
self.Placement = None
self.DiskChargeType = None
self.DiskType = None
self.DiskName = None
self.Tags = None
self.SnapshotId = None
self.DiskCount = None
self.ThroughputPerformance = None
self.DiskSize = None
self.Shareable = None
self.ClientToken = None
self.Encrypt = None
self.DiskChargePrepaid = None
self.DeleteSnapshot = None
self.AutoMountConfiguration = None
def _deserialize(self, params):
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.DiskChargeType = params.get("DiskChargeType")
self.DiskType = params.get("DiskType")
self.DiskName = params.get("DiskName")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
self.SnapshotId = params.get("SnapshotId")
self.DiskCount = params.get("DiskCount")
self.ThroughputPerformance = params.get("ThroughputPerformance")
self.DiskSize = params.get("DiskSize")
self.Shareable = params.get("Shareable")
self.ClientToken = params.get("ClientToken")
self.Encrypt = params.get("Encrypt")
if params.get("DiskChargePrepaid") is not None:
self.DiskChargePrepaid = DiskChargePrepaid()
self.DiskChargePrepaid._deserialize(params.get("DiskChargePrepaid"))
self.DeleteSnapshot = params.get("DeleteSnapshot")
if params.get("AutoMountConfiguration") is not None:
self.AutoMountConfiguration = AutoMountConfiguration()
self.AutoMountConfiguration._deserialize(params.get("AutoMountConfiguration"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
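# Illustrative sketch (values are placeholders) of deserializing a minimal
# pay-as-you-go data-disk request; the exact keys accepted by Placement are
# defined by the Placement model elsewhere in this module:
#   req = CreateDisksRequest()
#   req._deserialize({
#       "Placement": {"Zone": "ap-guangzhou-3"},
#       "DiskChargeType": "POSTPAID_BY_HOUR",
#       "DiskType": "CLOUD_PREMIUM",
#       "DiskSize": 50,
#   })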
class CreateDisksResponse(AbstractModel):
"""CreateDisks返回参数结构体
"""
def __init__(self):
r"""
:param DiskIdSet: 创建的云硬盘ID列表。
:type DiskIdSet: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskIdSet = None
self.RequestId = None
def _deserialize(self, params):
self.DiskIdSet = params.get("DiskIdSet")
self.RequestId = params.get("RequestId")
class CreateSnapshotRequest(AbstractModel):
"""CreateSnapshot请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 需要创建快照的云硬盘ID,可通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param SnapshotName: 快照名称,不传则新快照名称默认为“未命名”。
:type SnapshotName: str
:param Deadline: 快照的到期时间,到期后该快照将会自动删除,需要传入UTC时间下的ISO-8601标准时间格式,例如:2022-01-08T09:47:55+00:00
:type Deadline: str
"""
self.DiskId = None
self.SnapshotName = None
self.Deadline = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
self.SnapshotName = params.get("SnapshotName")
self.Deadline = params.get("Deadline")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSnapshotResponse(AbstractModel):
"""CreateSnapshot返回参数结构体
"""
def __init__(self):
r"""
:param SnapshotId: 新创建的快照ID。
:type SnapshotId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SnapshotId = None
self.RequestId = None
def _deserialize(self, params):
self.SnapshotId = params.get("SnapshotId")
self.RequestId = params.get("RequestId")
class DeleteAutoSnapshotPoliciesRequest(AbstractModel):
"""DeleteAutoSnapshotPolicies请求参数结构体
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyIds: 要删除的定期快照策略ID列表。
:type AutoSnapshotPolicyIds: list of str
"""
self.AutoSnapshotPolicyIds = None
def _deserialize(self, params):
self.AutoSnapshotPolicyIds = params.get("AutoSnapshotPolicyIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteAutoSnapshotPoliciesResponse(AbstractModel):
"""DeleteAutoSnapshotPolicies返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteSnapshotsRequest(AbstractModel):
"""DeleteSnapshots请求参数结构体
"""
def __init__(self):
r"""
:param SnapshotIds: 要删除的快照ID列表,可通过[DescribeSnapshots](/document/product/362/15647)查询。
:type SnapshotIds: list of str
:param DeleteBindImages: 是否强制删除快照关联的镜像
:type DeleteBindImages: bool
"""
self.SnapshotIds = None
self.DeleteBindImages = None
def _deserialize(self, params):
self.SnapshotIds = params.get("SnapshotIds")
self.DeleteBindImages = params.get("DeleteBindImages")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteSnapshotsResponse(AbstractModel):
"""DeleteSnapshots返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAutoSnapshotPoliciesRequest(AbstractModel):
"""DescribeAutoSnapshotPolicies请求参数结构体
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyIds: 要查询的定期快照策略ID列表。参数不支持同时指定`AutoSnapshotPolicyIds`和`Filters`。
:type AutoSnapshotPolicyIds: list of str
:param Filters: 过滤条件。参数不支持同时指定`AutoSnapshotPolicyIds`和`Filters`。<br><li>auto-snapshot-policy-id - Array of String - 是否必填:否 -(过滤条件)按定期快照策略ID进行过滤。定期快照策略ID形如:`asp-11112222`。<br><li>auto-snapshot-policy-state - Array of String - 是否必填:否 -(过滤条件)按定期快照策略的状态进行过滤。定期快照策略ID形如:`asp-11112222`。(NORMAL:正常 | ISOLATED:已隔离。)<br><li>auto-snapshot-policy-name - Array of String - 是否必填:否 -(过滤条件)按定期快照策略名称进行过滤。
:type Filters: list of Filter
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
:param Order: 输出定期快照列表的排列顺序。取值范围:<br><li>ASC:升序排列<br><li>DESC:降序排列。
:type Order: str
:param OrderField: 定期快照列表排序的依据字段。取值范围:<br><li>CREATETIME:依据定期快照的创建时间排序<br>默认按创建时间排序。
:type OrderField: str
"""
self.AutoSnapshotPolicyIds = None
self.Filters = None
self.Limit = None
self.Offset = None
self.Order = None
self.OrderField = None
def _deserialize(self, params):
self.AutoSnapshotPolicyIds = params.get("AutoSnapshotPolicyIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.Order = params.get("Order")
self.OrderField = params.get("OrderField")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAutoSnapshotPoliciesResponse(AbstractModel):
"""DescribeAutoSnapshotPolicies返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 有效的定期快照策略数量。
:type TotalCount: int
:param AutoSnapshotPolicySet: 定期快照策略列表。
:type AutoSnapshotPolicySet: list of AutoSnapshotPolicy
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.AutoSnapshotPolicySet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("AutoSnapshotPolicySet") is not None:
self.AutoSnapshotPolicySet = []
for item in params.get("AutoSnapshotPolicySet"):
obj = AutoSnapshotPolicy()
obj._deserialize(item)
self.AutoSnapshotPolicySet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDiskAssociatedAutoSnapshotPolicyRequest(AbstractModel):
"""DescribeDiskAssociatedAutoSnapshotPolicy请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 要查询的云硬盘ID。
:type DiskId: str
"""
self.DiskId = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiskAssociatedAutoSnapshotPolicyResponse(AbstractModel):
"""DescribeDiskAssociatedAutoSnapshotPolicy返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 云盘绑定的定期快照数量。
:type TotalCount: int
:param AutoSnapshotPolicySet: 云盘绑定的定期快照列表。
:type AutoSnapshotPolicySet: list of AutoSnapshotPolicy
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.AutoSnapshotPolicySet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("AutoSnapshotPolicySet") is not None:
self.AutoSnapshotPolicySet = []
for item in params.get("AutoSnapshotPolicySet"):
obj = AutoSnapshotPolicy()
obj._deserialize(item)
self.AutoSnapshotPolicySet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDiskConfigQuotaRequest(AbstractModel):
"""DescribeDiskConfigQuota请求参数结构体
"""
def __init__(self):
r"""
:param InquiryType: 查询类别,取值范围。<br><li>INQUIRY_CBS_CONFIG:查询云盘配置列表<br><li>INQUIRY_CVM_CONFIG:查询云盘与实例搭配的配置列表。
:type InquiryType: str
:param Zones: 查询一个或多个[可用区](/document/product/213/15753#ZoneInfo)下的配置。
:type Zones: list of str
:param DiskChargeType: 付费模式。取值范围:<br><li>PREPAID:预付费<br><li>POSTPAID_BY_HOUR:后付费。
:type DiskChargeType: str
:param DiskTypes: 硬盘介质类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘<br><li>CLOUD_HSSD:表示增强型SSD云硬盘。
:type DiskTypes: list of str
:param DiskUsage: 系统盘或数据盘。取值范围:<br><li>SYSTEM_DISK:表示系统盘<br><li>DATA_DISK:表示数据盘。
:type DiskUsage: str
:param InstanceFamilies: 按照实例机型系列过滤。实例机型系列形如:S1、I1、M1等。详见[实例类型](https://cloud.tencent.com/document/product/213/11518)
:type InstanceFamilies: list of str
:param CPU: 实例CPU核数。
:type CPU: int
:param Memory: 实例内存大小。
:type Memory: int
"""
self.InquiryType = None
self.Zones = None
self.DiskChargeType = None
self.DiskTypes = None
self.DiskUsage = None
self.InstanceFamilies = None
self.CPU = None
self.Memory = None
def _deserialize(self, params):
self.InquiryType = params.get("InquiryType")
self.Zones = params.get("Zones")
self.DiskChargeType = params.get("DiskChargeType")
self.DiskTypes = params.get("DiskTypes")
self.DiskUsage = params.get("DiskUsage")
self.InstanceFamilies = params.get("InstanceFamilies")
self.CPU = params.get("CPU")
self.Memory = params.get("Memory")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiskConfigQuotaResponse(AbstractModel):
"""DescribeDiskConfigQuota返回参数结构体
"""
def __init__(self):
r"""
:param DiskConfigSet: 云盘配置列表。
:type DiskConfigSet: list of DiskConfig
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskConfigSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskConfigSet") is not None:
self.DiskConfigSet = []
for item in params.get("DiskConfigSet"):
obj = DiskConfig()
obj._deserialize(item)
self.DiskConfigSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDiskOperationLogsRequest(AbstractModel):
"""DescribeDiskOperationLogs请求参数结构体
"""
def __init__(self):
r"""
:param Filters: 过滤条件。支持以下条件:
<li>disk-id - Array of String - 是否必填:是 - 按云盘ID过滤,每个请求最多可指定10个云盘ID。
:type Filters: list of Filter
:param EndTime: 要查询的操作日志的截止时间,例如:“2019-11-22 23:59:59"
:type EndTime: str
:param BeginTime: 要查询的操作日志的起始时间,例如:“2019-11-22 00:00:00"
:type BeginTime: str
"""
self.Filters = None
self.EndTime = None
self.BeginTime = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.EndTime = params.get("EndTime")
self.BeginTime = params.get("BeginTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiskOperationLogsResponse(AbstractModel):
"""DescribeDiskOperationLogs返回参数结构体
"""
def __init__(self):
r"""
:param DiskOperationLogSet: 云盘的操作日志列表。
:type DiskOperationLogSet: list of DiskOperationLog
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskOperationLogSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskOperationLogSet") is not None:
self.DiskOperationLogSet = []
for item in params.get("DiskOperationLogSet"):
obj = DiskOperationLog()
obj._deserialize(item)
self.DiskOperationLogSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDiskStoragePoolRequest(AbstractModel):
"""DescribeDiskStoragePool请求参数结构体
"""
def __init__(self):
r"""
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param CdcIds: 指定需要查询的独享集群ID列表,该入参不能与Filters一起使用。
:type CdcIds: list of str
:param Filters: 过滤条件。参数不支持同时指定`CdcIds`和`Filters`。<br><li>cdc-id - Array of String - 是否必填:否 -(过滤条件)按独享集群ID过滤。<br><li>zone - Array of String - 是否必填:否 -(过滤条件)按独享集群所在[可用区](/document/product/213/15753#ZoneInfo)过滤。<br><li>cage-id - Array of String - 是否必填:否 -(过滤条件)按独享集群所在围笼的ID过滤。<br><li>disk-type - Array of String - 是否必填:否 -(过滤条件)按照云盘介质类型过滤。(CLOUD_BASIC:表示普通云硬盘 | CLOUD_PREMIUM:表示高性能云硬盘。| CLOUD_SSD:SSD表示SSD云硬盘。)
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
"""
self.Limit = None
self.CdcIds = None
self.Filters = None
self.Offset = None
def _deserialize(self, params):
self.Limit = params.get("Limit")
self.CdcIds = params.get("CdcIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiskStoragePoolResponse(AbstractModel):
"""DescribeDiskStoragePool返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 符合条件的独享集群的数量
:type TotalCount: int
:param DiskStoragePoolSet: 独享集群的详细信息列表
:type DiskStoragePoolSet: list of Cdc
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DiskStoragePoolSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("DiskStoragePoolSet") is not None:
self.DiskStoragePoolSet = []
for item in params.get("DiskStoragePoolSet"):
obj = Cdc()
obj._deserialize(item)
self.DiskStoragePoolSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDisksRequest(AbstractModel):
"""DescribeDisks请求参数结构体
"""
def __init__(self):
r"""
:param Filters: 过滤条件。参数不支持同时指定`DiskIds`和`Filters`。<br><li>disk-usage - Array of String - 是否必填:否 -(过滤条件)按云盘类型过滤。 (SYSTEM_DISK:表示系统盘 | DATA_DISK:表示数据盘)<br><li>disk-charge-type - Array of String - 是否必填:否 -(过滤条件)按照云硬盘计费模式过滤。 (PREPAID:表示预付费,即包年包月 | POSTPAID_BY_HOUR:表示后付费,即按量计费。)<br><li>portable - Array of String - 是否必填:否 -(过滤条件)按是否为弹性云盘过滤。 (TRUE:表示弹性云盘 | FALSE:表示非弹性云盘。)<br><li>project-id - Array of Integer - 是否必填:否 -(过滤条件)按云硬盘所属项目ID过滤。<br><li>disk-id - Array of String - 是否必填:否 -(过滤条件)按照云硬盘ID过滤。云盘ID形如:`disk-11112222`。<br><li>disk-name - Array of String - 是否必填:否 -(过滤条件)按照云盘名称过滤。<br><li>disk-type - Array of String - 是否必填:否 -(过滤条件)按照云盘介质类型过滤。(CLOUD_BASIC:表示普通云硬盘 | CLOUD_PREMIUM:表示高性能云硬盘。| CLOUD_SSD:表示SSD云硬盘 | CLOUD_HSSD:表示增强型SSD云硬盘。| CLOUD_TSSD:表示极速型云硬盘。)<br><li>disk-state - Array of String - 是否必填:否 -(过滤条件)按照云盘状态过滤。(UNATTACHED:未挂载 | ATTACHING:挂载中 | ATTACHED:已挂载 | DETACHING:解挂中 | EXPANDING:扩容中 | ROLLBACKING:回滚中 | TORECYCLE:待回收。)<br><li>instance-id - Array of String - 是否必填:否 -(过滤条件)按照云盘挂载的云主机实例ID过滤。可根据此参数查询挂载在指定云主机下的云硬盘。<br><li>zone - Array of String - 是否必填:否 -(过滤条件)按照[可用区](/document/product/213/15753#ZoneInfo)过滤。<br><li>instance-ip-address - Array of String - 是否必填:否 -(过滤条件)按云盘所挂载云主机的内网或外网IP过滤。<br><li>instance-name - Array of String - 是否必填:否 -(过滤条件)按云盘所挂载的实例名称过滤。<br><li>tag-key - Array of String - 是否必填:否 -(过滤条件)按照标签键进行过滤。<br><li>tag-value - Array of String - 是否必填:否 -(过滤条件)照标签值进行过滤。<br><li>tag:tag-key - Array of String - 是否必填:否 -(过滤条件)按照标签键值对进行过滤。 tag-key使用具体的标签键进行替换。
:type Filters: list of Filter
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param OrderField: 云盘列表排序的依据字段。取值范围:<br><li>CREATE_TIME:依据云盘的创建时间排序<br><li>DEADLINE:依据云盘的到期时间排序<br>默认按云盘创建时间排序。
:type OrderField: str
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
:param ReturnBindAutoSnapshotPolicy: 云盘详情中是否需要返回云盘绑定的定期快照策略ID,TRUE表示需要返回,FALSE表示不返回。
:type ReturnBindAutoSnapshotPolicy: bool
:param DiskIds: 按照一个或者多个云硬盘ID查询。云硬盘ID形如:`disk-11112222`,此参数的具体格式可参考API[简介](/document/product/362/15633)的ids.N一节)。参数不支持同时指定`DiskIds`和`Filters`。
:type DiskIds: list of str
:param Order: 输出云盘列表的排列顺序。取值范围:<br><li>ASC:升序排列<br><li>DESC:降序排列。
:type Order: str
"""
self.Filters = None
self.Limit = None
self.OrderField = None
self.Offset = None
self.ReturnBindAutoSnapshotPolicy = None
self.DiskIds = None
self.Order = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Limit = params.get("Limit")
self.OrderField = params.get("OrderField")
self.Offset = params.get("Offset")
self.ReturnBindAutoSnapshotPolicy = params.get("ReturnBindAutoSnapshotPolicy")
self.DiskIds = params.get("DiskIds")
self.Order = params.get("Order")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDisksResponse(AbstractModel):
"""DescribeDisks返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 符合条件的云硬盘数量。
:type TotalCount: int
:param DiskSet: 云硬盘的详细信息列表。
:type DiskSet: list of Disk
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DiskSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("DiskSet") is not None:
self.DiskSet = []
for item in params.get("DiskSet"):
obj = Disk()
obj._deserialize(item)
self.DiskSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeInstancesDiskNumRequest(AbstractModel):
"""DescribeInstancesDiskNum请求参数结构体
"""
def __init__(self):
r"""
:param InstanceIds: 云服务器实例ID,通过[DescribeInstances](/document/product/213/15728)接口查询。
:type InstanceIds: list of str
"""
self.InstanceIds = None
def _deserialize(self, params):
self.InstanceIds = params.get("InstanceIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeInstancesDiskNumResponse(AbstractModel):
"""DescribeInstancesDiskNum返回参数结构体
"""
def __init__(self):
r"""
:param AttachDetail: 各个云服务器已挂载和可挂载弹性云盘的数量。
:type AttachDetail: list of AttachDetail
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AttachDetail = None
self.RequestId = None
def _deserialize(self, params):
if params.get("AttachDetail") is not None:
self.AttachDetail = []
for item in params.get("AttachDetail"):
obj = AttachDetail()
obj._deserialize(item)
self.AttachDetail.append(obj)
self.RequestId = params.get("RequestId")
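
# --- Illustrative usage sketch (not part of the generated SDK file) ---
# Checks how many elastic disks each instance has attached / can still attach.
# The AttachDetail model lives elsewhere in this package, so the sketch only
# serializes each entry; the client is assumed to be configured as in the
# DescribeDisks sketch above.
def _example_instances_disk_num(client, instance_ids):
    req = DescribeInstancesDiskNumRequest()
    req.InstanceIds = instance_ids  # e.g. ["ins-11112222"] (placeholder ID)
    resp = client.DescribeInstancesDiskNum(req)
    return [detail.to_json_string() for detail in resp.AttachDetail]
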
class DescribeSnapshotOperationLogsRequest(AbstractModel):
"""DescribeSnapshotOperationLogs请求参数结构体
"""
def __init__(self):
r"""
:param Filters: 过滤条件。支持以下条件:
<li>snapshot-id - Array of String - 是否必填:是 - 按快照ID过滤,每个请求最多可指定10个快照ID。
:type Filters: list of Filter
:param BeginTime: 要查询的操作日志的起始时间,例如:“2019-11-22 00:00:00"
:type BeginTime: str
:param EndTime: 要查询的操作日志的截止时间,例如:“2019-11-22 23:59:59"
:type EndTime: str
"""
self.Filters = None
self.BeginTime = None
self.EndTime = None
def _deserialize(self, params):
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.BeginTime = params.get("BeginTime")
self.EndTime = params.get("EndTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSnapshotOperationLogsResponse(AbstractModel):
"""DescribeSnapshotOperationLogs返回参数结构体
"""
def __init__(self):
r"""
:param SnapshotOperationLogSet: 快照操作日志列表。
:type SnapshotOperationLogSet: list of SnapshotOperationLog
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SnapshotOperationLogSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SnapshotOperationLogSet") is not None:
self.SnapshotOperationLogSet = []
for item in params.get("SnapshotOperationLogSet"):
obj = SnapshotOperationLog()
obj._deserialize(item)
self.SnapshotOperationLogSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSnapshotSharePermissionRequest(AbstractModel):
"""DescribeSnapshotSharePermission请求参数结构体
"""
def __init__(self):
r"""
:param SnapshotId: 要查询快照的ID。可通过[DescribeSnapshots](https://cloud.tencent.com/document/api/362/15647)查询获取。
:type SnapshotId: str
"""
self.SnapshotId = None
def _deserialize(self, params):
self.SnapshotId = params.get("SnapshotId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSnapshotSharePermissionResponse(AbstractModel):
"""DescribeSnapshotSharePermission返回参数结构体
"""
def __init__(self):
r"""
:param SharePermissionSet: 快照的分享信息的集合
:type SharePermissionSet: list of SharePermission
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SharePermissionSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SharePermissionSet") is not None:
self.SharePermissionSet = []
for item in params.get("SharePermissionSet"):
obj = SharePermission()
obj._deserialize(item)
self.SharePermissionSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSnapshotsRequest(AbstractModel):
"""DescribeSnapshots请求参数结构体
"""
def __init__(self):
r"""
:param SnapshotIds: 要查询快照的ID列表。参数不支持同时指定`SnapshotIds`和`Filters`。
:type SnapshotIds: list of str
:param Filters: 过滤条件。参数不支持同时指定`SnapshotIds`和`Filters`。<br><li>snapshot-id - Array of String - 是否必填:否 -(过滤条件)按照快照的ID过滤。快照ID形如:`snap-11112222`。<br><li>snapshot-name - Array of String - 是否必填:否 -(过滤条件)按照快照名称过滤。<br><li>snapshot-state - Array of String - 是否必填:否 -(过滤条件)按照快照状态过滤。 (NORMAL:正常 | CREATING:创建中 | ROLLBACKING:回滚中。)<br><li>disk-usage - Array of String - 是否必填:否 -(过滤条件)按创建快照的云盘类型过滤。 (SYSTEM_DISK:代表系统盘 | DATA_DISK:代表数据盘。)<br><li>project-id - Array of String - 是否必填:否 -(过滤条件)按云硬盘所属项目ID过滤。<br><li>disk-id - Array of String - 是否必填:否 -(过滤条件)按照创建快照的云硬盘ID过滤。<br><li>zone - Array of String - 是否必填:否 -(过滤条件)按照[可用区](/document/product/213/15753#ZoneInfo)过滤。<br><li>encrypt - Array of String - 是否必填:否 -(过滤条件)按是否加密盘快照过滤。 (TRUE:表示加密盘快照 | FALSE:表示非加密盘快照。)
<li>snapshot-type- Array of String - 是否必填:否 -(过滤条件)根据snapshot-type指定的快照类型查询对应的快照。
(SHARED_SNAPSHOT:表示共享过来的快照 | PRIVATE_SNAPSHOT:表示自己私有快照。)
:type Filters: list of Filter
:param Offset: 偏移量,默认为0。关于`Offset`的更进一步介绍请参考API[简介](/document/product/362/15633)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](/document/product/362/15633)中的相关小节。
:type Limit: int
:param Order: 输出云盘列表的排列顺序。取值范围:<br><li>ASC:升序排列<br><li>DESC:降序排列。
:type Order: str
:param OrderField: 快照列表排序的依据字段。取值范围:<br><li>CREATE_TIME:依据快照的创建时间排序<br>默认按创建时间排序。
:type OrderField: str
"""
self.SnapshotIds = None
self.Filters = None
self.Offset = None
self.Limit = None
self.Order = None
self.OrderField = None
def _deserialize(self, params):
self.SnapshotIds = params.get("SnapshotIds")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Order = params.get("Order")
self.OrderField = params.get("OrderField")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSnapshotsResponse(AbstractModel):
"""DescribeSnapshots返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 快照的数量。
:type TotalCount: int
:param SnapshotSet: 快照的详情列表。
:type SnapshotSet: list of Snapshot
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.SnapshotSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("SnapshotSet") is not None:
self.SnapshotSet = []
for item in params.get("SnapshotSet"):
obj = Snapshot()
obj._deserialize(item)
self.SnapshotSet.append(obj)
self.RequestId = params.get("RequestId")
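
# --- Illustrative pagination sketch (not part of the generated SDK file) ---
# TotalCount together with Offset/Limit supports paging through large result
# sets; a minimal loop over DescribeSnapshots, assuming a configured CbsClient
# as in the sketches above.
def _example_list_all_snapshots(client):
    snapshots, offset, limit = [], 0, 100
    while True:
        req = DescribeSnapshotsRequest()
        req.Offset = offset
        req.Limit = limit
        resp = client.DescribeSnapshots(req)
        snapshots.extend(resp.SnapshotSet or [])
        offset += limit
        if offset >= resp.TotalCount:
            return snapshots
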
class DetachDisksRequest(AbstractModel):
"""DetachDisks请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 将要卸载的云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询,单次请求最多可卸载10块弹性云盘。
:type DiskIds: list of str
:param InstanceId: 对于非共享型云盘,会忽略该参数;对于共享型云盘,该参数表示要从哪个CVM实例上卸载云盘。
:type InstanceId: str
"""
self.DiskIds = None
self.InstanceId = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
self.InstanceId = params.get("InstanceId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DetachDisksResponse(AbstractModel):
"""DetachDisks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
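
# --- Illustrative usage sketch (not part of the generated SDK file) ---
# Detaches up to 10 elastic disks in one call; InstanceId is only meaningful for
# shared disks. Assumes a configured CbsClient as in the sketches above.
def _example_detach_disks(client, disk_ids):
    req = DetachDisksRequest()
    req.DiskIds = disk_ids  # e.g. ["disk-11112222"] (placeholder ID)
    return client.DetachDisks(req).RequestId
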
class Disk(AbstractModel):
"""描述了云硬盘的详细信息
"""
def __init__(self):
r"""
:param DeleteWithInstance: 云盘是否与挂载的实例一起销毁。<br><li>true:销毁实例时会同时销毁云盘,只支持按小时后付费云盘。<br><li>false:销毁实例时不销毁云盘。
注意:此字段可能返回 null,表示取不到有效值。
:type DeleteWithInstance: bool
:param RenewFlag: 自动续费标识。取值范围:<br><li>NOTIFY_AND_AUTO_RENEW:通知过期且自动续费<br><li>NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费<br><li>DISABLE_NOTIFY_AND_MANUAL_RENEW:不通知过期不自动续费。
注意:此字段可能返回 null,表示取不到有效值。
:type RenewFlag: str
:param DiskType: 硬盘介质类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘<br><li>CLOUD_HSSD:表示增强型SSD云硬盘<br><li>CLOUD_TSSD:表示极速型SSD云硬盘。
:type DiskType: str
:param DiskState: 云盘状态。取值范围:<br><li>UNATTACHED:未挂载<br><li>ATTACHING:挂载中<br><li>ATTACHED:已挂载<br><li>DETACHING:解挂中<br><li>EXPANDING:扩容中<br><li>ROLLBACKING:回滚中<br><li>TORECYCLE:待回收<br><li>DUMPING:拷贝硬盘中。
:type DiskState: str
:param SnapshotCount: 云盘拥有的快照总数。
:type SnapshotCount: int
:param AutoRenewFlagError: 云盘已挂载到子机,且子机与云盘都是包年包月。<br><li>true:子机设置了自动续费标识,但云盘未设置<br><li>false:云盘自动续费标识正常。
注意:此字段可能返回 null,表示取不到有效值。
:type AutoRenewFlagError: bool
:param Rollbacking: 云盘是否处于快照回滚状态。取值范围:<br><li>false:表示不处于快照回滚状态<br><li>true:表示处于快照回滚状态。
:type Rollbacking: bool
:param InstanceIdList: 对于非共享型云盘,该参数为空数组。对于共享型云盘,则表示该云盘当前被挂载到的CVM实例InstanceId
:type InstanceIdList: list of str
:param Encrypt: 云盘是否为加密盘。取值范围:<br><li>false:表示非加密盘<br><li>true:表示加密盘。
:type Encrypt: bool
:param DiskName: 云硬盘名称。
:type DiskName: str
:param BackupDisk: 云硬盘因欠费销毁或者到期销毁时, 是否使用快照备份数据的标识。true表示销毁时创建快照进行数据备份。false表示直接销毁,不进行数据备份。
:type BackupDisk: bool
:param Tags: 与云盘绑定的标签,云盘未绑定标签则取值为空。
注意:此字段可能返回 null,表示取不到有效值。
:type Tags: list of Tag
:param InstanceId: 云硬盘挂载的云主机ID。
:type InstanceId: str
:param AttachMode: 云盘的挂载类型。取值范围:<br><li>PF: PF挂载<br><li>VF: VF挂载
注意:此字段可能返回 null,表示取不到有效值。
:type AttachMode: str
:param AutoSnapshotPolicyIds: 云盘关联的定期快照ID。只有在调用DescribeDisks接口时,入参ReturnBindAutoSnapshotPolicy取值为TRUE才会返回该参数。
注意:此字段可能返回 null,表示取不到有效值。
:type AutoSnapshotPolicyIds: list of str
:param ThroughputPerformance: 云硬盘额外性能值,单位MB/s。
注意:此字段可能返回 null,表示取不到有效值。
:type ThroughputPerformance: int
:param Migrating: 云盘是否处于类型变更中。取值范围:<br><li>false:表示云盘不处于类型变更中<br><li>true:表示云盘已发起类型变更,正处于迁移中。
注意:此字段可能返回 null,表示取不到有效值。
:type Migrating: bool
:param DiskId: 云硬盘ID。
:type DiskId: str
:param SnapshotSize: 云盘拥有的快照总容量,单位为MB。
:type SnapshotSize: int
:param Placement: 云硬盘所在的位置。
:type Placement: :class:`tencentcloud.cbs.v20170312.models.Placement`
:param IsReturnable: 判断预付费的云盘是否支持主动退还。<br><li>true:支持主动退还<br><li>false:不支持主动退还。
注意:此字段可能返回 null,表示取不到有效值。
:type IsReturnable: bool
:param DeadlineTime: 云硬盘的到期时间。
:type DeadlineTime: str
:param Attached: 云盘是否挂载到云主机上。取值范围:<br><li>false:表示未挂载<br><li>true:表示已挂载。
:type Attached: bool
:param DiskSize: 云硬盘大小,单位GB。
:type DiskSize: int
:param MigratePercent: 云盘类型变更的迁移进度,取值0到100。
注意:此字段可能返回 null,表示取不到有效值。
:type MigratePercent: int
:param DiskUsage: 云硬盘类型。取值范围:<br><li>SYSTEM_DISK:系统盘<br><li>DATA_DISK:数据盘。
:type DiskUsage: str
:param DiskChargeType: 付费模式。取值范围:<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:后付费,即按量计费。
:type DiskChargeType: str
:param Portable: 是否为弹性云盘,false表示非弹性云盘,true表示弹性云盘。
:type Portable: bool
:param SnapshotAbility: 云盘是否具备创建快照的能力。取值范围:<br><li>false表示不具备<br><li>true表示具备。
:type SnapshotAbility: bool
:param DeadlineError: 在云盘已挂载到实例,且实例与云盘都是包年包月的条件下,此字段才有意义。<br><li>true:云盘到期时间早于实例。<br><li>false:云盘到期时间晚于实例。
注意:此字段可能返回 null,表示取不到有效值。
:type DeadlineError: bool
:param RollbackPercent: 云盘快照回滚的进度。
:type RollbackPercent: int
:param DifferDaysOfDeadline: 当前时间距离盘到期的天数(仅对预付费盘有意义)。
注意:此字段可能返回 null,表示取不到有效值。
:type DifferDaysOfDeadline: int
:param ReturnFailCode: 预付费云盘在不支持主动退还的情况下,该参数表明不支持主动退还的具体原因。取值范围:<br><li>1:云硬盘已经退还<br><li>2:云硬盘已过期<br><li>3:云盘不支持退还<br><li>8:超过可退还数量的限制。
注意:此字段可能返回 null,表示取不到有效值。
:type ReturnFailCode: int
:param Shareable: 云盘是否为共享型云盘。
:type Shareable: bool
:param CreateTime: 云硬盘的创建时间。
:type CreateTime: str
:param DeleteSnapshot: 销毁云盘时删除关联的非永久保留快照。0 表示非永久快照不随云盘销毁而销毁,1表示非永久快照随云盘销毁而销毁,默认取0。快照是否永久保留可以通过DescribeSnapshots接口返回的快照详情的IsPermanent字段来判断,true表示永久快照,false表示非永久快照。
:type DeleteSnapshot: int
"""
self.DeleteWithInstance = None
self.RenewFlag = None
self.DiskType = None
self.DiskState = None
self.SnapshotCount = None
self.AutoRenewFlagError = None
self.Rollbacking = None
self.InstanceIdList = None
self.Encrypt = None
self.DiskName = None
self.BackupDisk = None
self.Tags = None
self.InstanceId = None
self.AttachMode = None
self.AutoSnapshotPolicyIds = None
self.ThroughputPerformance = None
self.Migrating = None
self.DiskId = None
self.SnapshotSize = None
self.Placement = None
self.IsReturnable = None
self.DeadlineTime = None
self.Attached = None
self.DiskSize = None
self.MigratePercent = None
self.DiskUsage = None
self.DiskChargeType = None
self.Portable = None
self.SnapshotAbility = None
self.DeadlineError = None
self.RollbackPercent = None
self.DifferDaysOfDeadline = None
self.ReturnFailCode = None
self.Shareable = None
self.CreateTime = None
self.DeleteSnapshot = None
def _deserialize(self, params):
self.DeleteWithInstance = params.get("DeleteWithInstance")
self.RenewFlag = params.get("RenewFlag")
self.DiskType = params.get("DiskType")
self.DiskState = params.get("DiskState")
self.SnapshotCount = params.get("SnapshotCount")
self.AutoRenewFlagError = params.get("AutoRenewFlagError")
self.Rollbacking = params.get("Rollbacking")
self.InstanceIdList = params.get("InstanceIdList")
self.Encrypt = params.get("Encrypt")
self.DiskName = params.get("DiskName")
self.BackupDisk = params.get("BackupDisk")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
self.InstanceId = params.get("InstanceId")
self.AttachMode = params.get("AttachMode")
self.AutoSnapshotPolicyIds = params.get("AutoSnapshotPolicyIds")
self.ThroughputPerformance = params.get("ThroughputPerformance")
self.Migrating = params.get("Migrating")
self.DiskId = params.get("DiskId")
self.SnapshotSize = params.get("SnapshotSize")
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.IsReturnable = params.get("IsReturnable")
self.DeadlineTime = params.get("DeadlineTime")
self.Attached = params.get("Attached")
self.DiskSize = params.get("DiskSize")
self.MigratePercent = params.get("MigratePercent")
self.DiskUsage = params.get("DiskUsage")
self.DiskChargeType = params.get("DiskChargeType")
self.Portable = params.get("Portable")
self.SnapshotAbility = params.get("SnapshotAbility")
self.DeadlineError = params.get("DeadlineError")
self.RollbackPercent = params.get("RollbackPercent")
self.DifferDaysOfDeadline = params.get("DifferDaysOfDeadline")
self.ReturnFailCode = params.get("ReturnFailCode")
self.Shareable = params.get("Shareable")
self.CreateTime = params.get("CreateTime")
self.DeleteSnapshot = params.get("DeleteSnapshot")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
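
# --- Illustrative deserialization sketch (not part of the generated SDK file) ---
# The _deserialize methods turn API JSON into typed models, recursing into nested
# structures such as Placement and Tags; unknown keys only trigger the
# "fields are useless" warning instead of failing. Purely local example data.
def _example_disk_from_dict():
    payload = {
        "DiskId": "disk-11112222",
        "DiskType": "CLOUD_PREMIUM",
        "DiskSize": 100,
        "Placement": {"Zone": "ap-guangzhou-3", "ProjectId": 0},
        "Tags": [{"Key": "env", "Value": "prod"}],
    }
    disk = Disk()
    disk._deserialize(payload)
    return disk.Placement.Zone, [tag.Key for tag in disk.Tags]
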
class DiskChargePrepaid(AbstractModel):
"""描述了实例的计费模式
"""
def __init__(self):
r"""
:param Period: 购买云盘的时长,默认单位为月,取值范围:1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36。
:type Period: int
:param RenewFlag: 自动续费标识。取值范围:<br><li>NOTIFY_AND_AUTO_RENEW:通知过期且自动续费<br><li>NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费<br><li>DISABLE_NOTIFY_AND_MANUAL_RENEW:不通知过期不自动续费<br><br>默认取值:NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费。
:type RenewFlag: str
:param CurInstanceDeadline: 需要将云盘的到期时间与挂载的子机对齐时,可传入该参数。该参数表示子机当前的到期时间,此时Period如果传入,则表示子机需要续费的时长,云盘会自动按对齐到子机续费后的到期时间续费,示例取值:2018-03-30 20:15:03。
:type CurInstanceDeadline: str
"""
self.Period = None
self.RenewFlag = None
self.CurInstanceDeadline = None
def _deserialize(self, params):
self.Period = params.get("Period")
self.RenewFlag = params.get("RenewFlag")
self.CurInstanceDeadline = params.get("CurInstanceDeadline")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DiskConfig(AbstractModel):
"""云盘配置。
"""
def __init__(self):
r"""
:param Available: 配置是否可用。
:type Available: bool
        :param DiskType: 云盘介质类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘。
:type DiskType: str
:param DiskUsage: 云盘类型。取值范围:<br><li>SYSTEM_DISK:表示系统盘<br><li>DATA_DISK:表示数据盘。
:type DiskUsage: str
:param DiskChargeType: 付费模式。取值范围:<br><li>PREPAID:表示预付费,即包年包月<br><li>POSTPAID_BY_HOUR:表示后付费,即按量计费。
:type DiskChargeType: str
:param MaxDiskSize: 最大可配置云盘大小,单位GB。
:type MaxDiskSize: int
:param MinDiskSize: 最小可配置云盘大小,单位GB。
:type MinDiskSize: int
:param Zone: 云硬盘所属的[可用区](/document/product/213/15753#ZoneInfo)。
:type Zone: str
:param DeviceClass: 实例机型。
注意:此字段可能返回 null,表示取不到有效值。
:type DeviceClass: str
:param InstanceFamily: 实例机型系列。详见[实例类型](https://cloud.tencent.com/document/product/213/11518)
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceFamily: str
"""
self.Available = None
self.DiskType = None
self.DiskUsage = None
self.DiskChargeType = None
self.MaxDiskSize = None
self.MinDiskSize = None
self.Zone = None
self.DeviceClass = None
self.InstanceFamily = None
def _deserialize(self, params):
self.Available = params.get("Available")
self.DiskType = params.get("DiskType")
self.DiskUsage = params.get("DiskUsage")
self.DiskChargeType = params.get("DiskChargeType")
self.MaxDiskSize = params.get("MaxDiskSize")
self.MinDiskSize = params.get("MinDiskSize")
self.Zone = params.get("Zone")
self.DeviceClass = params.get("DeviceClass")
self.InstanceFamily = params.get("InstanceFamily")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DiskOperationLog(AbstractModel):
"""云盘操作日志。
"""
def __init__(self):
r"""
:param OperationState: 操作的状态。取值范围:
SUCCESS :表示操作成功
FAILED :表示操作失败
PROCESSING :表示操作中。
:type OperationState: str
:param StartTime: 开始时间。
:type StartTime: str
:param Operator: 操作者的UIN。
:type Operator: str
:param Operation: 操作类型。取值范围:
CBS_OPERATION_ATTACH:挂载云硬盘
CBS_OPERATION_DETACH:解挂云硬盘
CBS_OPERATION_RENEW:续费
CBS_OPERATION_EXPAND:扩容
CBS_OPERATION_CREATE:创建
CBS_OPERATION_ISOLATE:隔离
CBS_OPERATION_MODIFY:修改云硬盘属性
ASP_OPERATION_BIND:关联定期快照策略
ASP_OPERATION_UNBIND:取消关联定期快照策略
:type Operation: str
:param EndTime: 结束时间。
:type EndTime: str
:param DiskId: 操作的云盘ID。
:type DiskId: str
"""
self.OperationState = None
self.StartTime = None
self.Operator = None
self.Operation = None
self.EndTime = None
self.DiskId = None
def _deserialize(self, params):
self.OperationState = params.get("OperationState")
self.StartTime = params.get("StartTime")
self.Operator = params.get("Operator")
self.Operation = params.get("Operation")
self.EndTime = params.get("EndTime")
self.DiskId = params.get("DiskId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Filter(AbstractModel):
"""描述键值对过滤器,用于条件过滤查询。
"""
def __init__(self):
r"""
:param Values: 一个或者多个过滤值。
:type Values: list of str
:param Name: 过滤键的名称。
:type Name: str
"""
self.Values = None
self.Name = None
def _deserialize(self, params):
self.Values = params.get("Values")
self.Name = params.get("Name")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GetSnapOverviewRequest(AbstractModel):
"""GetSnapOverview请求参数结构体
"""
class GetSnapOverviewResponse(AbstractModel):
"""GetSnapOverview返回参数结构体
"""
def __init__(self):
r"""
:param TotalSize: 用户快照总大小
:type TotalSize: float
:param RealTradeSize: 用户快照总大小(用于计费)
:type RealTradeSize: float
:param FreeQuota: 快照免费额度
:type FreeQuota: float
:param TotalNums: 快照总个数
:type TotalNums: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalSize = None
self.RealTradeSize = None
self.FreeQuota = None
self.TotalNums = None
self.RequestId = None
def _deserialize(self, params):
self.TotalSize = params.get("TotalSize")
self.RealTradeSize = params.get("RealTradeSize")
self.FreeQuota = params.get("FreeQuota")
self.TotalNums = params.get("TotalNums")
self.RequestId = params.get("RequestId")
class Image(AbstractModel):
"""镜像。
"""
def __init__(self):
r"""
:param ImageName: 镜像名称。
:type ImageName: str
:param ImageId: 镜像实例ID。
:type ImageId: str
"""
self.ImageName = None
self.ImageId = None
def _deserialize(self, params):
self.ImageName = params.get("ImageName")
self.ImageId = params.get("ImageId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InitializeDisksRequest(AbstractModel):
"""InitializeDisks请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 待重新初始化的云硬盘ID列表, 单次初始化限制20块以内
:type DiskIds: list of str
"""
self.DiskIds = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InitializeDisksResponse(AbstractModel):
"""InitializeDisks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class InquirePriceModifyDiskExtraPerformanceRequest(AbstractModel):
"""InquirePriceModifyDiskExtraPerformance请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param ThroughputPerformance: 额外购买的云硬盘性能值,单位MB/s。
:type ThroughputPerformance: int
"""
self.DiskId = None
self.ThroughputPerformance = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
self.ThroughputPerformance = params.get("ThroughputPerformance")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InquirePriceModifyDiskExtraPerformanceResponse(AbstractModel):
"""InquirePriceModifyDiskExtraPerformance返回参数结构体
"""
def __init__(self):
r"""
:param DiskPrice: 描述了调整云盘额外性能时对应的价格。
:type DiskPrice: :class:`tencentcloud.cbs.v20170312.models.Price`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskPrice = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskPrice") is not None:
self.DiskPrice = Price()
self.DiskPrice._deserialize(params.get("DiskPrice"))
self.RequestId = params.get("RequestId")
class InquiryPriceCreateDisksRequest(AbstractModel):
"""InquiryPriceCreateDisks请求参数结构体
"""
def __init__(self):
r"""
:param DiskType: 硬盘介质类型。取值范围:<br><li>CLOUD_BASIC:表示普通云硬盘<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘<br><li>CLOUD_HSSD:表示增强型SSD云硬盘<br><li>CLOUD_TSSD:表示极速型SSD云硬盘。
:type DiskType: str
:param DiskSize: 云硬盘大小,单位为GB。云盘大小取值范围参见云硬盘[产品分类](/document/product/362/2353)的说明。
:type DiskSize: int
:param DiskChargeType: 云硬盘计费类型。<br><li>PREPAID:预付费,即包年包月<br><li>POSTPAID_BY_HOUR:按小时后付费
:type DiskChargeType: str
:param DiskChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数指定包年包月云盘的购买时长、是否设置自动续费等属性。<br>创建预付费云盘该参数必传,创建按小时后付费云盘无需传该参数。
:type DiskChargePrepaid: :class:`tencentcloud.cbs.v20170312.models.DiskChargePrepaid`
:param DiskCount: 购买云盘的数量。不填则默认为1。
:type DiskCount: int
:param ProjectId: 云盘所属项目ID。
:type ProjectId: int
:param ThroughputPerformance: 额外购买的云硬盘性能值,单位MB/s。<br>目前仅支持增强型SSD云硬盘(CLOUD_HSSD)和极速型SSD云硬盘(CLOUD_TSSD)
:type ThroughputPerformance: int
"""
self.DiskType = None
self.DiskSize = None
self.DiskChargeType = None
self.DiskChargePrepaid = None
self.DiskCount = None
self.ProjectId = None
self.ThroughputPerformance = None
def _deserialize(self, params):
self.DiskType = params.get("DiskType")
self.DiskSize = params.get("DiskSize")
self.DiskChargeType = params.get("DiskChargeType")
if params.get("DiskChargePrepaid") is not None:
self.DiskChargePrepaid = DiskChargePrepaid()
self.DiskChargePrepaid._deserialize(params.get("DiskChargePrepaid"))
self.DiskCount = params.get("DiskCount")
self.ProjectId = params.get("ProjectId")
self.ThroughputPerformance = params.get("ThroughputPerformance")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InquiryPriceCreateDisksResponse(AbstractModel):
"""InquiryPriceCreateDisks返回参数结构体
"""
def __init__(self):
r"""
:param DiskPrice: 描述了新购云盘的价格。
:type DiskPrice: :class:`tencentcloud.cbs.v20170312.models.Price`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskPrice = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskPrice") is not None:
self.DiskPrice = Price()
self.DiskPrice._deserialize(params.get("DiskPrice"))
self.RequestId = params.get("RequestId")
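
# --- Illustrative price-inquiry sketch (not part of the generated SDK file) ---
# Quotes a prepaid (PREPAID) premium disk before purchase; DiskChargePrepaid is
# required for prepaid quotes. Assumes a configured CbsClient as in the sketches
# above; size and period are placeholder values.
def _example_quote_prepaid_disk(client):
    req = InquiryPriceCreateDisksRequest()
    req.DiskType = "CLOUD_PREMIUM"
    req.DiskSize = 200                 # GB
    req.DiskChargeType = "PREPAID"
    prepaid = DiskChargePrepaid()
    prepaid.Period = 12                # months
    prepaid.RenewFlag = "NOTIFY_AND_AUTO_RENEW"
    req.DiskChargePrepaid = prepaid
    req.DiskCount = 1
    resp = client.InquiryPriceCreateDisks(req)
    return resp.DiskPrice.DiscountPrice
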
class InquiryPriceRenewDisksRequest(AbstractModel):
"""InquiryPriceRenewDisks请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskIds: list of str
:param DiskChargePrepaids: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月云盘的购买时长。如果在该参数中指定CurInstanceDeadline,则会按对齐到子机到期时间来续费。如果是批量续费询价,该参数与Disks参数一一对应,元素数量需保持一致。
:type DiskChargePrepaids: list of DiskChargePrepaid
:param NewDeadline: 指定云盘新的到期时间,形式如:2017-12-17 00:00:00。参数`NewDeadline`和`DiskChargePrepaids`是两种指定询价时长的方式,两者必传一个。
:type NewDeadline: str
:param ProjectId: 云盘所属项目ID。 如传入则仅用于鉴权。
:type ProjectId: int
"""
self.DiskIds = None
self.DiskChargePrepaids = None
self.NewDeadline = None
self.ProjectId = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
if params.get("DiskChargePrepaids") is not None:
self.DiskChargePrepaids = []
for item in params.get("DiskChargePrepaids"):
obj = DiskChargePrepaid()
obj._deserialize(item)
self.DiskChargePrepaids.append(obj)
self.NewDeadline = params.get("NewDeadline")
self.ProjectId = params.get("ProjectId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InquiryPriceRenewDisksResponse(AbstractModel):
"""InquiryPriceRenewDisks返回参数结构体
"""
def __init__(self):
r"""
:param DiskPrice: 描述了续费云盘的价格。
:type DiskPrice: :class:`tencentcloud.cbs.v20170312.models.PrepayPrice`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskPrice = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskPrice") is not None:
self.DiskPrice = PrepayPrice()
self.DiskPrice._deserialize(params.get("DiskPrice"))
self.RequestId = params.get("RequestId")
class InquiryPriceResizeDiskRequest(AbstractModel):
"""InquiryPriceResizeDisk请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param DiskSize: 云硬盘扩容后的大小,单位为GB,不得小于当前云硬盘大小。云盘大小取值范围参见云硬盘[产品分类](/document/product/362/2353)的说明。
:type DiskSize: int
:param ProjectId: 云盘所属项目ID。 如传入则仅用于鉴权。
:type ProjectId: int
"""
self.DiskId = None
self.DiskSize = None
self.ProjectId = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
self.DiskSize = params.get("DiskSize")
self.ProjectId = params.get("ProjectId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InquiryPriceResizeDiskResponse(AbstractModel):
"""InquiryPriceResizeDisk返回参数结构体
"""
def __init__(self):
r"""
:param DiskPrice: 描述了扩容云盘的价格。
:type DiskPrice: :class:`tencentcloud.cbs.v20170312.models.PrepayPrice`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiskPrice = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DiskPrice") is not None:
self.DiskPrice = PrepayPrice()
self.DiskPrice._deserialize(params.get("DiskPrice"))
self.RequestId = params.get("RequestId")
class ModifyAutoSnapshotPolicyAttributeRequest(AbstractModel):
"""ModifyAutoSnapshotPolicyAttribute请求参数结构体
"""
def __init__(self):
r"""
:param AutoSnapshotPolicyId: 定期快照策略ID。
:type AutoSnapshotPolicyId: str
:param Policy: 定期快照的执行策略。
:type Policy: list of Policy
:param AutoSnapshotPolicyName: 要创建的定期快照策略名。不传则默认为“未命名”。最大长度不能超60个字节。
:type AutoSnapshotPolicyName: str
:param IsActivated: 是否激活定期快照策略,FALSE表示未激活,TRUE表示激活,默认为TRUE。
:type IsActivated: bool
:param IsPermanent: 通过该定期快照策略创建的快照是否永久保留。FALSE表示非永久保留,TRUE表示永久保留,默认为FALSE。
:type IsPermanent: bool
:param RetentionDays: 通过该定期快照策略创建的快照保留天数,该参数不可与`IsPermanent`参数冲突,即若定期快照策略设置为永久保留,`RetentionDays`应置0。
:type RetentionDays: int
"""
self.AutoSnapshotPolicyId = None
self.Policy = None
self.AutoSnapshotPolicyName = None
self.IsActivated = None
self.IsPermanent = None
self.RetentionDays = None
def _deserialize(self, params):
self.AutoSnapshotPolicyId = params.get("AutoSnapshotPolicyId")
if params.get("Policy") is not None:
self.Policy = []
for item in params.get("Policy"):
obj = Policy()
obj._deserialize(item)
self.Policy.append(obj)
self.AutoSnapshotPolicyName = params.get("AutoSnapshotPolicyName")
self.IsActivated = params.get("IsActivated")
self.IsPermanent = params.get("IsPermanent")
self.RetentionDays = params.get("RetentionDays")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyAutoSnapshotPolicyAttributeResponse(AbstractModel):
"""ModifyAutoSnapshotPolicyAttribute返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
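
# --- Illustrative scheduling sketch (not part of the generated SDK file) ---
# Policy.DayOfWeek/Hour (the Policy model is defined later in this file) describe
# when periodic snapshots fire. This sketch reschedules an existing policy to
# 03:00 on Monday and Thursday with a 7-day retention; the policy ID and the
# configured CbsClient are assumptions.
def _example_reschedule_snapshot_policy(client, policy_id):
    schedule = Policy()
    schedule.DayOfWeek = [1, 4]   # Monday and Thursday (0 is Sunday)
    schedule.Hour = [3]           # 03:00
    req = ModifyAutoSnapshotPolicyAttributeRequest()
    req.AutoSnapshotPolicyId = policy_id
    req.Policy = [schedule]
    req.IsPermanent = False
    req.RetentionDays = 7
    client.ModifyAutoSnapshotPolicyAttribute(req)
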
class ModifyDiskAttributesRequest(AbstractModel):
"""ModifyDiskAttributes请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 一个或多个待操作的云硬盘ID。如果传入多个云盘ID,仅支持所有云盘修改为同一属性。
:type DiskIds: list of str
:param ProjectId: 新的云硬盘项目ID,只支持修改弹性云盘的项目ID。通过[DescribeProject](/document/api/378/4400)接口查询可用项目及其ID。
:type ProjectId: int
:param DiskName: 新的云硬盘名称。
:type DiskName: str
:param Portable: 是否为弹性云盘,FALSE表示非弹性云盘,TRUE表示弹性云盘。仅支持非弹性云盘修改为弹性云盘。
:type Portable: bool
:param DeleteWithInstance: 成功挂载到云主机后该云硬盘是否随云主机销毁,TRUE表示随云主机销毁,FALSE表示不随云主机销毁。仅支持按量计费云硬盘数据盘。
:type DeleteWithInstance: bool
:param DiskType: 变更云盘类型时,可传入该参数,表示变更的目标类型,取值范围:<br><li>CLOUD_PREMIUM:表示高性能云硬盘<br><li>CLOUD_SSD:表示SSD云硬盘。<br>当前不支持批量变更类型,即传入DiskType时,DiskIds仅支持传入一块云盘;<br>变更云盘类型时不支持同时变更其他属性。
:type DiskType: str
"""
self.DiskIds = None
self.ProjectId = None
self.DiskName = None
self.Portable = None
self.DeleteWithInstance = None
self.DiskType = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
self.ProjectId = params.get("ProjectId")
self.DiskName = params.get("DiskName")
self.Portable = params.get("Portable")
self.DeleteWithInstance = params.get("DeleteWithInstance")
self.DiskType = params.get("DiskType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiskAttributesResponse(AbstractModel):
"""ModifyDiskAttributes返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
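
# --- Illustrative attribute-change sketch (not part of the generated SDK file) ---
# Renames two disks in one call; every disk in DiskIds receives the same new
# attributes. Placeholder IDs; CbsClient assumed configured as in the sketches above.
def _example_rename_disks(client):
    req = ModifyDiskAttributesRequest()
    req.DiskIds = ["disk-11112222", "disk-33334444"]
    req.DiskName = "backup-data"
    client.ModifyDiskAttributes(req)
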
class ModifyDiskExtraPerformanceRequest(AbstractModel):
"""ModifyDiskExtraPerformance请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 需要创建快照的云硬盘ID,可通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param ThroughputPerformance: 额外购买的云硬盘性能值,单位MB/s。
:type ThroughputPerformance: int
"""
self.DiskId = None
self.ThroughputPerformance = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
self.ThroughputPerformance = params.get("ThroughputPerformance")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiskExtraPerformanceResponse(AbstractModel):
"""ModifyDiskExtraPerformance返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyDisksChargeTypeRequest(AbstractModel):
"""ModifyDisksChargeType请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 一个或多个待操作的云硬盘ID。每次请求批量云盘上限为100。
:type DiskIds: list of str
:param DiskChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。
:type DiskChargePrepaid: :class:`tencentcloud.cbs.v20170312.models.DiskChargePrepaid`
:param DiskChargePostpaid: 后付费模式
:type DiskChargePostpaid: bool
"""
self.DiskIds = None
self.DiskChargePrepaid = None
self.DiskChargePostpaid = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
if params.get("DiskChargePrepaid") is not None:
self.DiskChargePrepaid = DiskChargePrepaid()
self.DiskChargePrepaid._deserialize(params.get("DiskChargePrepaid"))
self.DiskChargePostpaid = params.get("DiskChargePostpaid")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDisksChargeTypeResponse(AbstractModel):
"""ModifyDisksChargeType返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyDisksRenewFlagRequest(AbstractModel):
"""ModifyDisksRenewFlag请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 一个或多个待操作的云硬盘ID。
:type DiskIds: list of str
:param RenewFlag: 云盘的续费标识。取值范围:<br><li>NOTIFY_AND_AUTO_RENEW:通知过期且自动续费<br><li>NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费<br><li>DISABLE_NOTIFY_AND_MANUAL_RENEW:不通知过期不自动续费。
:type RenewFlag: str
"""
self.DiskIds = None
self.RenewFlag = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
self.RenewFlag = params.get("RenewFlag")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDisksRenewFlagResponse(AbstractModel):
"""ModifyDisksRenewFlag返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySnapshotAttributeRequest(AbstractModel):
"""ModifySnapshotAttribute请求参数结构体
"""
def __init__(self):
r"""
:param SnapshotId: 快照ID, 可通过[DescribeSnapshots](/document/product/362/15647)查询。
:type SnapshotId: str
:param SnapshotName: 新的快照名称。最长为60个字符。
:type SnapshotName: str
:param IsPermanent: 快照的保留方式,FALSE表示非永久保留,TRUE表示永久保留。
:type IsPermanent: bool
:param Deadline: 快照的到期时间;设置好快照将会被同时设置为非永久保留方式;超过到期时间后快照将会被自动删除。
:type Deadline: str
"""
self.SnapshotId = None
self.SnapshotName = None
self.IsPermanent = None
self.Deadline = None
def _deserialize(self, params):
self.SnapshotId = params.get("SnapshotId")
self.SnapshotName = params.get("SnapshotName")
self.IsPermanent = params.get("IsPermanent")
self.Deadline = params.get("Deadline")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifySnapshotAttributeResponse(AbstractModel):
"""ModifySnapshotAttribute返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySnapshotsSharePermissionRequest(AbstractModel):
"""ModifySnapshotsSharePermission请求参数结构体
"""
def __init__(self):
r"""
:param AccountIds: 接收分享快照的账号Id列表,array型参数的格式可以参考[API简介](https://cloud.tencent.com/document/api/213/568)。帐号ID不同于QQ号,查询用户帐号ID请查看[帐号信息](https://console.cloud.tencent.com/developer)中的帐号ID栏。
:type AccountIds: list of str
:param Permission: 操作,包括 SHARE,CANCEL。其中SHARE代表分享操作,CANCEL代表取消分享操作。
:type Permission: str
:param SnapshotIds: 快照ID, 可通过[DescribeSnapshots](https://cloud.tencent.com/document/api/362/15647)查询获取。
:type SnapshotIds: list of str
"""
self.AccountIds = None
self.Permission = None
self.SnapshotIds = None
def _deserialize(self, params):
self.AccountIds = params.get("AccountIds")
self.Permission = params.get("Permission")
self.SnapshotIds = params.get("SnapshotIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifySnapshotsSharePermissionResponse(AbstractModel):
"""ModifySnapshotsSharePermission返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
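
# --- Illustrative snapshot-sharing sketch (not part of the generated SDK file) ---
# Shares two snapshots with another account, then reads the share list back via
# DescribeSnapshotSharePermission. Account and snapshot IDs are placeholders and
# the CbsClient is assumed configured as in the sketches above.
def _example_share_snapshots(client):
    req = ModifySnapshotsSharePermissionRequest()
    req.AccountIds = ["100000000001"]
    req.Permission = "SHARE"            # use "CANCEL" to revoke the share
    req.SnapshotIds = ["snap-11112222", "snap-33334444"]
    client.ModifySnapshotsSharePermission(req)
    check = DescribeSnapshotSharePermissionRequest()
    check.SnapshotId = "snap-11112222"
    return client.DescribeSnapshotSharePermission(check).SharePermissionSet
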
class Placement(AbstractModel):
"""描述了实例的抽象位置,包括其所在的可用区,所属的项目,以及所属的独享集群的ID和名字。
"""
def __init__(self):
r"""
:param Zone: 云硬盘所属的[可用区](/document/product/213/15753#ZoneInfo)。该参数也可以通过调用 [DescribeZones](/document/product/213/15707) 的返回值中的Zone字段来获取。
:type Zone: str
:param CageId: 围笼Id。作为入参时,表示对指定的CageId的资源进行操作,可为空。 作为出参时,表示资源所属围笼ID,可为空。
注意:此字段可能返回 null,表示取不到有效值。
:type CageId: str
:param ProjectId: 实例所属项目ID。该参数可以通过调用 [DescribeProject](/document/api/378/4400) 的返回值中的 projectId 字段来获取。不填为默认项目。
:type ProjectId: int
:param CdcName: 独享集群名字。作为入参时,忽略。作为出参时,表示云硬盘所属的独享集群名,可为空。
注意:此字段可能返回 null,表示取不到有效值。
:type CdcName: str
:param CdcId: 实例所属的独享集群ID。作为入参时,表示对指定的CdcId独享集群的资源进行操作,可为空。 作为出参时,表示资源所属的独享集群的ID,可为空。
注意:此字段可能返回 null,表示取不到有效值。
:type CdcId: str
:param DedicatedClusterId: 独享集群id。
:type DedicatedClusterId: str
"""
self.Zone = None
self.CageId = None
self.ProjectId = None
self.CdcName = None
self.CdcId = None
self.DedicatedClusterId = None
def _deserialize(self, params):
self.Zone = params.get("Zone")
self.CageId = params.get("CageId")
self.ProjectId = params.get("ProjectId")
self.CdcName = params.get("CdcName")
self.CdcId = params.get("CdcId")
self.DedicatedClusterId = params.get("DedicatedClusterId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Policy(AbstractModel):
"""描述了定期快照的执行策略。可理解为在DayOfWeek指定的那几天中,在Hour指定的小时执行该条定期快照策略。
"""
def __init__(self):
r"""
:param DayOfWeek: 指定每周从周一到周日需要触发定期快照的日期,取值范围:[0, 6]。0表示周日触发,1-6分别表示周一至周六。
:type DayOfWeek: list of int non-negative
:param Hour: 指定定期快照策略的触发时间。单位为小时,取值范围:[0, 23]。00:00 ~ 23:00 共 24 个时间点可选,1表示 01:00,依此类推。
:type Hour: list of int non-negative
"""
self.DayOfWeek = None
self.Hour = None
def _deserialize(self, params):
self.DayOfWeek = params.get("DayOfWeek")
self.Hour = params.get("Hour")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PrepayPrice(AbstractModel):
"""预付费订单的费用。
"""
def __init__(self):
r"""
:param OriginalPrice: 预付费云盘或快照预支费用的原价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type OriginalPrice: float
:param DiscountPrice: 预付费云盘或快照预支费用的折扣价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type DiscountPrice: float
:param OriginalPriceHigh: 高精度预付费云盘或快照预支费用的原价,单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type OriginalPriceHigh: str
:param DiscountPriceHigh: 高精度预付费云盘或快照预支费用的折扣价,单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type DiscountPriceHigh: str
:param UnitPrice: 后付费云盘原单价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPrice: float
:param ChargeUnit: 后付费云盘的计价单元,取值范围:<br><li>HOUR:表示后付费云盘的计价单元是按小时计算。
注意:此字段可能返回 null,表示取不到有效值。
:type ChargeUnit: str
:param UnitPriceDiscount: 后付费云盘折扣单价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceDiscount: float
:param UnitPriceHigh: 高精度后付费云盘原单价, 单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceHigh: str
:param UnitPriceDiscountHigh: 高精度后付费云盘折扣单价, 单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceDiscountHigh: str
"""
self.OriginalPrice = None
self.DiscountPrice = None
self.OriginalPriceHigh = None
self.DiscountPriceHigh = None
self.UnitPrice = None
self.ChargeUnit = None
self.UnitPriceDiscount = None
self.UnitPriceHigh = None
self.UnitPriceDiscountHigh = None
def _deserialize(self, params):
self.OriginalPrice = params.get("OriginalPrice")
self.DiscountPrice = params.get("DiscountPrice")
self.OriginalPriceHigh = params.get("OriginalPriceHigh")
self.DiscountPriceHigh = params.get("DiscountPriceHigh")
self.UnitPrice = params.get("UnitPrice")
self.ChargeUnit = params.get("ChargeUnit")
self.UnitPriceDiscount = params.get("UnitPriceDiscount")
self.UnitPriceHigh = params.get("UnitPriceHigh")
self.UnitPriceDiscountHigh = params.get("UnitPriceDiscountHigh")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Price(AbstractModel):
"""描述预付费或后付费云盘的价格。
"""
def __init__(self):
r"""
:param OriginalPrice: 预付费云盘预支费用的原价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type OriginalPrice: float
:param DiscountPrice: 预付费云盘预支费用的折扣价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type DiscountPrice: float
:param UnitPrice: 后付费云盘原单价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPrice: float
:param ChargeUnit: 后付费云盘的计价单元,取值范围:<br><li>HOUR:表示后付费云盘的计价单元是按小时计算。
注意:此字段可能返回 null,表示取不到有效值。
:type ChargeUnit: str
:param UnitPriceDiscount: 后付费云盘折扣单价,单位:元。
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceDiscount: float
:param OriginalPriceHigh: 高精度预付费云盘预支费用的原价, 单位:元 。
注意:此字段可能返回 null,表示取不到有效值。
:type OriginalPriceHigh: str
:param DiscountPriceHigh: 高精度预付费云盘预支费用的折扣价, 单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type DiscountPriceHigh: str
:param UnitPriceHigh: 高精度后付费云盘原单价, 单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceHigh: str
:param UnitPriceDiscountHigh: 高精度后付费云盘折扣单价, 单位:元
注意:此字段可能返回 null,表示取不到有效值。
:type UnitPriceDiscountHigh: str
"""
self.OriginalPrice = None
self.DiscountPrice = None
self.UnitPrice = None
self.ChargeUnit = None
self.UnitPriceDiscount = None
self.OriginalPriceHigh = None
self.DiscountPriceHigh = None
self.UnitPriceHigh = None
self.UnitPriceDiscountHigh = None
def _deserialize(self, params):
self.OriginalPrice = params.get("OriginalPrice")
self.DiscountPrice = params.get("DiscountPrice")
self.UnitPrice = params.get("UnitPrice")
self.ChargeUnit = params.get("ChargeUnit")
self.UnitPriceDiscount = params.get("UnitPriceDiscount")
self.OriginalPriceHigh = params.get("OriginalPriceHigh")
self.DiscountPriceHigh = params.get("DiscountPriceHigh")
self.UnitPriceHigh = params.get("UnitPriceHigh")
self.UnitPriceDiscountHigh = params.get("UnitPriceDiscountHigh")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class RenewDiskRequest(AbstractModel):
"""RenewDisk请求参数结构体
"""
def __init__(self):
r"""
:param DiskChargePrepaid: 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月云盘的续费时长。<br>在云盘与挂载的实例一起续费的场景下,可以指定参数CurInstanceDeadline,此时云盘会按对齐到实例续费后的到期时间来续费。
:type DiskChargePrepaid: :class:`tencentcloud.cbs.v20170312.models.DiskChargePrepaid`
:param DiskId: 云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
"""
self.DiskChargePrepaid = None
self.DiskId = None
def _deserialize(self, params):
if params.get("DiskChargePrepaid") is not None:
self.DiskChargePrepaid = DiskChargePrepaid()
self.DiskChargePrepaid._deserialize(params.get("DiskChargePrepaid"))
self.DiskId = params.get("DiskId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class RenewDiskResponse(AbstractModel):
"""RenewDisk返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResizeDiskRequest(AbstractModel):
"""ResizeDisk请求参数结构体
"""
def __init__(self):
r"""
:param DiskId: 云硬盘ID, 通过[DescribeDisks](/document/product/362/16315)接口查询。
:type DiskId: str
:param DiskSize: 云硬盘扩容后的大小,单位为GB,必须大于当前云硬盘大小。云盘大小取值范围参见云硬盘[产品分类](/document/product/362/2353)的说明。
:type DiskSize: int
"""
self.DiskId = None
self.DiskSize = None
def _deserialize(self, params):
self.DiskId = params.get("DiskId")
self.DiskSize = params.get("DiskSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ResizeDiskResponse(AbstractModel):
"""ResizeDisk返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
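
# --- Illustrative expansion sketch (not part of the generated SDK file) ---
# Quotes and then performs an expansion; the new DiskSize must be larger than the
# current size. Assumes a configured CbsClient as in the sketches above.
def _example_resize_disk(client, disk_id, new_size_gb):
    quote = InquiryPriceResizeDiskRequest()
    quote.DiskId = disk_id
    quote.DiskSize = new_size_gb
    price = client.InquiryPriceResizeDisk(quote).DiskPrice
    req = ResizeDiskRequest()
    req.DiskId = disk_id
    req.DiskSize = new_size_gb
    client.ResizeDisk(req)
    return price
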
class SharePermission(AbstractModel):
"""快照分享信息集合
"""
def __init__(self):
r"""
:param CreatedTime: 快照分享的时间
:type CreatedTime: str
:param AccountId: 分享的账号Id
:type AccountId: str
"""
self.CreatedTime = None
self.AccountId = None
def _deserialize(self, params):
self.CreatedTime = params.get("CreatedTime")
self.AccountId = params.get("AccountId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Snapshot(AbstractModel):
"""描述了快照的详细信息
"""
def __init__(self):
r"""
:param Placement: 快照所在的位置。
:type Placement: :class:`tencentcloud.cbs.v20170312.models.Placement`
:param CopyFromRemote: 是否为跨地域复制的快照。取值范围:<br><li>true:表示为跨地域复制的快照。<br><li>false:本地域的快照。
:type CopyFromRemote: bool
:param SnapshotState: 快照的状态。取值范围:<br><li>NORMAL:正常<br><li>CREATING:创建中<br><li>ROLLBACKING:回滚中<br><li>COPYING_FROM_REMOTE:跨地域复制中<br><li>CHECKING_COPIED:复制校验中<br><li>TORECYCLE:待回收。
:type SnapshotState: str
:param IsPermanent: 是否为永久快照。取值范围:<br><li>true:永久快照<br><li>false:非永久快照。
:type IsPermanent: bool
:param SnapshotName: 快照名称,用户自定义的快照别名。调用[ModifySnapshotAttribute](/document/product/362/15650)可修改此字段。
:type SnapshotName: str
:param DeadlineTime: 快照到期时间。如果快照为永久保留,此字段为空。
:type DeadlineTime: str
:param Percent: 快照创建进度百分比,快照创建成功后此字段恒为100。
:type Percent: int
:param Images: 快照关联的镜像列表。
:type Images: list of Image
:param ShareReference: 快照当前被共享数。
:type ShareReference: int
:param SnapshotType: 快照类型,目前该项取值可以为PRIVATE_SNAPSHOT或者SHARED_SNAPSHOT
:type SnapshotType: str
:param DiskSize: 创建此快照的云硬盘大小,单位GB。
:type DiskSize: int
:param DiskId: 创建此快照的云硬盘ID。
:type DiskId: str
:param CopyingToRegions: 快照正在跨地域复制的目的地域,默认取值为[]。
:type CopyingToRegions: list of str
:param Encrypt: 是否为加密盘创建的快照。取值范围:<br><li>true:该快照为加密盘创建的<br><li>false:非加密盘创建的快照。
:type Encrypt: bool
:param CreateTime: 快照的创建时间。
:type CreateTime: str
:param ImageCount: 快照关联的镜像个数。
:type ImageCount: int
:param DiskUsage: 创建此快照的云硬盘类型。取值范围:<br><li>SYSTEM_DISK:系统盘<br><li>DATA_DISK:数据盘。
:type DiskUsage: str
:param SnapshotId: 快照ID。
:type SnapshotId: str
:param TimeStartShare: 快照开始共享的时间。
:type TimeStartShare: str
"""
self.Placement = None
self.CopyFromRemote = None
self.SnapshotState = None
self.IsPermanent = None
self.SnapshotName = None
self.DeadlineTime = None
self.Percent = None
self.Images = None
self.ShareReference = None
self.SnapshotType = None
self.DiskSize = None
self.DiskId = None
self.CopyingToRegions = None
self.Encrypt = None
self.CreateTime = None
self.ImageCount = None
self.DiskUsage = None
self.SnapshotId = None
self.TimeStartShare = None
def _deserialize(self, params):
if params.get("Placement") is not None:
self.Placement = Placement()
self.Placement._deserialize(params.get("Placement"))
self.CopyFromRemote = params.get("CopyFromRemote")
self.SnapshotState = params.get("SnapshotState")
self.IsPermanent = params.get("IsPermanent")
self.SnapshotName = params.get("SnapshotName")
self.DeadlineTime = params.get("DeadlineTime")
self.Percent = params.get("Percent")
if params.get("Images") is not None:
self.Images = []
for item in params.get("Images"):
obj = Image()
obj._deserialize(item)
self.Images.append(obj)
self.ShareReference = params.get("ShareReference")
self.SnapshotType = params.get("SnapshotType")
self.DiskSize = params.get("DiskSize")
self.DiskId = params.get("DiskId")
self.CopyingToRegions = params.get("CopyingToRegions")
self.Encrypt = params.get("Encrypt")
self.CreateTime = params.get("CreateTime")
self.ImageCount = params.get("ImageCount")
self.DiskUsage = params.get("DiskUsage")
self.SnapshotId = params.get("SnapshotId")
self.TimeStartShare = params.get("TimeStartShare")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class SnapshotOperationLog(AbstractModel):
"""快照操作日志。
"""
def __init__(self):
r"""
:param Operator: 操作者的UIN。
注意:此字段可能返回 null,表示取不到有效值。
:type Operator: str
:param Operation: 操作类型。取值范围:
SNAP_OPERATION_DELETE:删除快照
SNAP_OPERATION_ROLLBACK:回滚快照
SNAP_OPERATION_MODIFY:修改快照属性
SNAP_OPERATION_CREATE:创建快照
SNAP_OPERATION_COPY:跨地域复制快照
ASP_OPERATION_CREATE_SNAP:由定期快照策略创建快照
ASP_OPERATION_DELETE_SNAP:由定期快照策略删除快照
:type Operation: str
:param SnapshotId: 操作的快照ID。
:type SnapshotId: str
:param OperationState: 操作的状态。取值范围:
SUCCESS :表示操作成功
FAILED :表示操作失败
PROCESSING :表示操作中。
:type OperationState: str
:param StartTime: 开始时间。
:type StartTime: str
:param EndTime: 结束时间。
:type EndTime: str
"""
self.Operator = None
self.Operation = None
self.SnapshotId = None
self.OperationState = None
self.StartTime = None
self.EndTime = None
def _deserialize(self, params):
self.Operator = params.get("Operator")
self.Operation = params.get("Operation")
self.SnapshotId = params.get("SnapshotId")
self.OperationState = params.get("OperationState")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class Tag(AbstractModel):
"""标签。
"""
def __init__(self):
r"""
        :param Key: 标签键。
:type Key: str
:param Value: 标签值。
:type Value: str
"""
self.Key = None
self.Value = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Value = params.get("Value")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TerminateDisksRequest(AbstractModel):
"""TerminateDisks请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 需退还的云盘ID列表。
:type DiskIds: list of str
:param DeleteSnapshot: 销毁云盘时删除关联的非永久保留快照。0 表示非永久快照不随云盘销毁而销毁,1表示非永久快照随云盘销毁而销毁,默认取0。快照是否永久保留可以通过DescribeSnapshots接口返回的快照详情的IsPermanent字段来判断,true表示永久快照,false表示非永久快照。
:type DeleteSnapshot: int
"""
self.DiskIds = None
self.DeleteSnapshot = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
self.DeleteSnapshot = params.get("DeleteSnapshot")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TerminateDisksResponse(AbstractModel):
"""TerminateDisks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
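
# --- Illustrative termination sketch (not part of the generated SDK file) ---
# Returns (destroys) disks and lets non-permanent snapshots be deleted with them
# via DeleteSnapshot=1. Disk IDs and the configured CbsClient are assumptions.
def _example_terminate_disks(client, disk_ids):
    req = TerminateDisksRequest()
    req.DiskIds = disk_ids
    req.DeleteSnapshot = 1
    client.TerminateDisks(req)
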
class UnbindAutoSnapshotPolicyRequest(AbstractModel):
"""UnbindAutoSnapshotPolicy请求参数结构体
"""
def __init__(self):
r"""
:param DiskIds: 要解绑定期快照策略的云盘ID列表。
:type DiskIds: list of str
:param AutoSnapshotPolicyId: 要解绑的定期快照策略ID。
:type AutoSnapshotPolicyId: str
"""
self.DiskIds = None
self.AutoSnapshotPolicyId = None
def _deserialize(self, params):
self.DiskIds = params.get("DiskIds")
self.AutoSnapshotPolicyId = params.get("AutoSnapshotPolicyId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UnbindAutoSnapshotPolicyResponse(AbstractModel):
"""UnbindAutoSnapshotPolicy返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
authors: ["colorguitar@hotmail.com"] | author_id: colorguitar@hotmail.com

blob_id: 283c6b43e1fb09e472f80084576279baec25468a | directory_id: dbd1dd2b00a3c3cb25f7dd4faf08c3fc35aae7bf
path: /leetcode/add-one-row-to-tree.py | content_id: 6d765a9da7de47530ff7f21eacfe56ba1e0aa231
detected_licenses: ["MIT"] | license_type: permissive | repo_name: hg-pyun/algorithm
snapshot_id: 2fa4260e96fbef1c23daf2c330db1e863401ea85 | revision_id: 305100c9e9a09ee08082a1798e2599f2d4d3ebad | branch_name: refs/heads/master
visit_date: 2023-08-03T12:52:00.362607 | revision_date: 2023-07-25T13:24:52 | committer_date: 2023-07-25T13:24:52
github_id: 148906547 | star_events_count: 9 | fork_events_count: 1 | gha_* fields: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 992 | extension: py
content:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
if d == 1:
return TreeNode(v, root)
def traversal(node, depth, prev, direct):
if depth == d:
new = TreeNode(v)
if direct == 'left':
new.left = node
prev.left = new
elif direct == 'right':
new.right = node
prev.right = new
if node is None:
return
traversal(node.left, depth + 1, node, 'left')
traversal(node.right, depth + 1, node, 'right')
traversal(root, 1, None, None)
return root
authors: ["noreply@github.com"] | author_id: hg-pyun.noreply@github.com

blob_id: 823e3eb060b99776cbbc0b7143e98b3dadd30ec0 | directory_id: 064a8e3d4e2a31b322dff757daf11408241a0974
path: /webeloper2021/wsgi.py | content_id: 658ddeb56dda47b66f410e7cb331b88be208ac11
detected_licenses: [] | license_type: no_license | repo_name: mohammadsedehi78/mosabeghe
snapshot_id: f556334875986d33322e56217c86b16a6e4602e9 | revision_id: 6ea5083894ba80041793c015641d1738ec3bbcca | branch_name: refs/heads/master
visit_date: 2023-03-18T08:59:10.344490 | revision_date: 2021-03-05T11:09:29 | committer_date: 2021-03-05T11:09:29
github_id: 344783574 | star_events_count: 0 | fork_events_count: 0 | gha_* fields: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 403 | extension: py
content:
"""
WSGI config for webeloper2021 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webeloper2021.settings')
application = get_wsgi_application()
authors: ["mohammad.sedehi78@gmail.com"] | author_id: mohammad.sedehi78@gmail.com

blob_id: 6bbae3d5e5fcfab52cf056ce10cfcd8242cb6d43 | directory_id: f569978afb27e72bf6a88438aa622b8c50cbc61b
path: /douyin_open/EnterpriseGrouponGrouponCommonGrouponEvent/models/order_info.py | content_id: 1c155ea165949da316be534caa2cc9ea5ce8beb2
detected_licenses: [] | license_type: no_license | repo_name: strangebank/swagger-petstore-perl
snapshot_id: 4834409d6225b8a09b8195128d74a9b10ef1484a | revision_id: 49dfc229e2e897cdb15cbf969121713162154f28 | branch_name: refs/heads/master
visit_date: 2023-01-05T10:21:33.518937 | revision_date: 2020-11-05T04:33:16 | committer_date: 2020-11-05T04:33:16
github_id: 310189316 | star_events_count: 1 | fork_events_count: 0 | gha_* fields: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 14221 | extension: py
content:
# coding: utf-8
"""
团购活动事件回调
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'order_id': 'str',
'channel': 'str',
'buyer_open_id': 'str',
'original_amount': 'int',
'amount': 'int',
'refund_amount': 'int',
'settle_amount': 'int',
'item_count': 'int',
'status': 'int',
'create_time': 'int',
'pay_time': 'int',
'refund_time': 'int',
'codes': 'list[CodeInfo]'
}
attribute_map = {
'order_id': 'order_id',
'channel': 'channel',
'buyer_open_id': 'buyer_open_id',
'original_amount': 'original_amount',
'amount': 'amount',
'refund_amount': 'refund_amount',
'settle_amount': 'settle_amount',
'item_count': 'item_count',
'status': 'status',
'create_time': 'create_time',
'pay_time': 'pay_time',
'refund_time': 'refund_time',
'codes': 'codes'
}
def __init__(self, order_id=None, channel=None, buyer_open_id=None, original_amount=None, amount=None, refund_amount=None, settle_amount=None, item_count=None, status=None, create_time=None, pay_time=None, refund_time=None, codes=None): # noqa: E501
"""OrderInfo - a model defined in Swagger""" # noqa: E501
self._order_id = None
self._channel = None
self._buyer_open_id = None
self._original_amount = None
self._amount = None
self._refund_amount = None
self._settle_amount = None
self._item_count = None
self._status = None
self._create_time = None
self._pay_time = None
self._refund_time = None
self._codes = None
self.discriminator = None
self.order_id = order_id
self.channel = channel
self.buyer_open_id = buyer_open_id
self.original_amount = original_amount
self.amount = amount
self.refund_amount = refund_amount
self.settle_amount = settle_amount
self.item_count = item_count
self.status = status
self.create_time = create_time
self.pay_time = pay_time
if refund_time is not None:
self.refund_time = refund_time
self.codes = codes
@property
def order_id(self):
"""Gets the order_id of this OrderInfo. # noqa: E501
团购活动订单Id # noqa: E501
:return: The order_id of this OrderInfo. # noqa: E501
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""Sets the order_id of this OrderInfo.
团购活动订单Id # noqa: E501
:param order_id: The order_id of this OrderInfo. # noqa: E501
:type: str
"""
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`") # noqa: E501
self._order_id = order_id
@property
def channel(self):
"""Gets the channel of this OrderInfo. # noqa: E501
订单来源 # noqa: E501
:return: The channel of this OrderInfo. # noqa: E501
:rtype: str
"""
return self._channel
@channel.setter
def channel(self, channel):
"""Sets the channel of this OrderInfo.
订单来源 # noqa: E501
:param channel: The channel of this OrderInfo. # noqa: E501
:type: str
"""
if channel is None:
raise ValueError("Invalid value for `channel`, must not be `None`") # noqa: E501
self._channel = channel
@property
def buyer_open_id(self):
"""Gets the buyer_open_id of this OrderInfo. # noqa: E501
买家的open_id # noqa: E501
:return: The buyer_open_id of this OrderInfo. # noqa: E501
:rtype: str
"""
return self._buyer_open_id
@buyer_open_id.setter
def buyer_open_id(self, buyer_open_id):
"""Sets the buyer_open_id of this OrderInfo.
买家的open_id # noqa: E501
:param buyer_open_id: The buyer_open_id of this OrderInfo. # noqa: E501
:type: str
"""
if buyer_open_id is None:
raise ValueError("Invalid value for `buyer_open_id`, must not be `None`") # noqa: E501
self._buyer_open_id = buyer_open_id
@property
def original_amount(self):
"""Gets the original_amount of this OrderInfo. # noqa: E501
原价,单位分 # noqa: E501
:return: The original_amount of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._original_amount
@original_amount.setter
def original_amount(self, original_amount):
"""Sets the original_amount of this OrderInfo.
原价,单位分 # noqa: E501
:param original_amount: The original_amount of this OrderInfo. # noqa: E501
:type: int
"""
if original_amount is None:
raise ValueError("Invalid value for `original_amount`, must not be `None`") # noqa: E501
self._original_amount = original_amount
@property
def amount(self):
"""Gets the amount of this OrderInfo. # noqa: E501
订单金额,单位分 # noqa: E501
:return: The amount of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this OrderInfo.
订单金额,单位分 # noqa: E501
:param amount: The amount of this OrderInfo. # noqa: E501
:type: int
"""
if amount is None:
raise ValueError("Invalid value for `amount`, must not be `None`") # noqa: E501
self._amount = amount
@property
def refund_amount(self):
"""Gets the refund_amount of this OrderInfo. # noqa: E501
退款金额,单位分 # noqa: E501
:return: The refund_amount of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._refund_amount
@refund_amount.setter
def refund_amount(self, refund_amount):
"""Sets the refund_amount of this OrderInfo.
退款金额,单位分 # noqa: E501
:param refund_amount: The refund_amount of this OrderInfo. # noqa: E501
:type: int
"""
if refund_amount is None:
raise ValueError("Invalid value for `refund_amount`, must not be `None`") # noqa: E501
self._refund_amount = refund_amount
@property
def settle_amount(self):
"""Gets the settle_amount of this OrderInfo. # noqa: E501
结算金额,单位分 # noqa: E501
:return: The settle_amount of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._settle_amount
@settle_amount.setter
def settle_amount(self, settle_amount):
"""Sets the settle_amount of this OrderInfo.
结算金额,单位分 # noqa: E501
:param settle_amount: The settle_amount of this OrderInfo. # noqa: E501
:type: int
"""
if settle_amount is None:
raise ValueError("Invalid value for `settle_amount`, must not be `None`") # noqa: E501
self._settle_amount = settle_amount
@property
def item_count(self):
"""Gets the item_count of this OrderInfo. # noqa: E501
团购券个数 # noqa: E501
:return: The item_count of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._item_count
@item_count.setter
def item_count(self, item_count):
"""Sets the item_count of this OrderInfo.
团购券个数 # noqa: E501
:param item_count: The item_count of this OrderInfo. # noqa: E501
:type: int
"""
if item_count is None:
raise ValueError("Invalid value for `item_count`, must not be `None`") # noqa: E501
self._item_count = item_count
@property
def status(self):
"""Gets the status of this OrderInfo. # noqa: E501
* 订单状态 * 1: 订单完成 * 101: 支付完成 * 200: 发起核销 * 201: 核销完成 * 202: 核销失败 * 299: 用户申请退款 * 300: 商户发起退款 * 301: 退款成功 * 302: 退款失败 # noqa: E501
:return: The status of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this OrderInfo.
* 订单状态 * 1: 订单完成 * 101: 支付完成 * 200: 发起核销 * 201: 核销完成 * 202: 核销失败 * 299: 用户申请退款 * 300: 商户发起退款 * 301: 退款成功 * 302: 退款失败 # noqa: E501
:param status: The status of this OrderInfo. # noqa: E501
:type: int
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = [1, 101, 200, 201, 202, 299, 300, 301, 302, ""] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def create_time(self):
"""Gets the create_time of this OrderInfo. # noqa: E501
订单创建时间 unix time # noqa: E501
:return: The create_time of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this OrderInfo.
订单创建时间 unix time # noqa: E501
:param create_time: The create_time of this OrderInfo. # noqa: E501
:type: int
"""
if create_time is None:
raise ValueError("Invalid value for `create_time`, must not be `None`") # noqa: E501
self._create_time = create_time
@property
def pay_time(self):
"""Gets the pay_time of this OrderInfo. # noqa: E501
订单支付时间 unix time # noqa: E501
:return: The pay_time of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._pay_time
@pay_time.setter
def pay_time(self, pay_time):
"""Sets the pay_time of this OrderInfo.
订单支付时间 unix time # noqa: E501
:param pay_time: The pay_time of this OrderInfo. # noqa: E501
:type: int
"""
if pay_time is None:
raise ValueError("Invalid value for `pay_time`, must not be `None`") # noqa: E501
self._pay_time = pay_time
@property
def refund_time(self):
"""Gets the refund_time of this OrderInfo. # noqa: E501
退款完成时间 unix time # noqa: E501
:return: The refund_time of this OrderInfo. # noqa: E501
:rtype: int
"""
return self._refund_time
@refund_time.setter
def refund_time(self, refund_time):
"""Sets the refund_time of this OrderInfo.
退款完成时间 unix time # noqa: E501
:param refund_time: The refund_time of this OrderInfo. # noqa: E501
:type: int
"""
self._refund_time = refund_time
@property
def codes(self):
"""Gets the codes of this OrderInfo. # noqa: E501
团购券码列表 # noqa: E501
:return: The codes of this OrderInfo. # noqa: E501
:rtype: list[CodeInfo]
"""
return self._codes
@codes.setter
def codes(self, codes):
"""Sets the codes of this OrderInfo.
团购券码列表 # noqa: E501
:param codes: The codes of this OrderInfo. # noqa: E501
:type: list[CodeInfo]
"""
if codes is None:
raise ValueError("Invalid value for `codes`, must not be `None`") # noqa: E501
self._codes = codes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OrderInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrderInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
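A brief usage sketch for the generated model above; every field value is illustrative, and `codes` is left as an empty list to avoid constructing `CodeInfo` objects here.
# Illustrative values only; status 101 means payment completed per the docstring above.
order = OrderInfo(
    order_id="6543210987654321",
    channel="app",
    buyer_open_id="open-id-example",
    original_amount=10000,
    amount=9000,
    refund_amount=0,
    settle_amount=8500,
    item_count=1,
    status=101,
    create_time=1600000000,
    pay_time=1600000100,
    codes=[],
)
print(order.to_dict())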
|
[
"strangebank@gmail.com"
] |
strangebank@gmail.com
|
91532410123870d4d3c1bb39763e33fc807220b4
|
931b39931325cfc51494e80439355b8a8a03ab1e
|
/parameter_fitting/FRET/compute_Jacobian_mkVI.py
|
d45ae61a0c94ae0cc439e344fe37a81d671824b1
|
[] |
no_license
|
ajkluber/project_tools
|
908fca3babd173eb22a1c2d28cbb930137c5a4a8
|
5ebbbcb10b3d8efe555e981eba6c5a401db80884
|
refs/heads/master
| 2021-06-06T15:04:29.394560
| 2017-09-08T20:10:54
| 2017-09-08T20:10:54
| 15,809,249
| 1
| 1
| null | 2014-11-13T22:11:32
| 2014-01-10T20:48:50
|
Python
|
UTF-8
|
Python
| false
| false
| 11,351
|
py
|
""" Compute Jacobian for matching a distance distribution
Description:
This module computes the Jacobian of a distance distribution,
such as one measured with FRET.
note: as of now, only the FRET distance computation has been updated
last updated: Justin Chen, May 05, 2015
"""
import numpy as np
import os
import time
import argparse
try:
import mdtraj as md
except:
pass
import model_builder as mdb
import scipy.stats as stats
from project_tools.parameter_fitting.util.util import *
global GAS_CONSTANT_KJ_MOL
GAS_CONSTANT_KJ_MOL = 0.0083144621
def_FRET_pairs = [[114,192]]
defspacing = 0.1 ## in nm
def_forster_radius = 5.1 ## in nm
def find_sim_bins(savelocation, FRETeff, fit_temp, residues=def_FRET_pairs, spacing=defspacing, weights=None):
"""find_sim_bins calculates and writes the simulation files """
    ## makes no assumption about the current working directory, but assumes savelocation is a valid path
print "Calculating Simulation FRET bins"
cwd = os.getcwd()
if not os.path.isdir(savelocation):
os.mkdir(savelocation)
os.chdir(savelocation)
    #calculate the parameters for binning
maxvalue = int(np.amax(FRETeff)/spacing) + 1
minvalue = int(np.amin(FRETeff)/spacing)
num_bins = maxvalue - minvalue
ran_size = (minvalue*spacing,maxvalue*spacing)
#if not weighted, set weights to ones
    if weights is None:
weights = np.ones(np.shape(FRETeff)[0])
#actually histogram it
print "***************************"
print np.shape(FRETeff)
print np.shape(residues)
print "***************************"
hist, edges, slices = stats.binned_statistic(FRETeff, weights, statistic="sum", range=[ran_size], bins=num_bins)
hist = hist/(np.sum(hist)*spacing)
bincenters = 0.5 * (edges[1:] + edges[:-1])
print "Making list of values:"
#actually save it
np.savetxt("simf_valuesT%d-P%d-%d.dat"%(fit_temp, residues[0], residues[1]),hist)
np.savetxt("simf_edgesT%d-P%d-%d.dat"%(fit_temp, residues[0], residues[1]),edges)
np.savetxt("simf-paramsT%d-P%d-%d.dat"%(fit_temp, residues[0], residues[1]),np.array([num_bins,minvalue*spacing,maxvalue*spacing,spacing]))
os.chdir(cwd)
print "Calculated bins for simulation data at a spacing of %.4f" % spacing
return hist, slices
def fret_hist_calc(model, fitopts):
fit_temp = fitopts["t_fit"]
    ## read the experimental trace file(s) and bin them onto the simulation histogram edges
cwd = os.getcwd()
subdir = model.name
#load iteration number
iteration = fitopts["iteration"]
#load fret pairs and format correctly
fret_pairs = fitopts["fret_pairs"]
FRET_pairs = np.array(fret_pairs) - 1
sub = "%s/%s/iteration_%d" % (cwd,subdir,iteration)
subdirec = "%s/fitting_%d" % (sub,iteration)
FRETfile = "%s/FRET_hist.dat" % subdirec
    if "fretdata" not in fitopts:
        # wrap in a list so the per-pair indexing below also works for the default case
        FRETtracefile = ["%s/FRET_trace.dat" % cwd]
    else:
        FRETtracefile = fitopts["fretdata"]
print "FRETtracefile is:"
print FRETtracefile
for i in range(np.shape(FRET_pairs)[0]):
residues = FRET_pairs[i,:]
ftrace = np.loadtxt(FRETtracefile[i])
edge_file = "%s/simf_edgesT%d-P%d-%d.dat"%(subdirec, fit_temp, residues[0], residues[1])
edges = np.loadtxt(edge_file)
hist, edges = np.histogram(ftrace, bins=edges, normed=True)
bincenters = (edges[:-1] + edges[1:]) / 2
datas = np.array([bincenters,hist])
datas = np.transpose(datas)
if i == 0:
fret_total = datas
else:
fret_total = np.append(fret_total, datas, axis=0)
np.savetxt(FRETfile, fret_total)
print "Binned FRET_hist_calc Data"
def check_exp_data(FRETdata, bin_centers):
    # if the difference is within this margin, consider the bins matched
    # checks that the FRETdata centers and bin_centers agree to within 10^-6 of the spacing
terms = np.shape(FRETdata)[0]
i = 0
spacing_FRET = FRETdata[1] - FRETdata[0]
spacing_bin_centers = bin_centers[1] - bin_centers[0]
    min_difference = (min([spacing_FRET, spacing_bin_centers]))/1000000 # if within this margin, consider the bins matched
recalc = not np.shape(FRETdata)[0] == np.shape(bin_centers)[0]
##Verify that the bins line up
while (not recalc) and i<terms:
if not (FRETdata[i] - bin_centers[i]) < min_difference:
recalc = True
i += 1
return recalc
def add_error_log(note, fit_temp):
errfile = "error_log-JC.txt"
if not os.path.isfile(errfile):
f = open("error_log-JC.txt","w")
f.write("Error Log for This run\n\n")
f.write("Global variables are:\n")
        f.write("Gas constant in kJ per mol = %g\n" % GAS_CONSTANT_KJ_MOL)
f.write("pairs used are = " + str(def_FRET_pairs) + "\n")
f.write("Temperature for Fitting used is T = %d\n" % fit_temp)
        f.write("Spacing in FRET pair distance used is = %g\n" % defspacing)
f.write("\n")
f.write(note)
f.write("\n")
f.close()
else:
        f = open(errfile, "a")
f.write("\n")
f.write(note)
f.write("\n")
f.close()
print "ERROR: CHECK LOG \n %s" % note
def get_target_feature(model,fitopts):
""" Get target features """
fit_temp = fitopts["t_fit"]
cwd = os.getcwd()
subdir = model.name
iteration = fitopts["iteration"]
sub = "%s/%s/iteration_%d" % (cwd,subdir,iteration)
subdirec = "%s/fitting_%d" % (sub,iteration)
simfile = "%s/simf_centers%d.dat" % (subdirec,fit_temp)
FRETfile = "%s/FRET_hist.dat" % subdirec
fret_hist_calc(model, fitopts)
FRETdata = np.loadtxt(FRETfile)
print "initial FRET data and bin_centers"
print FRETdata
target = FRETdata[:,1]
target_err = target**0.5 ##for lack of a better way, take sqrt of bins for error estimate
return target, target_err
def compute_efficiency(FRETr, R0):
"""Convert a FRET distance trace to a FRET efficiency trace"""
eff = 1.0/(1.0+(FRETr/R0)**6)
print "FRET efficiencies: "
print eff
return eff
def calculate_average_Jacobian(model,fitopts, FRET_pairs=def_FRET_pairs, spacing=defspacing ):
""" Calculate the average feature vector (ddG's) and Jacobian """
if "t_fit" in fitopts:
fit_temp = fitopts["t_fit"]
else:
raise IOError("Missing the fit_temperature, please specify in .ini file")
if "fret_pairs" in fitopts:
fret_pairs = fitopts["fret_pairs"]
FRET_pairs = np.array(fret_pairs) - 1
print "The FRET pairs are:"
print FRET_pairs
if "y_shift" in fitopts:
y_shift = fitopts["y_shift"]
else:
y_shift = 0.0
fitopts["y_shift"] = 0.0
if "spacing" in fitopts:
spacing = fitopts["spacing"]
if "forster_radius" in fitopts:
forster_radius = fitopts["forster_radius"]
else:
forster_radius = def_forster_radius
##Define location of logical files
cwd = os.getcwd()
subdir = model.name
iteration = fitopts["iteration"]
sub = "%s/%s/iteration_%d" % (cwd,subdir,iteration)
traj_location = "%s/%d_0" % (sub, fit_temp)
sim_location = "%s/fitting_%d" % (sub,iteration)
##define location of logical files
os.chdir(traj_location)
## Get trajectory, state indicators, contact energy
print "Working on calculating model's trajectory and contact info"
traj,rij,qij = get_rij_Vp(model)
## Get simulation feature
print "Now working on calculating the trajectories"
beta = 1.0 / (GAS_CONSTANT_KJ_MOL*float(fit_temp))
FRETr = md.compute_distances(traj,FRET_pairs, periodic=False)
print "Computing Jacobian and Simparams for the temperature %d, with spacing %f" % (fit_temp, spacing)
for i in range(np.shape(FRET_pairs)[0]):
FRETr_use = FRETr[:,i] + y_shift
print "Shifted simulated FRET-distance data by a y_shift = %f" % y_shift
print FRETr_use
###CONVERT DISTANCE TO FRET EFFICIENCY
FRETeff = compute_efficiency(FRETr_use, forster_radius)
sim_feature, sim_slices = find_sim_bins(sim_location, FRETeff, fit_temp, residues=FRET_pairs[i,:], spacing=spacing, weights=None)
Jacobian = compute_Jacobian_basic(qij,sim_feature*spacing, sim_slices, beta)
Jacobian /= spacing
#store the sim_feature into a total array:
if i == 0:
sim_feature_all = sim_feature
Jacobian_all = Jacobian
else:
sim_feature_all = np.append(sim_feature_all, sim_feature)
Jacobian_all = np.append(Jacobian_all, Jacobian, axis=0)
#save the temperature this was done in
if not os.path.isdir("%s/newton"%sub):
os.mkdir("%s/newton"%sub)
f = open("%s/newton/temp-used-here.txt"%sub, "w")
f.write(str(fit_temp))
f.close()
os.chdir(cwd)
sim_feature_err = sim_feature_all ** 0.5
Jacobian_err = np.zeros(np.shape(Jacobian_all))
return sim_feature_all, sim_feature_err, Jacobian_all, Jacobian_err
def compute_Jacobian_basic(qij, fr, sim_slices, beta, weights=None):
    """ Method for computing a Jacobian given only the rudimentary pieces necessary """
    ## qij is an NxM array containing the Qij values from the simulation
    ## fr is an Rx1 array containing the normalized distribution f(r)
    ## sim_slices is an Nx1 array containing the bin_index+1 for the r matrix
    ## beta is 1/(kB*T) for this particular Jacobian
    ## N = number of frames, M = number of contacts to be fitted, R = number of bins of R data
## Note: assumes fr is already weighted!
nbins = np.shape(fr)[0]
(N_total_traj, npairs) = np.shape(qij)
    if weights is None:
N_total_weight = N_total_traj
weights = np.ones(N_total_traj)
else:
if not np.shape(weights)[0] == N_total_traj:
raise IOError("Not every frame is weighted, aborting! Check to make sure weights is same length as trajectory")
N_total_weight = np.sum(weights)
Jacobian = np.zeros((nbins, npairs),float)
for idx, bin_location in enumerate(sim_slices):
Jacobian[bin_location-1, :] += qij[idx,:]*weights[idx]
Jacobian /= N_total_weight
Qavg = np.sum(Jacobian, axis=0)
avg_matrix = np.dot(np.array([fr]).transpose(), np.array([Qavg]))
print "The shape of these matrices are:"
print np.shape(avg_matrix)
print np.shape(Jacobian)
Jacobian -= avg_matrix
Jacobian *= (-1.0) * beta
return Jacobian
if __name__ == "__main__":
import model_builder as mdb
parser = argparse.ArgumentParser(description='Calculate .')
parser.add_argument('--name', type=str, required=True, help='name.')
parser.add_argument('--iteration', type=int, required=True, help='iteration.')
args = parser.parse_args()
name = args.name
iteration= args.iteration
pairs = np.loadtxt("%s/pairs.dat" % name,dtype=int)
defaults = True
model = mdb.models.SmogCalpha.SmogCalpha(name=name,pairs=pairs,defaults=defaults,iteration=iteration)
sim_feature_avg, sim_feature_err, Jacobian_avg, Jacobian_err = calculate_average_Jacobian(model)
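A self-contained check of the distance-to-efficiency conversion used in compute_efficiency above, with the default Förster radius; the example distances are invented.
# Standalone sanity check of E = 1/(1 + (r/R0)^6); values are illustrative.
import numpy as np
R0 = 5.1  # nm, matches def_forster_radius above
r = np.array([3.0, 5.1, 8.0])  # donor-acceptor distances in nm
eff = 1.0/(1.0 + (r/R0)**6)
print(eff)  # roughly [0.96, 0.50, 0.06]; closer pairs give higher transfer efficiency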
|
[
"jc75@rice.edu"
] |
jc75@rice.edu
|
93fda853410026abf9e2436c1f5c95874f6cee15
|
f5963c8644391770a2fbc0aaf1af6d7545d533e2
|
/src/lib/trains/train_factory.py
|
21c6a4cef331461361faf667545a60224d3b9ab0
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
ajaichemmanam/CenterSeg
|
d33c181a952355bd6196e50b1901816ee3a5b15d
|
7a6d181961ae8a195d21c71d1fd68da36bcac70f
|
refs/heads/master
| 2022-12-24T19:01:55.609704
| 2020-09-16T06:14:01
| 2020-09-17T04:18:12
| 267,252,926
| 13
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .ctdet import CtdetTrainer
from .ddd import DddTrainer
from .exdet import ExdetTrainer
from .multi_pose import MultiPoseTrainer
from .ctseg import CtsegTrainer
train_factory = {
'exdet': ExdetTrainer,
'ddd': DddTrainer,
'ctdet': CtdetTrainer,
'multi_pose': MultiPoseTrainer,
'ctseg': CtsegTrainer
}
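A minimal sketch of how a factory dict like this is usually consumed; `opt`, `model`, and `optimizer` are placeholders for objects built elsewhere in the project.
# Hypothetical selection step; constructor arguments depend on the surrounding project.
task = 'ctseg'
TrainerClass = train_factory[task]
# trainer = TrainerClass(opt, model, optimizer)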
|
[
"ajaichemmanam@gmail.com"
] |
ajaichemmanam@gmail.com
|
cf644a12670d8aff34b7a1413556ac7855213a69
|
3e19165859b69351301f683292135cba75549db6
|
/Stanford/CS224n/a3/parser_model.py
|
05b40e4ed8149b60b1c996c72d73acb6abfa0832
|
[] |
no_license
|
k-ye/OpenCourses
|
b084638e212920a831a6baf74d740dd704b9447f
|
7ac57b6fbfe1ae574f60378cf15d308e191be3eb
|
refs/heads/master
| 2021-07-04T19:50:34.040105
| 2020-04-05T09:02:14
| 2020-04-05T09:02:14
| 99,991,859
| 27
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,174
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 3
parser_model.py: Feed-Forward Neural Network for Dependency Parsing
Sahil Chopra <schopra8@stanford.edu>
"""
import pickle
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
class ParserModel(nn.Module):
""" Feedforward neural network with an embedding layer and single hidden layer.
The ParserModel will predict which transition should be applied to a
given partial parse configuration.
PyTorch Notes:
- Note that "ParserModel" is a subclass of the "nn.Module" class. In PyTorch all neural networks
are a subclass of this "nn.Module".
- The "__init__" method is where you define all the layers and their respective parameters
(embedding layers, linear layers, dropout layers, etc.).
- "__init__" gets automatically called when you create a new instance of your class, e.g.
when you write "m = ParserModel()".
- Other methods of ParserModel can access variables that have "self." prefix. Thus,
            you should add the "self." prefix to layers, values, etc. that you want to utilize
in other ParserModel methods.
- For further documentation on "nn.Module" please see https://pytorch.org/docs/stable/nn.html.
"""
def __init__(self, embeddings, n_features=36,
hidden_size=200, n_classes=3, dropout_prob=0.5):
""" Initialize the parser model.
@param embeddings (Tensor): word embeddings (num_words, embedding_size)
@param n_features (int): number of input features
@param hidden_size (int): number of hidden units
@param n_classes (int): number of output classes
@param dropout_prob (float): dropout probability
"""
super(ParserModel, self).__init__()
self.n_features = n_features
self.n_classes = n_classes
self.dropout_prob = dropout_prob
self.embed_size = embeddings.shape[1]
self.hidden_size = hidden_size
self.pretrained_embeddings = nn.Embedding(
embeddings.shape[0], self.embed_size)
self.pretrained_embeddings.weight = nn.Parameter(
torch.tensor(embeddings))
# YOUR CODE HERE (~5 Lines)
# TODO:
# 1) Construct `self.embed_to_hidden` linear layer, initializing the weight matrix
# with the `nn.init.xavier_uniform_` function with `gain = 1` (default)
# 2) Construct `self.dropout` layer.
# 3) Construct `self.hidden_to_logits` linear layer, initializing the weight matrix
# with the `nn.init.xavier_uniform_` function with `gain = 1` (default)
###
# Note: Here, we use Xavier Uniform Initialization for our Weight initialization.
# It has been shown empirically, that this provides better initial weights
# for training networks than random uniform initialization.
# For more details checkout this great blogpost:
# http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization
# Hints:
# - After you create a linear layer you can access the weight
# matrix via:
# linear_layer.weight
###
# Please see the following docs for support:
# Linear Layer: https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
# Xavier Init: https://pytorch.org/docs/stable/nn.html#torch.nn.init.xavier_uniform_
# Dropout: https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout
xavier_init = torch.nn.init.xavier_uniform_
self.embed_to_hidden = torch.nn.Linear(
self.n_features * self.embed_size, self.hidden_size)
self.dropout = torch.nn.Dropout(self.dropout_prob)
self.hidden_to_logits = torch.nn.Linear(
self.hidden_size, self.n_classes)
xavier_init(self.embed_to_hidden.weight)
xavier_init(self.hidden_to_logits.weight)
# END YOUR CODE
def embedding_lookup(self, t):
""" Utilize `self.pretrained_embeddings` to map input `t` from input tokens (integers)
to embedding vectors.
PyTorch Notes:
- `self.pretrained_embeddings` is a torch.nn.Embedding object that we defined in __init__
- Here `t` is a tensor where each row represents a list of features. Each feature is represented by an integer (input token).
- In PyTorch the Embedding object, e.g. `self.pretrained_embeddings`, allows you to
go from an index to embedding. Please see the documentation (https://pytorch.org/docs/stable/nn.html#torch.nn.Embedding)
to learn how to use `self.pretrained_embeddings` to extract the embeddings for your tensor `t`.
@param t (Tensor): input tensor of tokens (batch_size, n_features)
@return x (Tensor): tensor of embeddings for words represented in t
(batch_size, n_features * embed_size)
"""
# YOUR CODE HERE (~1-3 Lines)
# TODO:
# 1) Use `self.pretrained_embeddings` to lookup the embeddings for the input tokens in `t`.
# 2) After you apply the embedding lookup, you will have a tensor shape (batch_size, n_features, embedding_size).
# Use the tensor `view` method to reshape the embeddings tensor to (batch_size, n_features * embedding_size)
###
# Note: In order to get batch_size, you may need use the tensor .size() function:
# https://pytorch.org/docs/stable/tensors.html#torch.Tensor.size
###
# Please see the following docs for support:
# Embedding Layer: https://pytorch.org/docs/stable/nn.html#torch.nn.Embedding
# View: https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view
# print(f'embedding_lookup t.shape={t.shape}')
batch_size = t.shape[0]
x = self.pretrained_embeddings(t).view(batch_size, -1)
# END YOUR CODE
return x
def forward(self, t):
""" Run the model forward.
Note that we will not apply the softmax function here because it is included in the loss function nn.CrossEntropyLoss
PyTorch Notes:
- Every nn.Module object (PyTorch model) has a `forward` function.
- When you apply your nn.Module to an input tensor `t` this function is applied to the tensor.
For example, if you created an instance of your ParserModel and applied it to some `t` as follows,
            the `forward` function would be called on `t` and the result would be stored in the `output` variable:
model = ParserModel()
output = model(t) # this calls the forward function
- For more details checkout: https://pytorch.org/docs/stable/nn.html#torch.nn.Module.forward
@param t (Tensor): input tensor of tokens (batch_size, n_features)
@return logits (Tensor): tensor of predictions (output after applying the layers of the network)
without applying softmax (batch_size, n_classes)
"""
# YOUR CODE HERE (~3-5 lines)
# TODO:
# 1) Apply `self.embedding_lookup` to `t` to get the embeddings
# 2) Apply `embed_to_hidden` linear layer to the embeddings
# 3) Apply relu non-linearity to the output of step 2 to get the hidden units.
# 4) Apply dropout layer to the output of step 3.
# 5) Apply `hidden_to_logits` layer to the output of step 4 to get the logits.
###
# Note: We do not apply the softmax to the logits here, because
# the loss function (torch.nn.CrossEntropyLoss) applies it more efficiently.
###
# Please see the following docs for support:
# ReLU: https://pytorch.org/docs/stable/nn.html?highlight=relu#torch.nn.functional.relu
relu = torch.nn.functional.relu
x = self.embedding_lookup(t)
h = self.dropout(relu(self.embed_to_hidden(x)))
logits = self.hidden_to_logits(h)
# END YOUR CODE
return logits
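A quick shape check for the model above, assuming PyTorch and NumPy are installed; the embedding matrix and token indices are random stand-ins.
# Smoke test with random embeddings and token indices (sizes are illustrative).
import numpy as np
import torch
embeddings = np.random.randn(5000, 50).astype(np.float32)  # (num_words, embed_size)
model = ParserModel(embeddings, n_features=36, hidden_size=200, n_classes=3)
tokens = torch.randint(0, 5000, (4, 36))  # (batch_size, n_features) of word indices
logits = model(tokens)
print(logits.shape)  # expected: torch.Size([4, 3])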
|
[
"yekuang.ky@gmail.com"
] |
yekuang.ky@gmail.com
|
841d788c0340dbd5bee7b8992b8b850a786977ba
|
3ca1902888282bc4c0ca9ffb4d1f13487df889c2
|
/tito/compiler/ir.py
|
424aa265e29adcc7de7fbc4fbea93b5267caa766
|
[] |
no_license
|
minttu/tito.py
|
0d3e2e3ca6222ca02638bdf6ea1dda1219779c6d
|
efd54c56f6944597a677785780b02dc8965418e9
|
refs/heads/master
| 2020-04-09T08:38:10.589300
| 2015-03-13T17:48:00
| 2015-03-13T17:48:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
from collections import OrderedDict
from tito.errors import InvalidCommandError, MalformedAddressError
from tito.data.commands import commands
from tito.data.registers import registers
from tito.data.symbols import symbols
from .binary_command import BinaryCommand
class IR(object):
def __init__(self):
self.binary_code = []
self.ir_code = []
self.data = []
self.symbol_table = OrderedDict()
def add_equ(self, name, val):
self.symbol_table[name] = (False, val)
def add_dc(self, name, val):
pos = len(self.data)
self.data.append(val)
self.symbol_table[name] = (True, pos)
def add_ds(self, name, val):
pos = len(self.data)
for i in range(val):
self.data.append(0)
self.symbol_table[name] = (True, pos)
def add_label(self, name, row):
self.symbol_table[name] = (False, row)
def add_line(self, code):
self.ir_code.append(code)
def generate(self):
for ind, line in enumerate(self.ir_code):
gen = BinaryCommand()
self.binary_code.append(gen)
if line.op not in commands:
raise InvalidCommandError(line.op, line.number, line.raw)
gen["op"].set(commands[line.op][0])
gen["m"].set(["=", None, "@"].index(line.m))
if line.op == "STORE" and gen["m"].value == 2:
gen["m"].set(1)
elif line.op == "STORE" or line.op == "CALL" or line.op[0] == "J":
gen["m"].set(0)
gen["rj"].set(registers[line.rj])
gen["ri"].set(registers[line.ri])
if line.addr in symbols and not line.addr in self.symbol_table:
self.symbol_table[line.addr] = (False, symbols[line.addr])
if line.addr in registers:
gen["ri"].set(registers[line.addr])
gen["m"].set(0)
elif line.addr in self.symbol_table:
sym = self.symbol_table[line.addr]
if sym[0]:
gen["addr"].set(sym[1] + len(self.ir_code))
else:
gen["addr"].set(sym[1])
elif gen["m"].value == 0:
gen["addr"].set(int(line.addr))
else:
raise MalformedAddressError(line.number, line.raw)
def __repr__(self):
ret = ""
code_len = len(self.ir_code)
data_len = len(self.data)
sec = lambda x: "___{}___\n".format(x)
ret += sec("b91")
ret += sec("code")
ret += "0 {}\n".format(code_len - 1)
ret += "\n".join(map(repr, self.binary_code))
ret += "\n"
ret += sec("data")
ret += "{} {}\n".format(code_len, code_len + data_len - 1)
ret += "\n".join(map(str, self.data))
ret += "\n"
ret += sec("symboltable")
for ind, val in self.symbol_table.items():
ret += "{} {}\n".format(ind.lower(), val[1] + code_len if val[0] else val[1])
ret += sec("end")
return ret
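A hedged sketch of the expected call sequence for the IR container above; the parsed instruction lines would come from the package's own parser, which is not shown here.
# Assumed usage: register symbols and code lines first, then generate() resolves addresses.
ir = IR()
ir.add_equ("ten", 10)      # plain constant symbol
ir.add_dc("x", 42)         # one initialised data word
ir.add_ds("buf", 4)        # four reserved data words
# ir.add_line(parsed_line) # parsed_line objects are produced by the tito parser
# ir.generate()
# print(ir)                # __repr__ emits the .b91 code/data/symboltable sections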
|
[
"juhani@imberg.fi"
] |
juhani@imberg.fi
|
92d205d31d38912cad2577572b1027e6e26ac755
|
47030207bc09be135fe9f7647610a1bb190e51dd
|
/sequencer/frontend.py
|
4000e0befa3e00d395583ab2403d69661f5f7e5f
|
[] |
no_license
|
ETH-NEXUS/lego_sequencer
|
df974c440264bf5f58bdc0a60cd65c6ddcc07108
|
ddea7b2753b577cc2c24603f7e5ee948fb789698
|
refs/heads/master
| 2022-12-11T08:44:31.957695
| 2019-09-24T14:02:06
| 2019-09-24T14:02:06
| 203,398,224
| 1
| 0
| null | 2022-12-11T06:50:05
| 2019-08-20T14:56:09
|
Python
|
UTF-8
|
Python
| false
| false
| 577
|
py
|
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for,
jsonify
)
bp = Blueprint(
'frontend', __name__, url_prefix='/',
static_folder="../frontend/dist", template_folder="../frontend/dist"
)
@bp.route('/', defaults={'path': ''})
@bp.route('/<path:path>')  # any unmatched path (e.g. /books) falls through to this handler
def index(path):
if path:
return bp.send_static_file(path)
return bp.send_static_file('index.html')
# @bp.route('/static/<path:path>')
# def static_file(path):
# return bp.send_static_file(path)
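A short sketch of how this blueprint is typically registered in an app factory; the `sequencer` package path is inferred from the file location above.
# Hypothetical app factory wiring for the catch-all frontend blueprint above.
from flask import Flask
def create_app():
    app = Flask(__name__)
    from sequencer import frontend  # assumed package layout (sequencer/frontend.py)
    app.register_blueprint(frontend.bp)
    return app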
|
[
"alquaddoomi@nexus.ethz.ch"
] |
alquaddoomi@nexus.ethz.ch
|